/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <qdf_module.h>
#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif
#ifdef BYPASS_OL_OPS
#include <target_if_dp.h>
#endif

#ifdef REO_QDESC_HISTORY
#define REO_QDESC_HISTORY_SIZE 512
/*
 * Monotonic write index into reo_qdesc_history; consumers mask it with
 * (REO_QDESC_HISTORY_SIZE - 1), which relies on the size being a power
 * of two.
 * NOTE(review): the index is a plain uint64_t and is incremented without
 * atomics or locking — confirm all recording paths run in a single
 * context, otherwise concurrent writers can collide on a slot.
 */
uint64_t reo_qdesc_history_idx;
struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
#endif

#ifdef FEATURE_AST
#ifdef BYPASS_OL_OPS
/*
 * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 * @type: type from enum cdp_txrx_ast_entry_type
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry in the fw, going through target_if directly
 * (BYPASS_OL_OPS) instead of the registered ol_ops callbacks.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	QDF_STATUS status;

	/*
	 * NOTE(review): @flags is not forwarded — the entry is always
	 * installed with WMI_HOST_WDS_FLAG_STATIC. Confirm this is
	 * intentional for the BYPASS_OL_OPS path.
	 */
	status = target_if_add_wds_entry(soc->ctrl_psoc,
					 peer->vdev->vdev_id,
					 peer->mac_addr.raw,
					 dest_macaddr,
					 WMI_HOST_WDS_FLAG_STATIC,
					 type);

	return qdf_status_to_os_return(status);
}

/*
 * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 *
 * This API is used to update the peer mac address for the ast
 * in the fw.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	QDF_STATUS status;

	/* NOTE(review): @flags is ignored here as well; the update is
	 * always sent as WMI_HOST_WDS_FLAG_STATIC — confirm intentional.
	 */
	status = target_if_update_wds_entry(soc->ctrl_psoc,
					    peer->vdev->vdev_id,
					    dest_macaddr,
					    peer->mac_addr.raw,
					    WMI_HOST_WDS_FLAG_STATIC);

	return qdf_status_to_os_return(status);
}

/*
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from fw
 *
 * Return: None
 */
static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
				     uint8_t vdev_id,
				     uint8_t *wds_macaddr,
				     uint8_t type,
				     uint8_t delete_in_fw)
{
	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
				wds_macaddr, type, delete_in_fw);
}
#else
static int dp_add_wds_entry_wrapper(struct dp_soc *soc, 137 struct dp_peer *peer, 138 const uint8_t *dest_macaddr, 139 uint32_t flags, 140 uint8_t type) 141 { 142 int status; 143 144 status = soc->cdp_soc.ol_ops->peer_add_wds_entry( 145 soc->ctrl_psoc, 146 peer->vdev->vdev_id, 147 peer->mac_addr.raw, 148 peer->peer_id, 149 dest_macaddr, 150 peer->mac_addr.raw, 151 flags, 152 type); 153 154 return status; 155 } 156 157 static int dp_update_wds_entry_wrapper(struct dp_soc *soc, 158 struct dp_peer *peer, 159 uint8_t *dest_macaddr, 160 uint32_t flags) 161 { 162 int status; 163 164 status = soc->cdp_soc.ol_ops->peer_update_wds_entry( 165 soc->ctrl_psoc, 166 peer->vdev->vdev_id, 167 dest_macaddr, 168 peer->mac_addr.raw, 169 flags); 170 171 return status; 172 } 173 174 static void dp_del_wds_entry_wrapper(struct dp_soc *soc, 175 uint8_t vdev_id, 176 uint8_t *wds_macaddr, 177 uint8_t type, 178 uint8_t delete_in_fw) 179 { 180 soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc, 181 vdev_id, 182 wds_macaddr, 183 type, 184 delete_in_fw); 185 } 186 #endif 187 #endif 188 189 #ifdef FEATURE_WDS 190 static inline bool 191 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc, 192 struct dp_ast_entry *ast_entry) 193 { 194 /* if peer map v2 is enabled we are not freeing ast entry 195 * here and it is supposed to be freed in unmap event (after 196 * we receive delete confirmation from target) 197 * 198 * if peer_id is invalid we did not get the peer map event 199 * for the peer free ast entry from here only in this case 200 */ 201 202 if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) && 203 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) 204 return true; 205 206 return false; 207 } 208 #else 209 static inline bool 210 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc, 211 struct dp_ast_entry *ast_entry) 212 { 213 return false; 214 } 215 216 void dp_soc_wds_attach(struct dp_soc *soc) 217 { 218 } 219 220 void dp_soc_wds_detach(struct dp_soc *soc) 221 { 222 } 223 #endif 224 
225 #ifdef REO_QDESC_HISTORY 226 static inline void 227 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc, 228 enum reo_qdesc_event_type type) 229 { 230 struct reo_qdesc_event *evt; 231 struct dp_rx_tid *rx_tid = &free_desc->rx_tid; 232 uint32_t idx; 233 234 reo_qdesc_history_idx++; 235 idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1)); 236 237 evt = &reo_qdesc_history[idx]; 238 239 qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE); 240 evt->qdesc_addr = rx_tid->hw_qdesc_paddr; 241 evt->ts = qdf_get_log_timestamp(); 242 evt->type = type; 243 } 244 245 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY 246 static inline void 247 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc, 248 enum reo_qdesc_event_type type) 249 { 250 struct reo_qdesc_event *evt; 251 uint32_t idx; 252 253 reo_qdesc_history_idx++; 254 idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1)); 255 256 evt = &reo_qdesc_history[idx]; 257 258 qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE); 259 evt->qdesc_addr = desc->hw_qdesc_paddr; 260 evt->ts = qdf_get_log_timestamp(); 261 evt->type = type; 262 } 263 264 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \ 265 dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE) 266 267 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \ 268 qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE) 269 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */ 270 271 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \ 272 qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE) 273 274 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \ 275 dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB) 276 277 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \ 278 dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE) 279 280 #else 281 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) 282 283 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) 284 285 #define 
DP_RX_REO_QDESC_FREE_EVT(free_desc) 286 287 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) 288 289 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) 290 #endif 291 292 static inline void 293 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, 294 uint8_t valid) 295 { 296 params->u.upd_queue_params.update_svld = 1; 297 params->u.upd_queue_params.svld = valid; 298 dp_peer_debug("Setting SSN valid bit to %d", 299 valid); 300 } 301 302 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc) 303 { 304 uint32_t max_ast_index; 305 306 max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx); 307 /* allocate ast_table for ast entry to ast_index map */ 308 dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index); 309 soc->ast_table = qdf_mem_malloc(max_ast_index * 310 sizeof(struct dp_ast_entry *)); 311 if (!soc->ast_table) { 312 dp_peer_err("%pK: ast_table memory allocation failed", soc); 313 return QDF_STATUS_E_NOMEM; 314 } 315 return QDF_STATUS_SUCCESS; /* success */ 316 } 317 318 /* 319 * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map 320 * @soc: soc handle 321 * 322 * return: QDF_STATUS 323 */ 324 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc) 325 { 326 uint32_t max_peers, peer_map_size; 327 328 max_peers = soc->max_peer_id; 329 /* allocate the peer ID -> peer object map */ 330 dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers); 331 peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]); 332 soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size); 333 if (!soc->peer_id_to_obj_map) { 334 dp_peer_err("%pK: peer map memory allocation failed", soc); 335 return QDF_STATUS_E_NOMEM; 336 } 337 338 /* 339 * The peer_id_to_obj_map doesn't really need to be initialized, 340 * since elements are only used after they have been individually 341 * initialized. 342 * However, it is convenient for debugging to have all elements 343 * that are not in use set to 0. 
344 */ 345 qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size); 346 347 qdf_spinlock_create(&soc->peer_map_lock); 348 return QDF_STATUS_SUCCESS; /* success */ 349 } 350 351 #define DP_AST_HASH_LOAD_MULT 2 352 #define DP_AST_HASH_LOAD_SHIFT 0 353 354 static inline uint32_t 355 dp_peer_find_hash_index(struct dp_soc *soc, 356 union dp_align_mac_addr *mac_addr) 357 { 358 uint32_t index; 359 360 index = 361 mac_addr->align2.bytes_ab ^ 362 mac_addr->align2.bytes_cd ^ 363 mac_addr->align2.bytes_ef; 364 365 index ^= index >> soc->peer_hash.idx_bits; 366 index &= soc->peer_hash.mask; 367 return index; 368 } 369 370 /* 371 * dp_peer_find_hash_find() - returns legacy or mlo link peer from 372 * peer_hash_table matching vdev_id and mac_address 373 * @soc: soc handle 374 * @peer_mac_addr: peer mac address 375 * @mac_addr_is_aligned: is mac addr alligned 376 * @vdev_id: vdev_id 377 * @mod_id: id of module requesting reference 378 * 379 * return: peer in sucsess 380 * NULL in failure 381 */ 382 struct dp_peer *dp_peer_find_hash_find( 383 struct dp_soc *soc, uint8_t *peer_mac_addr, 384 int mac_addr_is_aligned, uint8_t vdev_id, 385 enum dp_mod_id mod_id) 386 { 387 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 388 uint32_t index; 389 struct dp_peer *peer; 390 391 if (!soc->peer_hash.bins) 392 return NULL; 393 394 if (mac_addr_is_aligned) { 395 mac_addr = (union dp_align_mac_addr *)peer_mac_addr; 396 } else { 397 qdf_mem_copy( 398 &local_mac_addr_aligned.raw[0], 399 peer_mac_addr, QDF_MAC_ADDR_SIZE); 400 mac_addr = &local_mac_addr_aligned; 401 } 402 index = dp_peer_find_hash_index(soc, mac_addr); 403 qdf_spin_lock_bh(&soc->peer_hash_lock); 404 TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) { 405 if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 && 406 ((peer->vdev->vdev_id == vdev_id) || 407 (vdev_id == DP_VDEV_ALL))) { 408 /* take peer reference before returning */ 409 if (dp_peer_get_ref(soc, peer, mod_id) != 410 QDF_STATUS_SUCCESS) 411 
peer = NULL; 412 413 qdf_spin_unlock_bh(&soc->peer_hash_lock); 414 return peer; 415 } 416 } 417 qdf_spin_unlock_bh(&soc->peer_hash_lock); 418 return NULL; /* failure */ 419 } 420 421 qdf_export_symbol(dp_peer_find_hash_find); 422 423 #ifdef WLAN_FEATURE_11BE_MLO 424 /* 425 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table 426 * @soc: soc handle 427 * 428 * return: none 429 */ 430 static void dp_peer_find_hash_detach(struct dp_soc *soc) 431 { 432 if (soc->peer_hash.bins) { 433 qdf_mem_free(soc->peer_hash.bins); 434 soc->peer_hash.bins = NULL; 435 qdf_spinlock_destroy(&soc->peer_hash_lock); 436 } 437 438 if (soc->arch_ops.mlo_peer_find_hash_detach) 439 soc->arch_ops.mlo_peer_find_hash_detach(soc); 440 } 441 442 /* 443 * dp_peer_find_hash_attach() - allocate memory for peer_hash table 444 * @soc: soc handle 445 * 446 * return: QDF_STATUS 447 */ 448 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc) 449 { 450 int i, hash_elems, log2; 451 452 /* allocate the peer MAC address -> peer object hash table */ 453 hash_elems = soc->max_peers; 454 hash_elems *= DP_PEER_HASH_LOAD_MULT; 455 hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; 456 log2 = dp_log2_ceil(hash_elems); 457 hash_elems = 1 << log2; 458 459 soc->peer_hash.mask = hash_elems - 1; 460 soc->peer_hash.idx_bits = log2; 461 /* allocate an array of TAILQ peer object lists */ 462 soc->peer_hash.bins = qdf_mem_malloc( 463 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); 464 if (!soc->peer_hash.bins) 465 return QDF_STATUS_E_NOMEM; 466 467 for (i = 0; i < hash_elems; i++) 468 TAILQ_INIT(&soc->peer_hash.bins[i]); 469 470 qdf_spinlock_create(&soc->peer_hash_lock); 471 472 if (soc->arch_ops.mlo_peer_find_hash_attach && 473 (soc->arch_ops.mlo_peer_find_hash_attach(soc) != 474 QDF_STATUS_SUCCESS)) { 475 dp_peer_find_hash_detach(soc); 476 return QDF_STATUS_E_NOMEM; 477 } 478 return QDF_STATUS_SUCCESS; 479 } 480 481 /* 482 * dp_peer_find_hash_add() - add peer to peer_hash_table 483 * @soc: soc 
handle 484 * @peer: peer handle 485 * @peer_type: link or mld peer 486 * 487 * return: none 488 */ 489 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) 490 { 491 unsigned index; 492 493 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 494 if (peer->peer_type == CDP_LINK_PEER_TYPE) { 495 qdf_spin_lock_bh(&soc->peer_hash_lock); 496 497 if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, 498 DP_MOD_ID_CONFIG))) { 499 dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT, 500 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 501 qdf_spin_unlock_bh(&soc->peer_hash_lock); 502 return; 503 } 504 505 /* 506 * It is important to add the new peer at the tail of 507 * peer list with the bin index. Together with having 508 * the hash_find function search from head to tail, 509 * this ensures that if two entries with the same MAC address 510 * are stored, the one added first will be found first. 511 */ 512 TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, 513 hash_list_elem); 514 515 qdf_spin_unlock_bh(&soc->peer_hash_lock); 516 } else if (peer->peer_type == CDP_MLD_PEER_TYPE) { 517 if (soc->arch_ops.mlo_peer_find_hash_add) 518 soc->arch_ops.mlo_peer_find_hash_add(soc, peer); 519 } else { 520 dp_err("unknown peer type %d", peer->peer_type); 521 } 522 } 523 524 /* 525 * dp_peer_find_hash_remove() - remove peer from peer_hash_table 526 * @soc: soc handle 527 * @peer: peer handle 528 * 529 * return: none 530 */ 531 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) 532 { 533 unsigned index; 534 struct dp_peer *tmppeer = NULL; 535 int found = 0; 536 537 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 538 539 if (peer->peer_type == CDP_LINK_PEER_TYPE) { 540 /* Check if tail is not empty before delete*/ 541 QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); 542 543 qdf_spin_lock_bh(&soc->peer_hash_lock); 544 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], 545 hash_list_elem) { 546 if (tmppeer == peer) { 547 found = 1; 548 break; 549 } 550 
} 551 QDF_ASSERT(found); 552 TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, 553 hash_list_elem); 554 555 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 556 qdf_spin_unlock_bh(&soc->peer_hash_lock); 557 } else if (peer->peer_type == CDP_MLD_PEER_TYPE) { 558 if (soc->arch_ops.mlo_peer_find_hash_remove) 559 soc->arch_ops.mlo_peer_find_hash_remove(soc, peer); 560 } else { 561 dp_err("unknown peer type %d", peer->peer_type); 562 } 563 } 564 #else 565 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc) 566 { 567 int i, hash_elems, log2; 568 569 /* allocate the peer MAC address -> peer object hash table */ 570 hash_elems = soc->max_peers; 571 hash_elems *= DP_PEER_HASH_LOAD_MULT; 572 hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; 573 log2 = dp_log2_ceil(hash_elems); 574 hash_elems = 1 << log2; 575 576 soc->peer_hash.mask = hash_elems - 1; 577 soc->peer_hash.idx_bits = log2; 578 /* allocate an array of TAILQ peer object lists */ 579 soc->peer_hash.bins = qdf_mem_malloc( 580 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); 581 if (!soc->peer_hash.bins) 582 return QDF_STATUS_E_NOMEM; 583 584 for (i = 0; i < hash_elems; i++) 585 TAILQ_INIT(&soc->peer_hash.bins[i]); 586 587 qdf_spinlock_create(&soc->peer_hash_lock); 588 return QDF_STATUS_SUCCESS; 589 } 590 591 static void dp_peer_find_hash_detach(struct dp_soc *soc) 592 { 593 if (soc->peer_hash.bins) { 594 qdf_mem_free(soc->peer_hash.bins); 595 soc->peer_hash.bins = NULL; 596 qdf_spinlock_destroy(&soc->peer_hash_lock); 597 } 598 } 599 600 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) 601 { 602 unsigned index; 603 604 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 605 qdf_spin_lock_bh(&soc->peer_hash_lock); 606 607 if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) { 608 dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT, 609 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 610 qdf_spin_unlock_bh(&soc->peer_hash_lock); 611 return; 612 } 613 614 /* 615 * It is 
important to add the new peer at the tail of the peer list 616 * with the bin index. Together with having the hash_find function 617 * search from head to tail, this ensures that if two entries with 618 * the same MAC address are stored, the one added first will be 619 * found first. 620 */ 621 TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem); 622 623 qdf_spin_unlock_bh(&soc->peer_hash_lock); 624 } 625 626 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) 627 { 628 unsigned index; 629 struct dp_peer *tmppeer = NULL; 630 int found = 0; 631 632 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 633 /* Check if tail is not empty before delete*/ 634 QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); 635 636 qdf_spin_lock_bh(&soc->peer_hash_lock); 637 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) { 638 if (tmppeer == peer) { 639 found = 1; 640 break; 641 } 642 } 643 QDF_ASSERT(found); 644 TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem); 645 646 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 647 qdf_spin_unlock_bh(&soc->peer_hash_lock); 648 } 649 650 651 #endif/* WLAN_FEATURE_11BE_MLO */ 652 653 /* 654 * dp_peer_vdev_list_add() - add peer into vdev's peer list 655 * @soc: soc handle 656 * @vdev: vdev handle 657 * @peer: peer handle 658 * 659 * return: none 660 */ 661 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev, 662 struct dp_peer *peer) 663 { 664 /* only link peer will be added to vdev peer list */ 665 if (IS_MLO_DP_MLD_PEER(peer)) 666 return; 667 668 qdf_spin_lock_bh(&vdev->peer_list_lock); 669 if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) { 670 dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT, 671 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 672 qdf_spin_unlock_bh(&vdev->peer_list_lock); 673 return; 674 } 675 676 /* add this peer into the vdev's list */ 677 if (wlan_op_mode_sta == vdev->opmode) 678 
TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem); 679 else 680 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem); 681 682 vdev->num_peers++; 683 qdf_spin_unlock_bh(&vdev->peer_list_lock); 684 } 685 686 /* 687 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list 688 * @soc: SoC handle 689 * @vdev: VDEV handle 690 * @peer: peer handle 691 * 692 * Return: none 693 */ 694 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev, 695 struct dp_peer *peer) 696 { 697 uint8_t found = 0; 698 struct dp_peer *tmppeer = NULL; 699 700 /* only link peer will be added to vdev peer list */ 701 if (IS_MLO_DP_MLD_PEER(peer)) 702 return; 703 704 qdf_spin_lock_bh(&vdev->peer_list_lock); 705 TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) { 706 if (tmppeer == peer) { 707 found = 1; 708 break; 709 } 710 } 711 712 if (found) { 713 TAILQ_REMOVE(&peer->vdev->peer_list, peer, 714 peer_list_elem); 715 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 716 vdev->num_peers--; 717 } else { 718 /*Ignoring the remove operation as peer not found*/ 719 dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK" 720 , soc, peer, vdev, &peer->vdev->peer_list); 721 } 722 qdf_spin_unlock_bh(&vdev->peer_list_lock); 723 } 724 725 /* 726 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table 727 * @soc: SoC handle 728 * @peer: peer handle 729 * @txrx_peer: txrx peer handle 730 * 731 * Return: None 732 */ 733 void dp_txrx_peer_attach_add(struct dp_soc *soc, 734 struct dp_peer *peer, 735 struct dp_txrx_peer *txrx_peer) 736 { 737 qdf_spin_lock_bh(&soc->peer_map_lock); 738 739 peer->txrx_peer = txrx_peer; 740 txrx_peer->bss_peer = peer->bss_peer; 741 742 if (peer->peer_id == HTT_INVALID_PEER) { 743 qdf_spin_unlock_bh(&soc->peer_map_lock); 744 return; 745 } 746 747 txrx_peer->peer_id = peer->peer_id; 748 749 QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]); 750 751 qdf_spin_unlock_bh(&soc->peer_map_lock); 752 } 753 754 /* 755 * 
dp_peer_find_id_to_obj_add() - Add peer into peer_id table 756 * @soc: SoC handle 757 * @peer: peer handle 758 * @peer_id: peer_id 759 * 760 * Return: None 761 */ 762 void dp_peer_find_id_to_obj_add(struct dp_soc *soc, 763 struct dp_peer *peer, 764 uint16_t peer_id) 765 { 766 QDF_ASSERT(peer_id <= soc->max_peer_id); 767 768 qdf_spin_lock_bh(&soc->peer_map_lock); 769 770 peer->peer_id = peer_id; 771 772 if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) { 773 dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u", 774 QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id); 775 qdf_spin_unlock_bh(&soc->peer_map_lock); 776 return; 777 } 778 779 if (!soc->peer_id_to_obj_map[peer_id]) { 780 soc->peer_id_to_obj_map[peer_id] = peer; 781 if (peer->txrx_peer) 782 peer->txrx_peer->peer_id = peer_id; 783 } else { 784 /* Peer map event came for peer_id which 785 * is already mapped, this is not expected 786 */ 787 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 788 QDF_ASSERT(0); 789 } 790 qdf_spin_unlock_bh(&soc->peer_map_lock); 791 } 792 793 /* 794 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table 795 * @soc: SoC handle 796 * @peer_id: peer_id 797 * 798 * Return: None 799 */ 800 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc, 801 uint16_t peer_id) 802 { 803 struct dp_peer *peer = NULL; 804 QDF_ASSERT(peer_id <= soc->max_peer_id); 805 806 qdf_spin_lock_bh(&soc->peer_map_lock); 807 peer = soc->peer_id_to_obj_map[peer_id]; 808 peer->peer_id = HTT_INVALID_PEER; 809 if (peer->txrx_peer) 810 peer->txrx_peer->peer_id = HTT_INVALID_PEER; 811 soc->peer_id_to_obj_map[peer_id] = NULL; 812 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG); 813 qdf_spin_unlock_bh(&soc->peer_map_lock); 814 } 815 816 #ifdef FEATURE_MEC 817 /** 818 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table 819 * @soc: SoC handle 820 * 821 * Return: QDF_STATUS 822 */ 823 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc) 824 { 825 int log2, 
hash_elems, i; 826 827 log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX); 828 hash_elems = 1 << log2; 829 830 soc->mec_hash.mask = hash_elems - 1; 831 soc->mec_hash.idx_bits = log2; 832 833 dp_peer_info("%pK: max mec index: %d", 834 soc, DP_PEER_MAX_MEC_IDX); 835 836 /* allocate an array of TAILQ mec object lists */ 837 soc->mec_hash.bins = qdf_mem_malloc(hash_elems * 838 sizeof(TAILQ_HEAD(anonymous_tail_q, 839 dp_mec_entry))); 840 841 if (!soc->mec_hash.bins) 842 return QDF_STATUS_E_NOMEM; 843 844 for (i = 0; i < hash_elems; i++) 845 TAILQ_INIT(&soc->mec_hash.bins[i]); 846 847 return QDF_STATUS_SUCCESS; 848 } 849 850 /** 851 * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address 852 * @soc: SoC handle 853 * 854 * Return: MEC hash 855 */ 856 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc, 857 union dp_align_mac_addr *mac_addr) 858 { 859 uint32_t index; 860 861 index = 862 mac_addr->align2.bytes_ab ^ 863 mac_addr->align2.bytes_cd ^ 864 mac_addr->align2.bytes_ef; 865 index ^= index >> soc->mec_hash.idx_bits; 866 index &= soc->mec_hash.mask; 867 return index; 868 } 869 870 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc, 871 uint8_t pdev_id, 872 uint8_t *mec_mac_addr) 873 { 874 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 875 uint32_t index; 876 struct dp_mec_entry *mecentry; 877 878 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 879 mec_mac_addr, QDF_MAC_ADDR_SIZE); 880 mac_addr = &local_mac_addr_aligned; 881 882 index = dp_peer_mec_hash_index(soc, mac_addr); 883 TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) { 884 if ((pdev_id == mecentry->pdev_id) && 885 !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr)) 886 return mecentry; 887 } 888 889 return NULL; 890 } 891 892 /** 893 * dp_peer_mec_hash_add() - Add MEC entry into hash table 894 * @soc: SoC handle 895 * 896 * This function adds the MEC entry into SoC MEC hash table 897 * 898 * Return: None 899 */ 900 static inline void 
dp_peer_mec_hash_add(struct dp_soc *soc, 901 struct dp_mec_entry *mecentry) 902 { 903 uint32_t index; 904 905 index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr); 906 qdf_spin_lock_bh(&soc->mec_lock); 907 TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem); 908 qdf_spin_unlock_bh(&soc->mec_lock); 909 } 910 911 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc, 912 struct dp_vdev *vdev, 913 uint8_t *mac_addr) 914 { 915 struct dp_mec_entry *mecentry = NULL; 916 struct dp_pdev *pdev = NULL; 917 918 if (!vdev) { 919 dp_peer_err("%pK: Peers vdev is NULL", soc); 920 return QDF_STATUS_E_INVAL; 921 } 922 923 pdev = vdev->pdev; 924 925 if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >= 926 DP_PEER_MAX_MEC_ENTRY)) { 927 dp_peer_warn("%pK: max MEC entry limit reached mac_addr: " 928 QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr)); 929 return QDF_STATUS_E_NOMEM; 930 } 931 932 qdf_spin_lock_bh(&soc->mec_lock); 933 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id, 934 mac_addr); 935 if (qdf_likely(mecentry)) { 936 mecentry->is_active = TRUE; 937 qdf_spin_unlock_bh(&soc->mec_lock); 938 return QDF_STATUS_E_ALREADY; 939 } 940 941 qdf_spin_unlock_bh(&soc->mec_lock); 942 943 dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: " 944 QDF_MAC_ADDR_FMT, 945 soc, pdev->pdev_id, vdev->vdev_id, 946 QDF_MAC_ADDR_REF(mac_addr)); 947 948 mecentry = (struct dp_mec_entry *) 949 qdf_mem_malloc(sizeof(struct dp_mec_entry)); 950 951 if (qdf_unlikely(!mecentry)) { 952 dp_peer_err("%pK: fail to allocate mecentry", soc); 953 return QDF_STATUS_E_NOMEM; 954 } 955 956 qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0], 957 (struct qdf_mac_addr *)mac_addr); 958 mecentry->pdev_id = pdev->pdev_id; 959 mecentry->vdev_id = vdev->vdev_id; 960 mecentry->is_active = TRUE; 961 dp_peer_mec_hash_add(soc, mecentry); 962 963 qdf_atomic_inc(&soc->mec_cnt); 964 DP_STATS_INC(soc, mec.added, 1); 965 966 return QDF_STATUS_SUCCESS; 967 } 968 969 void 
dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry, 970 void *ptr) 971 { 972 uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr); 973 974 TAILQ_HEAD(, dp_mec_entry) * free_list = ptr; 975 976 TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry, 977 hash_list_elem); 978 TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem); 979 } 980 981 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr) 982 { 983 struct dp_mec_entry *mecentry, *mecentry_next; 984 985 TAILQ_HEAD(, dp_mec_entry) * free_list = ptr; 986 987 TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem, 988 mecentry_next) { 989 dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT, 990 soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr)); 991 qdf_mem_free(mecentry); 992 qdf_atomic_dec(&soc->mec_cnt); 993 DP_STATS_INC(soc, mec.deleted, 1); 994 } 995 } 996 997 /** 998 * dp_peer_mec_hash_detach() - Free MEC Hash table 999 * @soc: SoC handle 1000 * 1001 * Return: None 1002 */ 1003 void dp_peer_mec_hash_detach(struct dp_soc *soc) 1004 { 1005 dp_peer_mec_flush_entries(soc); 1006 qdf_mem_free(soc->mec_hash.bins); 1007 soc->mec_hash.bins = NULL; 1008 } 1009 1010 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc) 1011 { 1012 qdf_spinlock_destroy(&soc->mec_lock); 1013 } 1014 1015 void dp_peer_mec_spinlock_create(struct dp_soc *soc) 1016 { 1017 qdf_spinlock_create(&soc->mec_lock); 1018 } 1019 #else 1020 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc) 1021 { 1022 return QDF_STATUS_SUCCESS; 1023 } 1024 1025 void dp_peer_mec_hash_detach(struct dp_soc *soc) 1026 { 1027 } 1028 #endif 1029 1030 #ifdef FEATURE_AST 1031 #ifdef WLAN_FEATURE_11BE_MLO 1032 /* 1033 * dp_peer_exist_on_pdev - check if peer with mac address exist on pdev 1034 * 1035 * @soc: Datapath SOC handle 1036 * @peer_mac_addr: peer mac address 1037 * @mac_addr_is_aligned: is mac address aligned 1038 * @pdev: Datapath PDEV handle 1039 * 1040 * Return: true if peer found else return false 1041 */ 1042 static 
bool dp_peer_exist_on_pdev(struct dp_soc *soc,
			   uint8_t *peer_mac_addr,
			   int mac_addr_is_aligned,
			   struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	/* Work on an aligned copy unless the caller says it is aligned */
	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);

	if (found)
		return found;

	/* Not in the link-peer hash: also search the MLD peer hash
	 * (MLO builds keep MLD peers in a separate table).
	 */
	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
					  mac_addr_is_aligned, DP_VDEV_ALL,
					  DP_MOD_ID_CDP);
	if (peer) {
		if (peer->vdev->pdev == pdev)
			found = true;
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return found;
}
#else
/* Non-MLO variant: only the link-peer hash table is consulted */
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr,
					      &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return found;
}
#endif /* WLAN_FEATURE_11BE_MLO */

/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Sizes the table from the configured max AST index, rounded up to a
 * power of two so a mask can be used for bucket selection.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		      DP_AST_HASH_LOAD_SHIFT);

	/* Round up to a power of two; mask/idx_bits drive hashing */
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
		     soc, hash_elems, max_ast_idx);

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
					       dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Fires the entry's registered free callback (if any) with
 * CDP_TXRX_AST_DELETE_IN_PROGRESS so the owner can release the cookie.
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);

	/* Call the callbacks to free up the cookie */
	if (cb) {
		/* Clear before invoking so the callback cannot re-trigger */
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 *
dp_peer_ast_hash_detach() - Free AST Hash table 1184 * @soc: SoC handle 1185 * 1186 * Return: None 1187 */ 1188 void dp_peer_ast_hash_detach(struct dp_soc *soc) 1189 { 1190 unsigned int index; 1191 struct dp_ast_entry *ast, *ast_next; 1192 1193 if (!soc->ast_hash.mask) 1194 return; 1195 1196 if (!soc->ast_hash.bins) 1197 return; 1198 1199 dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries); 1200 1201 qdf_spin_lock_bh(&soc->ast_lock); 1202 for (index = 0; index <= soc->ast_hash.mask; index++) { 1203 if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) { 1204 TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index], 1205 hash_list_elem, ast_next) { 1206 TAILQ_REMOVE(&soc->ast_hash.bins[index], ast, 1207 hash_list_elem); 1208 dp_peer_ast_cleanup(soc, ast); 1209 soc->num_ast_entries--; 1210 qdf_mem_free(ast); 1211 } 1212 } 1213 } 1214 qdf_spin_unlock_bh(&soc->ast_lock); 1215 1216 qdf_mem_free(soc->ast_hash.bins); 1217 soc->ast_hash.bins = NULL; 1218 } 1219 1220 /* 1221 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address 1222 * @soc: SoC handle 1223 * 1224 * Return: AST hash 1225 */ 1226 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc, 1227 union dp_align_mac_addr *mac_addr) 1228 { 1229 uint32_t index; 1230 1231 index = 1232 mac_addr->align2.bytes_ab ^ 1233 mac_addr->align2.bytes_cd ^ 1234 mac_addr->align2.bytes_ef; 1235 index ^= index >> soc->ast_hash.idx_bits; 1236 index &= soc->ast_hash.mask; 1237 return index; 1238 } 1239 1240 /* 1241 * dp_peer_ast_hash_add() - Add AST entry into hash table 1242 * @soc: SoC handle 1243 * 1244 * This function adds the AST entry into SoC AST hash table 1245 * It assumes caller has taken the ast lock to protect the access to this table 1246 * 1247 * Return: None 1248 */ 1249 static inline void dp_peer_ast_hash_add(struct dp_soc *soc, 1250 struct dp_ast_entry *ase) 1251 { 1252 uint32_t index; 1253 1254 index = dp_peer_ast_hash_index(soc, &ase->mac_addr); 1255 
TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem); 1256 } 1257 1258 /* 1259 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table 1260 * @soc: SoC handle 1261 * 1262 * This function removes the AST entry from soc AST hash table 1263 * It assumes caller has taken the ast lock to protect the access to this table 1264 * 1265 * Return: None 1266 */ 1267 void dp_peer_ast_hash_remove(struct dp_soc *soc, 1268 struct dp_ast_entry *ase) 1269 { 1270 unsigned index; 1271 struct dp_ast_entry *tmpase; 1272 int found = 0; 1273 1274 if (soc->ast_offload_support && !soc->host_ast_db_enable) 1275 return; 1276 1277 index = dp_peer_ast_hash_index(soc, &ase->mac_addr); 1278 /* Check if tail is not empty before delete*/ 1279 QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index])); 1280 1281 dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT, 1282 ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw)); 1283 1284 TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) { 1285 if (tmpase == ase) { 1286 found = 1; 1287 break; 1288 } 1289 } 1290 1291 QDF_ASSERT(found); 1292 1293 if (found) 1294 TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem); 1295 } 1296 1297 /* 1298 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address 1299 * @soc: SoC handle 1300 * 1301 * It assumes caller has taken the ast lock to protect the access to 1302 * AST hash table 1303 * 1304 * Return: AST entry 1305 */ 1306 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc, 1307 uint8_t *ast_mac_addr, 1308 uint8_t vdev_id) 1309 { 1310 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 1311 uint32_t index; 1312 struct dp_ast_entry *ase; 1313 1314 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 1315 ast_mac_addr, QDF_MAC_ADDR_SIZE); 1316 mac_addr = &local_mac_addr_aligned; 1317 1318 index = dp_peer_ast_hash_index(soc, mac_addr); 1319 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { 1320 if ((vdev_id == 
ase->vdev_id) && 1321 !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) { 1322 return ase; 1323 } 1324 } 1325 1326 return NULL; 1327 } 1328 1329 /* 1330 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address 1331 * @soc: SoC handle 1332 * 1333 * It assumes caller has taken the ast lock to protect the access to 1334 * AST hash table 1335 * 1336 * Return: AST entry 1337 */ 1338 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, 1339 uint8_t *ast_mac_addr, 1340 uint8_t pdev_id) 1341 { 1342 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 1343 uint32_t index; 1344 struct dp_ast_entry *ase; 1345 1346 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 1347 ast_mac_addr, QDF_MAC_ADDR_SIZE); 1348 mac_addr = &local_mac_addr_aligned; 1349 1350 index = dp_peer_ast_hash_index(soc, mac_addr); 1351 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { 1352 if ((pdev_id == ase->pdev_id) && 1353 !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) { 1354 return ase; 1355 } 1356 } 1357 1358 return NULL; 1359 } 1360 1361 /* 1362 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address 1363 * @soc: SoC handle 1364 * 1365 * It assumes caller has taken the ast lock to protect the access to 1366 * AST hash table 1367 * 1368 * Return: AST entry 1369 */ 1370 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, 1371 uint8_t *ast_mac_addr) 1372 { 1373 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 1374 unsigned index; 1375 struct dp_ast_entry *ase; 1376 1377 if (!soc->ast_hash.bins) 1378 return NULL; 1379 1380 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 1381 ast_mac_addr, QDF_MAC_ADDR_SIZE); 1382 mac_addr = &local_mac_addr_aligned; 1383 1384 index = dp_peer_ast_hash_index(soc, mac_addr); 1385 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { 1386 if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) { 1387 return ase; 1388 } 1389 } 1390 1391 return NULL; 1392 } 1393 1394 /* 1395 * 
 dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type type;
	struct dp_peer *peer;
	struct dp_peer *old_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (is_wds)
		type = CDP_TXRX_AST_TYPE_WDS;
	else
		type = CDP_TXRX_AST_TYPE_STATIC;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
	if (!peer) {
		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
			     soc, peer_id,
			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	/* A non-WDS map for an MLD peer records the MLD MAC address */
	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
		type = CDP_TXRX_AST_TYPE_MLD;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	/* Inactive peers may only receive STATIC/MLD/SELF entries */
	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if (type != CDP_TXRX_AST_TYPE_STATIC &&
		    type != CDP_TXRX_AST_TYPE_MLD &&
		    type != CDP_TXRX_AST_TYPE_SELF) {
			status = QDF_STATUS_E_BUSY;
			goto fail;
		}
	}

	dp_peer_debug("%pK: vdev: %u ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, vdev->vdev_id, type,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/*
	 * In MLO scenario, there is possibility for same mac address
	 * on both link mac address and MLD mac address.
	 * Duplicate AST map needs to be handled for non-mld type.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
			      hw_peer_id, vdev_id,
			      QDF_MAC_ADDR_REF(mac_addr));

		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						   DP_MOD_ID_AST);
		if (!old_peer) {
			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
				     soc, ast_entry->peer_id,
				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
			qdf_spin_unlock_bh(&soc->ast_lock);
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
		dp_peer_free_ast_entry(soc, ast_entry);
		/* NOTE(review): old_peer is already known non-NULL here;
		 * this check is redundant but harmless.
		 */
		if (old_peer)
			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
	}

	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));
	if (!ast_entry) {
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_ASSERT(0);
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->type = type;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		/* On a STA vdev the static entry is the BSS entry */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		break;
	case CDP_TXRX_AST_TYPE_MLD:
		break;
	default:
		dp_peer_alert("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	ast_entry->ast_idx = hw_peer_id;
	ast_entry->ast_hash_value = ast_hash;
	ast_entry->peer_id = peer_id;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
			  ase_list_elem);

	qdf_spin_unlock_bh(&soc->ast_lock);
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return status;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	/* Nothing to map on the host when AST handling is offloaded */
	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	if (!peer) {
		return QDF_STATUS_E_INVAL;
	}

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases like Auth attack on a repeater
		 * can result in the number of ast_entries falling
		 * in the same hash bucket to exceed the max_skid
		 * length supported by HW in root AP. In these cases
		 * the FW will return the hw_peer_id (ast_index) as
		 * 0xffff indicating HW could not add the entry in
		 * its table. Host has to delete the entry from its
		 * table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				/* Save callback/cookie; invoked after the
				 * lock is dropped to avoid re-entrancy.
				 */
				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (ast_entry) {
		/* Bind host entry to the HW index reported by target */
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
				soc->ctrl_psoc, peer->peer_id,
				hw_peer_id, vdev_id,
				mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}

/* Completion callback for HMWDS add-after-delete: once the old entry is
 * confirmed deleted (CDP_TXRX_AST_DELETED), re-issue dp_peer_add_ast()
 * with the parameters stashed in @cookie.  @cookie is always freed.
 */
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id, DP_MOD_ID_AST);
	if (peer) {
		err = dp_peer_add_ast(soc, peer,
				      &param->mac_addr.raw[0],
				      param->type,
				      param->flags);

		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
					param->type, err, false);

		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}
	qdf_mem_free(cookie);
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type (enum cdp_txrx_ast_entry_type)
 * @flags: wds or hmwds flags passed through to the target
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;
	int status = 0;

	/* Host-side AST creation is disabled when AST is offloaded */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* Inactive peers may only receive STATIC/SELF entries */
	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If AST entry already exists , just return from here
	 * ast entry with same mac address can exist on different radios
	 * if ast_override support is enabled use search by pdev in this
	 * case
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* For HMWDS_SEC entries can be added for same mac address
		 * do not check for existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}

			/* Add for HMWDS entry we cannot be ignored if there
			 * is AST entry with same mac address
			 *
			 * if ast entry exists with the requested mac address
			 * send a delete command and register callback which
			 * can take care of adding HMWDS ast entry on delete
			 * confirmation from target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return QDF_STATUS_E_NOMEM;
				}

				/* Stash the add parameters; the registered
				 * dp_peer_free_hmwds_cb replays the add once
				 * the old entry's delete is confirmed.
				 */
				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				/* Call the saved callback*/
				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
				}
				return QDF_STATUS_E_AGAIN;
			}

			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->peer_id = HTT_INVALID_PEER;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		/* On a STA vdev the static entry is the BSS entry */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		/* Secondary entries are linked to the peer immediately;
		 * they never get a FW map event.
		 */
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
		break;
	case CDP_TXRX_AST_TYPE_DA:
		/* DA entries are owned by the vap BSS peer, not @peer */
		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
							  DP_MOD_ID_AST);
		if (!vap_bss_peer) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			qdf_mem_free(ast_entry);
			return QDF_STATUS_E_FAILURE;
		}
		peer = vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		dp_peer_err("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	/* Push the entry to FW except for types FW creates implicitly */
	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		status = dp_add_wds_entry_wrapper(soc,
						  peer,
						  mac_addr,
						  flags,
						  ast_entry->type);

	if (vap_bss_peer)
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);

	qdf_spin_unlock_bh(&soc->ast_lock);
	return qdf_status_from_os_return(status);
}

qdf_export_symbol(dp_peer_add_ast);

/*
 * dp_peer_free_ast_entry() - Free up the ast entry memory
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to free up the memory associated with
 * AST entry.
 *
 * Return: None
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	/* Drop the callback/cookie first so dp_peer_ast_cleanup() below
	 * does not fire a stale callback for this entry.
	 */
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}

/*
 * dp_peer_unlink_ast_entry() - Unlink AST entry from its peer
 * @soc: SoC handle
 * @ast_entry: Address search entry
 * @peer: peer the entry is currently linked to
 *
 * This API is used to remove/unlink AST entry from the peer list
 * and hash list.  It does not free the entry itself.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
	if (!peer) {
		dp_info_rl("NULL peer");
		return;
	}

	if (ast_entry->peer_id == HTT_INVALID_PEER) {
		/* Entry was never mapped to a peer; nothing to unlink */
		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			   QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			   ast_entry->type);
		return;
	}
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */

	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	ast_entry->peer_id = HTT_INVALID_PEER;
}

/*
 * dp_peer_del_ast() - Delete
 and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return;

	if (!ast_entry) {
		dp_info_rl("NULL AST entry");
		return;
	}

	/* A second delete while the FW confirmation is pending is a no-op */
	if (ast_entry->delete_in_progress) {
		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			   QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			   ast_entry->type);
		return;
	}

	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	ast_entry->delete_in_progress = true;

	/* In teardown del ast is called after setting logical delete state
	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
	 * state
	 */
	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				       DP_MOD_ID_AST);

	dp_peer_ast_send_wds_del(soc, ast_entry, peer);

	/* Remove SELF and STATIC entries in teardown itself */
	if (!ast_entry->next_hop)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* if peer map v2 is enabled we are not freeing ast entry
	 * here and it is supposed to be freed in unmap event (after
	 * we receive delete confirmation from target)
	 *
	 * if peer_id is invalid we did not get the peer map event
	 * for the peer free ast entry from here only in this case
	 */
	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
		goto end;

	/* for WDS secondary entry ast_entry->next_hop would be set so
	 * unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry unmap notification from
	 * FW will not come. Hence unlinking is done right here.
	 */

	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

end:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}

/*
 * dp_peer_update_ast() - Move an AST entry to a (roamed) peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function update the AST entry to the roamed peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * NOTE(review): the contract is 0 on success / -1 on failure, but the
 * ast_offload_support path returns QDF_STATUS_E_INVAL (a different
 * non-zero encoding) — callers treating non-zero as failure still work,
 * but the mixed conventions are worth unifying.
 *
 * Return: 0 if ast entry is updated successfully
 *	   -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
		      peer->vdev->vdev_id, flags,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	/* Do not send AST update in below cases
	 * 1) Ast entry delete has already triggered
	 * 2) Peer delete is already triggered
	 * 3) We did not get the HTT map for create event
	 */
	if (ast_entry->delete_in_progress ||
	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
	    !ast_entry->is_mapped)
		return ret;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	/*
	 * Avoids flood of WMI update messages sent to FW for same peer.
	 */
	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
	    (ast_entry->is_active))
		return 0;

	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
					 DP_MOD_ID_AST);
	if (!old_peer)
		return 0;

	/* Re-home the entry from the old peer onto @peer */
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);

	ast_entry->peer_id = peer->peer_id;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = dp_update_wds_entry_wrapper(soc,
					  peer,
					  ast_entry->mac_addr.raw,
					  flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
2199 * 2200 * Return: (uint8_t) next_hop 2201 */ 2202 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 2203 struct dp_ast_entry *ast_entry) 2204 { 2205 return ast_entry->next_hop; 2206 } 2207 2208 /* 2209 * dp_peer_ast_set_type() - set type from the ast entry 2210 * @soc: SoC handle 2211 * @ast_entry: AST entry of the node 2212 * 2213 * This function sets the type in the ast entry. 2214 * 2215 * Return: 2216 */ 2217 void dp_peer_ast_set_type(struct dp_soc *soc, 2218 struct dp_ast_entry *ast_entry, 2219 enum cdp_txrx_ast_entry_type type) 2220 { 2221 ast_entry->type = type; 2222 } 2223 2224 void dp_peer_ast_send_wds_del(struct dp_soc *soc, 2225 struct dp_ast_entry *ast_entry, 2226 struct dp_peer *peer) 2227 { 2228 bool delete_in_fw = false; 2229 2230 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE, 2231 "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n", 2232 __func__, ast_entry->type, ast_entry->pdev_id, 2233 ast_entry->vdev_id, 2234 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw), 2235 ast_entry->next_hop, ast_entry->peer_id); 2236 2237 /* 2238 * If peer state is logical delete, the peer is about to get 2239 * teared down with a peer delete command to firmware, 2240 * which will cleanup all the wds ast entries. 2241 * So, no need to send explicit wds ast delete to firmware. 
2242 */ 2243 if (ast_entry->next_hop) { 2244 if (peer && dp_peer_state_cmp(peer, 2245 DP_PEER_STATE_LOGICAL_DELETE)) 2246 delete_in_fw = false; 2247 else 2248 delete_in_fw = true; 2249 2250 dp_del_wds_entry_wrapper(soc, 2251 ast_entry->vdev_id, 2252 ast_entry->mac_addr.raw, 2253 ast_entry->type, 2254 delete_in_fw); 2255 } 2256 } 2257 #else 2258 void dp_peer_free_ast_entry(struct dp_soc *soc, 2259 struct dp_ast_entry *ast_entry) 2260 { 2261 } 2262 2263 void dp_peer_unlink_ast_entry(struct dp_soc *soc, 2264 struct dp_ast_entry *ast_entry, 2265 struct dp_peer *peer) 2266 { 2267 } 2268 2269 void dp_peer_ast_hash_remove(struct dp_soc *soc, 2270 struct dp_ast_entry *ase) 2271 { 2272 } 2273 2274 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc, 2275 uint8_t *ast_mac_addr, 2276 uint8_t vdev_id) 2277 { 2278 return NULL; 2279 } 2280 2281 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, 2282 struct dp_peer *peer, 2283 uint8_t *mac_addr, 2284 enum cdp_txrx_ast_entry_type type, 2285 uint32_t flags) 2286 { 2287 return QDF_STATUS_E_FAILURE; 2288 } 2289 2290 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) 2291 { 2292 } 2293 2294 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, 2295 struct dp_ast_entry *ast_entry, uint32_t flags) 2296 { 2297 return 1; 2298 } 2299 2300 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, 2301 uint8_t *ast_mac_addr) 2302 { 2303 return NULL; 2304 } 2305 2306 static inline 2307 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id, 2308 uint8_t *mac_addr, uint16_t hw_peer_id, 2309 uint8_t vdev_id, uint16_t ast_hash, 2310 uint8_t is_wds) 2311 { 2312 return QDF_STATUS_SUCCESS; 2313 } 2314 2315 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, 2316 uint8_t *ast_mac_addr, 2317 uint8_t pdev_id) 2318 { 2319 return NULL; 2320 } 2321 2322 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc) 2323 { 2324 return QDF_STATUS_SUCCESS; 2325 } 2326 
2327 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc, 2328 struct dp_peer *peer, 2329 uint8_t *mac_addr, 2330 uint16_t hw_peer_id, 2331 uint8_t vdev_id, 2332 uint16_t ast_hash, 2333 uint8_t is_wds) 2334 { 2335 return QDF_STATUS_SUCCESS; 2336 } 2337 2338 void dp_peer_ast_hash_detach(struct dp_soc *soc) 2339 { 2340 } 2341 2342 void dp_peer_ast_set_type(struct dp_soc *soc, 2343 struct dp_ast_entry *ast_entry, 2344 enum cdp_txrx_ast_entry_type type) 2345 { 2346 } 2347 2348 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, 2349 struct dp_ast_entry *ast_entry) 2350 { 2351 return 0xff; 2352 } 2353 2354 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 2355 struct dp_ast_entry *ast_entry) 2356 { 2357 return 0xff; 2358 } 2359 2360 void dp_peer_ast_send_wds_del(struct dp_soc *soc, 2361 struct dp_ast_entry *ast_entry, 2362 struct dp_peer *peer) 2363 { 2364 } 2365 #endif 2366 2367 #ifdef WLAN_FEATURE_MULTI_AST_DEL 2368 void dp_peer_ast_send_multi_wds_del( 2369 struct dp_soc *soc, uint8_t vdev_id, 2370 struct peer_del_multi_wds_entries *wds_list) 2371 { 2372 struct cdp_soc_t *cdp_soc = &soc->cdp_soc; 2373 2374 if (cdp_soc && cdp_soc->ol_ops && 2375 cdp_soc->ol_ops->peer_del_multi_wds_entry) 2376 cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc, 2377 vdev_id, wds_list); 2378 } 2379 #endif 2380 2381 #ifdef FEATURE_WDS 2382 /** 2383 * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer 2384 * @soc: soc handle 2385 * @peer: peer handle 2386 * 2387 * Free all the wds ast entries associated with peer 2388 * 2389 * Return: Number of wds ast entries freed 2390 */ 2391 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc, 2392 struct dp_peer *peer) 2393 { 2394 TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0}; 2395 struct dp_ast_entry *ast_entry, *temp_ast_entry; 2396 uint32_t num_ast = 0; 2397 2398 TAILQ_INIT(&ast_local_list); 2399 qdf_spin_lock_bh(&soc->ast_lock); 2400 2401 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, 
temp_ast_entry) { 2402 if (ast_entry->next_hop) 2403 num_ast++; 2404 2405 if (ast_entry->is_mapped) 2406 soc->ast_table[ast_entry->ast_idx] = NULL; 2407 2408 dp_peer_unlink_ast_entry(soc, ast_entry, peer); 2409 DP_STATS_INC(soc, ast.deleted, 1); 2410 dp_peer_ast_hash_remove(soc, ast_entry); 2411 TAILQ_INSERT_TAIL(&ast_local_list, ast_entry, 2412 ase_list_elem); 2413 soc->num_ast_entries--; 2414 } 2415 2416 qdf_spin_unlock_bh(&soc->ast_lock); 2417 2418 TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem, 2419 temp_ast_entry) { 2420 if (ast_entry->callback) 2421 ast_entry->callback(soc->ctrl_psoc, 2422 dp_soc_to_cdp_soc(soc), 2423 ast_entry->cookie, 2424 CDP_TXRX_AST_DELETED); 2425 2426 qdf_mem_free(ast_entry); 2427 } 2428 2429 return num_ast; 2430 } 2431 /** 2432 * dp_peer_clean_wds_entries() - Clean wds ast entries and compare 2433 * @soc: soc handle 2434 * @peer: peer handle 2435 * @free_wds_count - number of wds entries freed by FW with peer delete 2436 * 2437 * Free all the wds ast entries associated with peer and compare with 2438 * the value received from firmware 2439 * 2440 * Return: Number of wds ast entries freed 2441 */ 2442 static void 2443 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer, 2444 uint32_t free_wds_count) 2445 { 2446 uint32_t wds_deleted = 0; 2447 2448 if (soc->ast_offload_support && !soc->host_ast_db_enable) 2449 return; 2450 2451 wds_deleted = dp_peer_ast_free_wds_entries(soc, peer); 2452 if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) && 2453 (free_wds_count != wds_deleted)) { 2454 DP_STATS_INC(soc, ast.ast_mismatch, 1); 2455 dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT")number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d", 2456 peer, peer->mac_addr.raw, free_wds_count, 2457 wds_deleted); 2458 } 2459 } 2460 2461 #else 2462 static void 2463 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer, 2464 uint32_t free_wds_count) 2465 { 2466 
struct dp_ast_entry *ast_entry, *temp_ast_entry; 2467 2468 qdf_spin_lock_bh(&soc->ast_lock); 2469 2470 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) { 2471 dp_peer_unlink_ast_entry(soc, ast_entry, peer); 2472 2473 if (ast_entry->is_mapped) 2474 soc->ast_table[ast_entry->ast_idx] = NULL; 2475 2476 dp_peer_free_ast_entry(soc, ast_entry); 2477 } 2478 2479 peer->self_ast_entry = NULL; 2480 qdf_spin_unlock_bh(&soc->ast_lock); 2481 } 2482 #endif 2483 2484 /** 2485 * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete 2486 * @soc: soc handle 2487 * @peer: peer handle 2488 * @vdev_id: vdev_id 2489 * @mac_addr: mac address of the AST entry to searc and delete 2490 * 2491 * find the ast entry from the peer list using the mac address and free 2492 * the entry. 2493 * 2494 * Return: SUCCESS or NOENT 2495 */ 2496 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc, 2497 struct dp_peer *peer, 2498 uint8_t vdev_id, 2499 uint8_t *mac_addr) 2500 { 2501 struct dp_ast_entry *ast_entry; 2502 void *cookie = NULL; 2503 txrx_ast_free_cb cb = NULL; 2504 2505 /* 2506 * release the reference only if it is mapped 2507 * to ast_table 2508 */ 2509 2510 qdf_spin_lock_bh(&soc->ast_lock); 2511 2512 ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id); 2513 if (!ast_entry) { 2514 qdf_spin_unlock_bh(&soc->ast_lock); 2515 return QDF_STATUS_E_NOENT; 2516 } else if (ast_entry->is_mapped) { 2517 soc->ast_table[ast_entry->ast_idx] = NULL; 2518 } 2519 2520 cb = ast_entry->callback; 2521 cookie = ast_entry->cookie; 2522 2523 2524 dp_peer_unlink_ast_entry(soc, ast_entry, peer); 2525 2526 dp_peer_free_ast_entry(soc, ast_entry); 2527 2528 qdf_spin_unlock_bh(&soc->ast_lock); 2529 2530 if (cb) { 2531 cb(soc->ctrl_psoc, 2532 dp_soc_to_cdp_soc(soc), 2533 cookie, 2534 CDP_TXRX_AST_DELETED); 2535 } 2536 2537 return QDF_STATUS_SUCCESS; 2538 } 2539 2540 void dp_peer_find_hash_erase(struct dp_soc *soc) 2541 { 2542 int i; 2543 2544 /* 2545 * Not really 
necessary to take peer_ref_mutex lock - by this point, 2546 * it's known that the soc is no longer in use. 2547 */ 2548 for (i = 0; i <= soc->peer_hash.mask; i++) { 2549 if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) { 2550 struct dp_peer *peer, *peer_next; 2551 2552 /* 2553 * TAILQ_FOREACH_SAFE must be used here to avoid any 2554 * memory access violation after peer is freed 2555 */ 2556 TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i], 2557 hash_list_elem, peer_next) { 2558 /* 2559 * Don't remove the peer from the hash table - 2560 * that would modify the list we are currently 2561 * traversing, and it's not necessary anyway. 2562 */ 2563 /* 2564 * Artificially adjust the peer's ref count to 2565 * 1, so it will get deleted by 2566 * dp_peer_unref_delete. 2567 */ 2568 /* set to zero */ 2569 qdf_atomic_init(&peer->ref_cnt); 2570 for (i = 0; i < DP_MOD_ID_MAX; i++) 2571 qdf_atomic_init(&peer->mod_refs[i]); 2572 /* incr to one */ 2573 qdf_atomic_inc(&peer->ref_cnt); 2574 qdf_atomic_inc(&peer->mod_refs 2575 [DP_MOD_ID_CONFIG]); 2576 dp_peer_unref_delete(peer, 2577 DP_MOD_ID_CONFIG); 2578 } 2579 } 2580 } 2581 } 2582 2583 void dp_peer_ast_table_detach(struct dp_soc *soc) 2584 { 2585 if (soc->ast_table) { 2586 qdf_mem_free(soc->ast_table); 2587 soc->ast_table = NULL; 2588 } 2589 } 2590 2591 /* 2592 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map 2593 * @soc: soc handle 2594 * 2595 * return: none 2596 */ 2597 void dp_peer_find_map_detach(struct dp_soc *soc) 2598 { 2599 if (soc->peer_id_to_obj_map) { 2600 qdf_mem_free(soc->peer_id_to_obj_map); 2601 soc->peer_id_to_obj_map = NULL; 2602 qdf_spinlock_destroy(&soc->peer_map_lock); 2603 } 2604 } 2605 2606 #ifndef AST_OFFLOAD_ENABLE 2607 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc) 2608 { 2609 QDF_STATUS status; 2610 2611 status = dp_peer_find_map_attach(soc); 2612 if (!QDF_IS_STATUS_SUCCESS(status)) 2613 return status; 2614 2615 status = dp_peer_find_hash_attach(soc); 2616 if 
(!QDF_IS_STATUS_SUCCESS(status)) 2617 goto map_detach; 2618 2619 status = dp_peer_ast_table_attach(soc); 2620 if (!QDF_IS_STATUS_SUCCESS(status)) 2621 goto hash_detach; 2622 2623 status = dp_peer_ast_hash_attach(soc); 2624 if (!QDF_IS_STATUS_SUCCESS(status)) 2625 goto ast_table_detach; 2626 2627 status = dp_peer_mec_hash_attach(soc); 2628 if (QDF_IS_STATUS_SUCCESS(status)) { 2629 dp_soc_wds_attach(soc); 2630 return status; 2631 } 2632 2633 dp_peer_ast_hash_detach(soc); 2634 ast_table_detach: 2635 dp_peer_ast_table_detach(soc); 2636 hash_detach: 2637 dp_peer_find_hash_detach(soc); 2638 map_detach: 2639 dp_peer_find_map_detach(soc); 2640 2641 return status; 2642 } 2643 #else 2644 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc) 2645 { 2646 QDF_STATUS status; 2647 2648 status = dp_peer_find_map_attach(soc); 2649 if (!QDF_IS_STATUS_SUCCESS(status)) 2650 return status; 2651 2652 status = dp_peer_find_hash_attach(soc); 2653 if (!QDF_IS_STATUS_SUCCESS(status)) 2654 goto map_detach; 2655 2656 return status; 2657 map_detach: 2658 dp_peer_find_map_detach(soc); 2659 2660 return status; 2661 } 2662 #endif 2663 2664 #ifdef IPA_OFFLOAD 2665 /* 2666 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo 2667 * @soc - soc handle 2668 * @cb_ctxt - combination of peer_id and tid 2669 * @reo_status - reo status 2670 * 2671 * return: void 2672 */ 2673 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt, 2674 union hal_reo_status *reo_status) 2675 { 2676 struct dp_peer *peer = NULL; 2677 struct dp_rx_tid *rx_tid = NULL; 2678 unsigned long comb_peer_id_tid; 2679 struct hal_reo_queue_status *queue_status = &reo_status->queue_status; 2680 uint16_t tid; 2681 uint16_t peer_id; 2682 2683 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { 2684 dp_err("REO stats failure %d\n", 2685 queue_status->header.status); 2686 return; 2687 } 2688 comb_peer_id_tid = (unsigned long)cb_ctxt; 2689 tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid); 2690 
peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid); 2691 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS); 2692 if (!peer) 2693 return; 2694 rx_tid = &peer->rx_tid[tid]; 2695 2696 if (!rx_tid) { 2697 dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS); 2698 return; 2699 } 2700 2701 rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt; 2702 rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt; 2703 dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS); 2704 } 2705 2706 qdf_export_symbol(dp_peer_update_tid_stats_from_reo); 2707 #endif 2708 2709 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, 2710 union hal_reo_status *reo_status) 2711 { 2712 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; 2713 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); 2714 2715 if (queue_status->header.status == HAL_REO_CMD_DRAIN) 2716 return; 2717 2718 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { 2719 DP_PRINT_STATS("REO stats failure %d for TID %d\n", 2720 queue_status->header.status, rx_tid->tid); 2721 return; 2722 } 2723 2724 DP_PRINT_STATS("REO queue stats (TID: %d):\n" 2725 "ssn: %d\n" 2726 "curr_idx : %d\n" 2727 "pn_31_0 : %08x\n" 2728 "pn_63_32 : %08x\n" 2729 "pn_95_64 : %08x\n" 2730 "pn_127_96 : %08x\n" 2731 "last_rx_enq_tstamp : %08x\n" 2732 "last_rx_deq_tstamp : %08x\n" 2733 "rx_bitmap_31_0 : %08x\n" 2734 "rx_bitmap_63_32 : %08x\n" 2735 "rx_bitmap_95_64 : %08x\n" 2736 "rx_bitmap_127_96 : %08x\n" 2737 "rx_bitmap_159_128 : %08x\n" 2738 "rx_bitmap_191_160 : %08x\n" 2739 "rx_bitmap_223_192 : %08x\n" 2740 "rx_bitmap_255_224 : %08x\n", 2741 rx_tid->tid, 2742 queue_status->ssn, queue_status->curr_idx, 2743 queue_status->pn_31_0, queue_status->pn_63_32, 2744 queue_status->pn_95_64, queue_status->pn_127_96, 2745 queue_status->last_rx_enq_tstamp, 2746 queue_status->last_rx_deq_tstamp, 2747 queue_status->rx_bitmap_31_0, 2748 queue_status->rx_bitmap_63_32, 2749 queue_status->rx_bitmap_95_64, 2750 
queue_status->rx_bitmap_127_96, 2751 queue_status->rx_bitmap_159_128, 2752 queue_status->rx_bitmap_191_160, 2753 queue_status->rx_bitmap_223_192, 2754 queue_status->rx_bitmap_255_224); 2755 2756 DP_PRINT_STATS( 2757 "curr_mpdu_cnt : %d\n" 2758 "curr_msdu_cnt : %d\n" 2759 "fwd_timeout_cnt : %d\n" 2760 "fwd_bar_cnt : %d\n" 2761 "dup_cnt : %d\n" 2762 "frms_in_order_cnt : %d\n" 2763 "bar_rcvd_cnt : %d\n" 2764 "mpdu_frms_cnt : %d\n" 2765 "msdu_frms_cnt : %d\n" 2766 "total_byte_cnt : %d\n" 2767 "late_recv_mpdu_cnt : %d\n" 2768 "win_jump_2k : %d\n" 2769 "hole_cnt : %d\n", 2770 queue_status->curr_mpdu_cnt, 2771 queue_status->curr_msdu_cnt, 2772 queue_status->fwd_timeout_cnt, 2773 queue_status->fwd_bar_cnt, 2774 queue_status->dup_cnt, 2775 queue_status->frms_in_order_cnt, 2776 queue_status->bar_rcvd_cnt, 2777 queue_status->mpdu_frms_cnt, 2778 queue_status->msdu_frms_cnt, 2779 queue_status->total_cnt, 2780 queue_status->late_recv_mpdu_cnt, 2781 queue_status->win_jump_2k, 2782 queue_status->hole_cnt); 2783 2784 DP_PRINT_STATS("Addba Req : %d\n" 2785 "Addba Resp : %d\n" 2786 "Addba Resp success : %d\n" 2787 "Addba Resp failed : %d\n" 2788 "Delba Req received : %d\n" 2789 "Delba Tx success : %d\n" 2790 "Delba Tx Fail : %d\n" 2791 "BA window size : %d\n" 2792 "Pn size : %d\n", 2793 rx_tid->num_of_addba_req, 2794 rx_tid->num_of_addba_resp, 2795 rx_tid->num_addba_rsp_success, 2796 rx_tid->num_addba_rsp_failed, 2797 rx_tid->num_of_delba_req, 2798 rx_tid->delba_tx_success_cnt, 2799 rx_tid->delba_tx_fail_cnt, 2800 rx_tid->ba_win_size, 2801 rx_tid->pn_size); 2802 } 2803 2804 #ifdef REO_SHARED_QREF_TABLE_EN 2805 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc, 2806 struct dp_peer *peer) 2807 { 2808 uint8_t tid; 2809 2810 if (IS_MLO_DP_LINK_PEER(peer)) 2811 return; 2812 if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) { 2813 for (tid = 0; tid < DP_MAX_TIDS; tid++) 2814 hal_reo_shared_qaddr_write(soc->hal_soc, 2815 peer->peer_id, tid, 0); 2816 } 2817 } 2818 #endif 2819 2820 
/*
 * dp_peer_find_add_id() - map peer_id with peer
 * @soc: soc handle
 * @peer_mac_addr: peer mac address
 * @peer_id: peer id to be mapped
 * @hw_peer_id: HW ast index
 * @vdev_id: vdev_id
 * @peer_type: peer type (link or MLD)
 *
 * Return: peer in success
 *         NULL in failure
 */
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct cdp_peer_info peer_info = { 0 };

	QDF_ASSERT(peer_id <= soc->max_peer_id);
	/* check if there's already a peer object with this MAC address */
	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
				 false, peer_type);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(peer_mac_addr));

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		dp_peer_info("%pK: ref_cnt: %d", soc,
			     qdf_atomic_read(&peer->ref_cnt));

		/*
		 * if peer is in logical delete CP triggered delete before map
		 * is received ignore this event
		 */
		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
				 vdev_id);
			return NULL;
		}

		/* A peer that already holds a valid peer_id getting a second
		 * map is a protocol violation — drop the ref and bail out.
		 */
		if (peer->peer_id == HTT_INVALID_PEER) {
			if (!IS_MLO_DP_MLD_PEER(peer))
				dp_monitor_peer_tid_peer_id_update(soc, peer,
								   peer_id);
		} else {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			QDF_ASSERT(0);
			return NULL;
		}
		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
		if (soc->arch_ops.dp_partner_chips_map)
			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);

		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
		return peer;
	}

	return NULL;
}

#ifdef WLAN_FEATURE_11BE_MLO
#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/* Derive the ML peer id by masking the link peer id and setting the
 * soc-configured ML-valid bit.
 */
static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
					 uint16_t peer_id)
{
	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
}
#else
static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
					 uint16_t peer_id)
{
	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
}
#endif

QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info,
			   struct dp_mlo_link_info *mlo_link_info)
{
	struct dp_peer *peer = NULL;
	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
	uint8_t vdev_id = 0;
	uint8_t is_wds = 0;
	int i;
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
	QDF_STATUS err = QDF_STATUS_SUCCESS;
	struct dp_soc *primary_soc;

	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
		soc, peer_id, ml_peer_id,
		QDF_MAC_ADDR_REF(peer_mac_addr));

	/* Get corresponding vdev ID for the peer based
	 * on chip ID obtained from mlo peer_map event
	 */
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
			vdev_id = mlo_link_info[i].vdev_id;
			break;
		}
	}

	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);

	if (peer) {
		if (wlan_op_mode_sta == peer->vdev->opmode &&
		    qdf_mem_cmp(peer->mac_addr.raw,
				peer->vdev->mld_mac_addr.raw,
				QDF_MAC_ADDR_SIZE) != 0) {
			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
			peer->bss_peer = 1;
			if (peer->txrx_peer)
				peer->txrx_peer->bss_peer = 1;
		}

		if (peer->vdev->opmode == wlan_op_mode_sta) {
			peer->vdev->bss_ast_hash = ast_hash;
			peer->vdev->bss_ast_idx = hw_peer_id;
		}

		/* Add ast entry incase self ast entry is
		 * deleted due to DP CP sync issue
		 *
		 * self_ast_entry is modified in peer create
		 * and peer unmap path which cannot run in
		 * parallel with peer map, no lock need before
		 * referring it
		 */
		if (!peer->self_ast_entry) {
			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
				QDF_MAC_ADDR_REF(peer_mac_addr));
			dp_peer_add_ast(soc, peer,
					peer_mac_addr,
					type, 0);
		}
		/* If peer setup and hence rx_tid setup got called
		 * before htt peer map then Qref write to LUT did not
		 * happen in rx_tid setup as peer_id was invalid.
		 * So defer Qref write to peer map handler. Check if
		 * rx_tid qdesc for tid 0 is already setup and perform
		 * qref write to LUT for Tid 0 and 16.
		 *
		 * Peer map could be obtained on assoc link, hence
		 * change to primary link's soc.
		 */
		primary_soc = peer->vdev->pdev->soc;
		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   0,
						   peer->rx_tid[0].hw_qdesc_paddr);
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   DP_NON_QOS_TID,
						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
		}
	}

	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
			      vdev_id, ast_hash, is_wds);

	/*
	 * If AST offload and host AST DB is enabled, populate AST entries on
	 * host based on mlo peer map event from FW
	 */
	if (soc->ast_offload_support && soc->host_ast_db_enable) {
		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
					 hw_peer_id, vdev_id,
					 ast_hash, is_wds);
	}

	return err;
}
#endif

#ifdef DP_RX_UDP_OVER_PEER_ROAM
/* Clear the cached roaming-peer state on the vdev if the given MAC matches
 * the peer whose roam authorization was being tracked.
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *peer_mac_addr)
{
	struct dp_vdev *vdev = NULL;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
	if (vdev) {
		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
				QDF_MAC_ADDR_SIZE) == 0) {
			vdev->roaming_peer_status =
						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
			qdf_mem_zero(vdev->roaming_peer_mac.raw,
				     QDF_MAC_ADDR_SIZE);
		}
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
	}
}
#endif

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @hw_peer_id: ast index for this peer
 * @vdev_id: vdev ID
 * @peer_mac_addr: mac address of the peer
 * @ast_hash: ast hash value
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * associate the peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */

QDF_STATUS
dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
		       uint16_t hw_peer_id, uint8_t vdev_id,
		       uint8_t *peer_mac_addr, uint16_t ast_hash,
		       uint8_t is_wds)
{
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
		soc, peer_id, hw_peer_id,
		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);

	/* Peer map event for WDS ast entry get the peer from
	 * obj map
	 */
	if (is_wds) {
		if (!soc->ast_offload_support) {
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_HTT);

			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
					      hw_peer_id,
					      vdev_id, ast_hash, is_wds);
			if (peer)
				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
		}
	} else {
		/*
		 * It's the responsibility of the CP and FW to ensure
		 * that peer is created successfully. Ideally DP should
		 * not hit the below condition for directly associated
		 * peers.
		 *
		 * NOTE(review): hw_peer_id is uint16_t, so the
		 * "hw_peer_id < 0" arm below can never be true; only the
		 * upper-bound check is effective.
		 */
		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
		    (hw_peer_id >=
		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
			qdf_assert_always(0);
		}

		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					   hw_peer_id, vdev_id,
					   CDP_LINK_PEER_TYPE);

		if (peer) {
			vdev = peer->vdev;
			/* Only check for STA Vdev and peer is not for TDLS */
			if (wlan_op_mode_sta == vdev->opmode &&
			    !peer->is_tdls_peer) {
				if (qdf_mem_cmp(peer->mac_addr.raw,
						vdev->mac_addr.raw,
						QDF_MAC_ADDR_SIZE) != 0) {
					dp_info("%pK: STA vdev bss_peer", soc);
					peer->bss_peer = 1;
					if (peer->txrx_peer)
						peer->txrx_peer->bss_peer = 1;
				}

				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
					ast_hash, hw_peer_id);
				vdev->bss_ast_hash = ast_hash;
				vdev->bss_ast_idx = hw_peer_id;
			}

			/* Add ast entry incase self ast entry is
			 * deleted due to DP CP sync issue
			 *
			 * self_ast_entry is modified in peer create
			 * and peer unmap path which cannot run in
			 * parallel with peer map, no lock need before
			 * referring it
			 */
			if (!soc->ast_offload_support &&
			    !peer->self_ast_entry) {
				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
					QDF_MAC_ADDR_REF(peer_mac_addr));
				dp_peer_add_ast(soc, peer,
						peer_mac_addr,
						type, 0);
			}

			/* If peer setup and hence rx_tid setup got called
			 * before htt peer map then Qref write to LUT did
			 * not happen in rx_tid setup as peer_id was invalid.
			 * So defer Qref write to peer map handler. Check if
			 * rx_tid qdesc for tid 0 is already setup perform qref
			 * write to LUT for Tid 0 and 16.
			 */
			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
			    !IS_MLO_DP_LINK_PEER(peer)) {
				hal_reo_shared_qaddr_write(soc->hal_soc,
							   peer_id,
							   0,
							   peer->rx_tid[0].hw_qdesc_paddr);
				hal_reo_shared_qaddr_write(soc->hal_soc,
							   peer_id,
							   DP_NON_QOS_TID,
							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
			}
		}

		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
				      vdev_id, ast_hash, is_wds);
	}

	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);

	/*
	 * If AST offload and host AST DB is enabled, populate AST entries on
	 * host based on peer map event from FW
	 */
	if (soc->ast_offload_support && soc->host_ast_db_enable) {
		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
					 hw_peer_id, vdev_id,
					 ast_hash, is_wds);
	}

	return err;
}

/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @vdev_id: vdev ID
 * @mac_addr: mac address of the peer or wds entry
 * @is_wds: flag to indicate peer map event for WDS ast entry
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * Return: none
 */
void
dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			 uint8_t vdev_id, uint8_t *mac_addr,
			 uint8_t is_wds, uint32_t free_wds_count)
{
	struct dp_peer *peer;
	struct dp_vdev *vdev = NULL;

	/*
	 * If FW AST offload is enabled and host AST DB is enabled,
	 * the AST entries are created during peer map from FW.
	 */
	if (soc->ast_offload_support && is_wds) {
		if (!soc->host_ast_db_enable)
			return;
	}

	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		dp_err("Received unmap event for invalid peer_id %u",
		       peer_id);
		return;
	}

	/* If V2 Peer map messages are enabled AST entry has to be
	 * freed here
	 */
	if (is_wds) {
		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
						   mac_addr)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
			return;
		}

		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
			 peer, peer->peer_id,
			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 QDF_MAC_ADDR_REF(mac_addr), vdev_id,
			 is_wds);

		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
		return;
	}

	dp_peer_clean_wds_entries(soc, peer, free_wds_count);

	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	/* Clear entries in Qref LUT */
	/* TODO: Check if this is to be called from
	 * dp_peer_delete for MLO case if there is race between
	 * new peer id assignment and still not having received
	 * peer unmap for MLD peer with same peer id.
	 */
	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);

	dp_peer_find_id_to_obj_remove(soc, peer_id);

	if (soc->arch_ops.dp_partner_chips_unmap)
		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);

	peer->peer_id = HTT_INVALID_PEER;

	/*
	 * Reset ast flow mapping table
	 */
	if (!soc->ast_offload_support)
		dp_peer_reset_flowq_map(peer);

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
						      peer_id, vdev_id, mac_addr);
	}

	vdev = peer->vdev;
	dp_update_vdev_stats_on_peer_unmap(vdev, peer);

	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
	/* Drop the reference taken above by __dp_peer_get_ref_by_id */
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
}

#ifdef WLAN_FEATURE_11BE_MLO
/* Translate the link peer_id into the ML peer id and route the unmap
 * through the common unmap handler (vdev DP_VDEV_ALL, zero MAC).
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
{
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
	uint8_t vdev_id = DP_VDEV_ALL;
	uint8_t is_wds = 0;

	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
		soc, peer_id);

	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
				 mac_addr, is_wds,
				 DP_PEER_WDS_COUNT_INVALID);
}
#endif

#ifndef AST_OFFLOAD_ENABLE
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_mec_hash_detach(soc);
}
#else
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
}
#endif

static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
				union
hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	/* Log-only handling: no recovery action is taken on update failure.
	 * HAL_REO_CMD_DRAIN is expected during teardown and is not treated
	 * as an error.
	 */
	if ((reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
			    soc, reo_status->rx_queue_status.header.status,
			    rx_tid->tid);
	}
}

/**
 * dp_get_peer_vdev_roaming_in_progress() - check whether the peer's vdev
 * is currently roaming
 * @peer: DP peer handle (NULL is tolerated and treated as "not roaming")
 *
 * Queries the control-path callback is_roam_inprogress(), when registered,
 * using the vdev id resolved from the peer's MAC address.
 *
 * Return: true if roaming is in progress on the peer's vdev, else false
 */
static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
{
	struct ol_if_ops *ol_ops = NULL;
	bool is_roaming = false;
	/* NOTE(review): -1 assigned to uint8_t wraps to 0xff, so the
	 * callback sees vdev_id 255 if dp_get_vdevid() does not update it.
	 */
	uint8_t vdev_id = -1;
	struct cdp_soc_t *soc;

	if (!peer) {
		dp_peer_info("Peer is NULL. No roaming possible");
		return false;
	}

	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;

	if (ol_ops && ol_ops->is_roam_inprogress) {
		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
	}

	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);

	return is_roaming;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
			     setup is necessary
 * @peer: DP peer handle
 *
 * For MLO, only the first assoc link peer performs the shared REO queue
 * setup; later link peers skip it.
 *
 * Return: true - allow, false - disallow
 */
static inline
bool dp_rx_tid_setup_allow(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
		return false;

	return true;
}

/**
 * dp_rx_tid_update_allow() - check if rx_tid update needed
 * @peer: DP peer handle
 *
 * Return: true - allow, false - disallow
 */
static inline
bool dp_rx_tid_update_allow(struct dp_peer *peer)
{
	/* not as expected for MLO connection link peer */
	if
(IS_MLO_DP_LINK_PEER(peer)) { 3379 QDF_BUG(0); 3380 return false; 3381 } 3382 3383 return true; 3384 } 3385 #else 3386 static inline 3387 bool dp_rx_tid_setup_allow(struct dp_peer *peer) 3388 { 3389 return true; 3390 } 3391 3392 static inline 3393 bool dp_rx_tid_update_allow(struct dp_peer *peer) 3394 { 3395 return true; 3396 } 3397 #endif 3398 3399 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t 3400 ba_window_size, uint32_t start_seq, 3401 bool bar_update) 3402 { 3403 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 3404 struct dp_soc *soc = peer->vdev->pdev->soc; 3405 struct hal_reo_cmd_params params; 3406 3407 if (!dp_rx_tid_update_allow(peer)) { 3408 dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT, 3409 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 3410 return QDF_STATUS_E_FAILURE; 3411 } 3412 3413 qdf_mem_zero(¶ms, sizeof(params)); 3414 3415 params.std.need_status = 1; 3416 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; 3417 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 3418 params.u.upd_queue_params.update_ba_window_size = 1; 3419 params.u.upd_queue_params.ba_window_size = ba_window_size; 3420 3421 if (start_seq < IEEE80211_SEQ_MAX) { 3422 params.u.upd_queue_params.update_ssn = 1; 3423 params.u.upd_queue_params.ssn = start_seq; 3424 } else { 3425 dp_set_ssn_valid_flag(¶ms, 0); 3426 } 3427 3428 if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, 3429 dp_rx_tid_update_cb, rx_tid)) { 3430 dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE"); 3431 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); 3432 } 3433 3434 rx_tid->ba_win_size = ba_window_size; 3435 3436 if (dp_get_peer_vdev_roaming_in_progress(peer)) 3437 return QDF_STATUS_E_PERM; 3438 3439 if (!bar_update) 3440 dp_peer_rx_reorder_queue_setup(soc, peer, 3441 tid, ba_window_size); 3442 3443 return QDF_STATUS_SUCCESS; 3444 } 3445 3446 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY 3447 /* 3448 * dp_reo_desc_defer_free_enqueue() - enqueue REO 
			       QDESC to be freed into
 *				 the deferred list
 * @soc: Datapath soc handle
 * @freedesc: REO desc list node holding the rx_tid whose HW queue memory
 *	      is to be deferred-freed
 *
 * Copies the HW queue descriptor's paddr/vaddr/size into a deferred
 * freelist node so the backing memory can be released later (time based)
 * instead of immediately in the REO callback path.
 *
 * Return: true if enqueued, else false
 */
static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
					   struct reo_desc_list_node *freedesc)
{
	struct reo_desc_deferred_freelist_node *desc;

	/* Deferred freeing is only meaningful after common init completed */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return false;

	desc = qdf_mem_malloc(sizeof(*desc));
	if (!desc)
		return false;

	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
	desc->hw_qdesc_vaddr_unaligned =
			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
	desc->free_ts = qdf_get_system_timestamp();
	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* List may already be torn down: drop the node instead of queuing */
	if (!soc->reo_desc_deferred_freelist_init) {
		qdf_mem_free(desc);
		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
		return false;
	}
	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
			     (qdf_list_node_t *)desc);
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	return true;
}

/*
 * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
 * based on time threshold
 * @soc: Datapath soc handle
 *
 * Walks the deferred freelist and unmaps/frees every entry whose
 * REO_DESC_DEFERRED_FREE_MS deferral period has expired.
 *
 * Return: None
 */
static void dp_reo_desc_defer_free(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);

	/* Entries are inserted in time order; stop at first unexpired one */
	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
				      (qdf_list_node_t **)&desc);

		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);

		/* Unmap the DMA mapping before releasing the queue memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);

		curr_ts = qdf_get_system_timestamp();
	}

	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Deferred REO qdesc destroy disabled: never enqueue, so the caller frees
 * the queue memory inline.
 */
static inline bool
dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
			       struct reo_desc_list_node *freedesc)
{
	return false;
}

static void dp_reo_desc_defer_free(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

/*
 * dp_reo_desc_free() - Callback free reo descriptor memory after
 * HW cache flush
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context (struct reo_desc_list_node to release)
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
			     union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((reo_status->fl_cache_status.header.status !=
	    HAL_REO_CMD_SUCCESS) &&
	    (reo_status->fl_cache_status.header.status !=
	    HAL_REO_CMD_DRAIN)) {
		/* NOTE(review): the condition reads fl_cache_status but the
		 * log below prints rx_queue_status — presumably both alias
		 * the same header within the union; verify and make
		 * consistent.
		 */
		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
			    soc, reo_status->rx_queue_status.header.status,
			    freedesc->rx_tid.tid);
	}
	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
		     rx_tid->tid);

	/* REO desc is enqueued to be freed at a later point
	 * in time, just free the freedesc alone and return
	 */
	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
		goto out;

DP_RX_REO_QDESC_FREE_EVT(freedesc); 3570 3571 qdf_mem_unmap_nbytes_single(soc->osdev, 3572 rx_tid->hw_qdesc_paddr, 3573 QDF_DMA_BIDIRECTIONAL, 3574 rx_tid->hw_qdesc_alloc_size); 3575 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); 3576 out: 3577 qdf_mem_free(freedesc); 3578 } 3579 3580 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) 3581 /* Hawkeye emulation requires bus address to be >= 0x50000000 */ 3582 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) 3583 { 3584 if (dma_addr < 0x50000000) 3585 return QDF_STATUS_E_FAILURE; 3586 else 3587 return QDF_STATUS_SUCCESS; 3588 } 3589 #else 3590 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) 3591 { 3592 return QDF_STATUS_SUCCESS; 3593 } 3594 #endif 3595 3596 /* 3597 * dp_rx_tid_setup_wifi3() – Setup receive TID state 3598 * @peer: Datapath peer handle 3599 * @tid: TID 3600 * @ba_window_size: BlockAck window size 3601 * @start_seq: Starting sequence number 3602 * 3603 * Return: QDF_STATUS code 3604 */ 3605 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, 3606 uint32_t ba_window_size, uint32_t start_seq) 3607 { 3608 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 3609 struct dp_vdev *vdev = peer->vdev; 3610 struct dp_soc *soc = vdev->pdev->soc; 3611 uint32_t hw_qdesc_size; 3612 uint32_t hw_qdesc_align; 3613 int hal_pn_type; 3614 void *hw_qdesc_vaddr; 3615 uint32_t alloc_tries = 0; 3616 QDF_STATUS status = QDF_STATUS_SUCCESS; 3617 struct dp_txrx_peer *txrx_peer; 3618 3619 if (!qdf_atomic_read(&peer->is_default_route_set)) 3620 return QDF_STATUS_E_FAILURE; 3621 3622 if (!dp_rx_tid_setup_allow(peer)) { 3623 dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT, 3624 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 3625 goto send_wmi_reo_cmd; 3626 } 3627 3628 rx_tid->ba_win_size = ba_window_size; 3629 if (rx_tid->hw_qdesc_vaddr_unaligned) 3630 return dp_rx_tid_update_wifi3(peer, tid, ba_window_size, 3631 start_seq, false); 3632 rx_tid->delba_tx_status = 0; 3633 
rx_tid->ppdu_id_2k = 0; 3634 rx_tid->num_of_addba_req = 0; 3635 rx_tid->num_of_delba_req = 0; 3636 rx_tid->num_of_addba_resp = 0; 3637 rx_tid->num_addba_rsp_failed = 0; 3638 rx_tid->num_addba_rsp_success = 0; 3639 rx_tid->delba_tx_success_cnt = 0; 3640 rx_tid->delba_tx_fail_cnt = 0; 3641 rx_tid->statuscode = 0; 3642 3643 /* TODO: Allocating HW queue descriptors based on max BA window size 3644 * for all QOS TIDs so that same descriptor can be used later when 3645 * ADDBA request is received. This should be changed to allocate HW 3646 * queue descriptors based on BA window size being negotiated (0 for 3647 * non BA cases), and reallocate when BA window size changes and also 3648 * send WMI message to FW to change the REO queue descriptor in Rx 3649 * peer entry as part of dp_rx_tid_update. 3650 */ 3651 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, 3652 ba_window_size, tid); 3653 3654 hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc); 3655 /* To avoid unnecessary extra allocation for alignment, try allocating 3656 * exact size and see if we already have aligned address. 3657 */ 3658 rx_tid->hw_qdesc_alloc_size = hw_qdesc_size; 3659 3660 try_desc_alloc: 3661 rx_tid->hw_qdesc_vaddr_unaligned = 3662 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size); 3663 3664 if (!rx_tid->hw_qdesc_vaddr_unaligned) { 3665 dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d", 3666 soc, tid); 3667 return QDF_STATUS_E_NOMEM; 3668 } 3669 3670 if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) % 3671 hw_qdesc_align) { 3672 /* Address allocated above is not alinged. 
								      Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
				       hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
				    soc, tid);
			return QDF_STATUS_E_NOMEM;
		}

		/* Round the unaligned base address up to hw_qdesc_align */
		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
			      soc, rx_tid->hw_qdesc_alloc_size,
			      hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}
	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;

	/* NOTE(review): txrx_peer is dereferenced below without a NULL
	 * check — confirm dp_get_txrx_peer() cannot return NULL on this
	 * path.
	 */
	txrx_peer = dp_get_txrx_peer(peer);

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		/* WAPI PN parity depends on AP vs non-AP role */
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
		vdev->vdev_stats_id);

	/* DMA-map the queue descriptor; writes rx_tid->hw_qdesc_paddr */
	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		/* Retry allocation (up to 10 attempts) to land in a bus
		 * address range acceptable to dp_reo_desc_addr_chk()
		 */
		if (alloc_tries++ < 10) {
			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
rx_tid->hw_qdesc_vaddr_unaligned = NULL; 3737 goto try_desc_alloc; 3738 } else { 3739 dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d", 3740 soc, tid); 3741 status = QDF_STATUS_E_NOMEM; 3742 goto error; 3743 } 3744 } 3745 3746 send_wmi_reo_cmd: 3747 if (dp_get_peer_vdev_roaming_in_progress(peer)) { 3748 status = QDF_STATUS_E_PERM; 3749 goto error; 3750 } 3751 3752 status = dp_peer_rx_reorder_queue_setup(soc, peer, 3753 tid, ba_window_size); 3754 if (QDF_IS_STATUS_SUCCESS(status)) 3755 return status; 3756 3757 error: 3758 if (rx_tid->hw_qdesc_vaddr_unaligned) { 3759 if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) == 3760 QDF_STATUS_SUCCESS) 3761 qdf_mem_unmap_nbytes_single( 3762 soc->osdev, 3763 rx_tid->hw_qdesc_paddr, 3764 QDF_DMA_BIDIRECTIONAL, 3765 rx_tid->hw_qdesc_alloc_size); 3766 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); 3767 rx_tid->hw_qdesc_vaddr_unaligned = NULL; 3768 rx_tid->hw_qdesc_paddr = 0; 3769 } 3770 return status; 3771 } 3772 3773 #ifdef DP_UMAC_HW_RESET_SUPPORT 3774 static 3775 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg) 3776 { 3777 int tid; 3778 3779 for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) { 3780 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 3781 void *vaddr = rx_tid->hw_qdesc_vaddr_aligned; 3782 3783 if (vaddr) 3784 dp_reset_rx_reo_tid_queue(soc, vaddr, 3785 rx_tid->hw_qdesc_alloc_size); 3786 } 3787 } 3788 3789 void dp_reset_tid_q_setup(struct dp_soc *soc) 3790 { 3791 dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET); 3792 } 3793 #endif 3794 #ifdef REO_DESC_DEFER_FREE 3795 /* 3796 * dp_reo_desc_clean_up() - If cmd to flush base desc fails add 3797 * desc back to freelist and defer the deletion 3798 * 3799 * @soc: DP SOC handle 3800 * @desc: Base descriptor to be freed 3801 * @reo_status: REO command status 3802 */ 3803 static void dp_reo_desc_clean_up(struct dp_soc *soc, 3804 struct reo_desc_list_node *desc, 3805 union hal_reo_status *reo_status) 3806 { 3807 
	desc->free_ts = qdf_get_system_timestamp();
	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	qdf_list_insert_back(&soc->reo_desc_freelist,
			     (qdf_list_node_t *)desc);
}

/*
 * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
 * ring to avoid REO hang
 *
 * @list_size: REO desc list size to be cleaned
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
		dp_err_log("%lu:freedesc number %d in freelist",
			   curr_ts, *list_size);
		/* limit the batch queue size */
		*list_size = REO_DESC_FREELIST_SIZE;
	}
}
#else
/*
 * dp_reo_desc_clean_up() - If send cmd to REO in order to flush
 * cache fails free the base REO desc anyway
 *
 * @soc: DP SOC handle
 * @desc: Base descriptor to be freed
 * @reo_status: REO command status
 */
static void dp_reo_desc_clean_up(struct dp_soc *soc,
				 struct reo_desc_list_node *desc,
				 union hal_reo_status *reo_status)
{
	if (reo_status) {
		/* Synthesize a success status so dp_reo_desc_free()
		 * releases the descriptor and queue memory immediately.
		 */
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = 0;
		dp_reo_desc_free(soc, (void *)desc, reo_status);
	}
}

/*
 * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
 * ring to avoid REO hang
 *
 * @list_size: REO desc list size to be cleaned (no-op in this config)
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
}
#endif

/*
 * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
 * cmd and re-insert desc into free list if send fails.
 *
 * @soc: DP SOC handle
 * @desc: desc with resend update cmd flag set
 * @rx_tid: Desc RX tid associated with update cmd for resetting
 * valid field to 0 in h/w
 *
 * Return: QDF status
 */
static QDF_STATUS
dp_resend_update_reo_cmd(struct dp_soc *soc,
			 struct reo_desc_list_node *desc,
			 struct dp_rx_tid *rx_tid)
{
	struct hal_reo_cmd_params params;

	/* Build an UPDATE_RX_REO_QUEUE command clearing the VLD bit of the
	 * HW queue descriptor addressed by hw_qdesc_paddr.
	 */
	qdf_mem_zero(&params, sizeof(params));
	params.std.need_status = 1;
	params.std.addr_lo =
		rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi =
		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;
	desc->resend_update_reo_cmd = false;
	/*
	 * If the cmd send fails then set resend_update_reo_cmd flag
	 * and insert the desc at the end of the free list to retry.
	 */
	if (dp_reo_send_cmd(soc,
			    CMD_UPDATE_RX_REO_QUEUE,
			    &params,
			    dp_rx_tid_delete_cb,
			    (void *)desc)
	    != QDF_STATUS_SUCCESS) {
		desc->resend_update_reo_cmd = true;
		desc->free_ts = qdf_get_system_timestamp();
		qdf_list_insert_back(&soc->reo_desc_freelist,
				     (qdf_list_node_t *)desc);
		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context (reo_desc_list_node for the deleted TID)
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
			 union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts =
qdf_get_system_timestamp(); 3926 uint32_t desc_size, tot_desc_size; 3927 struct hal_reo_cmd_params params; 3928 bool flush_failure = false; 3929 3930 DP_RX_REO_QDESC_UPDATE_EVT(freedesc); 3931 3932 if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) { 3933 qdf_mem_zero(reo_status, sizeof(*reo_status)); 3934 reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN; 3935 dp_reo_desc_free(soc, (void *)freedesc, reo_status); 3936 DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1); 3937 return; 3938 } else if (reo_status->rx_queue_status.header.status != 3939 HAL_REO_CMD_SUCCESS) { 3940 /* Should not happen normally. Just print error for now */ 3941 dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d", 3942 reo_status->rx_queue_status.header.status, 3943 freedesc->rx_tid.tid); 3944 } 3945 3946 dp_peer_info("%pK: rx_tid: %d status: %d", 3947 soc, freedesc->rx_tid.tid, 3948 reo_status->rx_queue_status.header.status); 3949 3950 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); 3951 freedesc->free_ts = curr_ts; 3952 qdf_list_insert_back_size(&soc->reo_desc_freelist, 3953 (qdf_list_node_t *)freedesc, &list_size); 3954 3955 /* MCL path add the desc back to reo_desc_freelist when REO FLUSH 3956 * failed. it may cause the number of REO queue pending in free 3957 * list is even larger than REO_CMD_RING max size and lead REO CMD 3958 * flood then cause REO HW in an unexpected condition. So it's 3959 * needed to limit the number REO cmds in a batch operation. 
3960 */ 3961 dp_reo_limit_clean_batch_sz(&list_size); 3962 3963 while ((qdf_list_peek_front(&soc->reo_desc_freelist, 3964 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) && 3965 ((list_size >= REO_DESC_FREELIST_SIZE) || 3966 (curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) || 3967 (desc->resend_update_reo_cmd && list_size))) { 3968 struct dp_rx_tid *rx_tid; 3969 3970 qdf_list_remove_front(&soc->reo_desc_freelist, 3971 (qdf_list_node_t **)&desc); 3972 list_size--; 3973 rx_tid = &desc->rx_tid; 3974 3975 /* First process descs with resend_update_reo_cmd set */ 3976 if (desc->resend_update_reo_cmd) { 3977 if (dp_resend_update_reo_cmd(soc, desc, rx_tid) != 3978 QDF_STATUS_SUCCESS) 3979 break; 3980 else 3981 continue; 3982 } 3983 3984 /* Flush and invalidate REO descriptor from HW cache: Base and 3985 * extension descriptors should be flushed separately */ 3986 if (desc->pending_ext_desc_size) 3987 tot_desc_size = desc->pending_ext_desc_size; 3988 else 3989 tot_desc_size = rx_tid->hw_qdesc_alloc_size; 3990 /* Get base descriptor size by passing non-qos TID */ 3991 desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0, 3992 DP_NON_QOS_TID); 3993 3994 /* Flush reo extension descriptors */ 3995 while ((tot_desc_size -= desc_size) > 0) { 3996 qdf_mem_zero(¶ms, sizeof(params)); 3997 params.std.addr_lo = 3998 ((uint64_t)(rx_tid->hw_qdesc_paddr) + 3999 tot_desc_size) & 0xffffffff; 4000 params.std.addr_hi = 4001 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 4002 4003 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, 4004 CMD_FLUSH_CACHE, 4005 ¶ms, 4006 NULL, 4007 NULL)) { 4008 dp_info_rl("fail to send CMD_CACHE_FLUSH:" 4009 "tid %d desc %pK", rx_tid->tid, 4010 (void *)(rx_tid->hw_qdesc_paddr)); 4011 desc->pending_ext_desc_size = tot_desc_size + 4012 desc_size; 4013 dp_reo_desc_clean_up(soc, desc, reo_status); 4014 flush_failure = true; 4015 break; 4016 } 4017 } 4018 4019 if (flush_failure) 4020 break; 4021 else 4022 desc->pending_ext_desc_size = desc_size; 4023 4024 /* Flush base 
descriptor */ 4025 qdf_mem_zero(¶ms, sizeof(params)); 4026 params.std.need_status = 1; 4027 params.std.addr_lo = 4028 (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff; 4029 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 4030 4031 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, 4032 CMD_FLUSH_CACHE, 4033 ¶ms, 4034 dp_reo_desc_free, 4035 (void *)desc)) { 4036 union hal_reo_status reo_status; 4037 /* 4038 * If dp_reo_send_cmd return failure, related TID queue desc 4039 * should be unmapped. Also locally reo_desc, together with 4040 * TID queue desc also need to be freed accordingly. 4041 * 4042 * Here invoke desc_free function directly to do clean up. 4043 * 4044 * In case of MCL path add the desc back to the free 4045 * desc list and defer deletion. 4046 */ 4047 dp_info_rl("fail to send REO cmd to flush cache: tid %d", 4048 rx_tid->tid); 4049 dp_reo_desc_clean_up(soc, desc, &reo_status); 4050 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); 4051 break; 4052 } 4053 } 4054 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); 4055 4056 dp_reo_desc_defer_free(soc); 4057 } 4058 4059 /* 4060 * dp_rx_tid_delete_wifi3() – Delete receive TID queue 4061 * @peer: Datapath peer handle 4062 * @tid: TID 4063 * 4064 * Return: 0 on success, error code on failure 4065 */ 4066 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid) 4067 { 4068 struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]); 4069 struct dp_soc *soc = peer->vdev->pdev->soc; 4070 struct hal_reo_cmd_params params; 4071 struct reo_desc_list_node *freedesc = 4072 qdf_mem_malloc(sizeof(*freedesc)); 4073 4074 if (!freedesc) { 4075 dp_peer_err("%pK: malloc failed for freedesc: tid %d", 4076 soc, tid); 4077 return -ENOMEM; 4078 } 4079 4080 freedesc->rx_tid = *rx_tid; 4081 freedesc->resend_update_reo_cmd = false; 4082 4083 qdf_mem_zero(¶ms, sizeof(params)); 4084 4085 DP_RX_REO_QDESC_GET_MAC(freedesc, peer); 4086 4087 params.std.need_status = 1; 4088 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; 
4089 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 4090 params.u.upd_queue_params.update_vld = 1; 4091 params.u.upd_queue_params.vld = 0; 4092 4093 if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, 4094 dp_rx_tid_delete_cb, (void *)freedesc) 4095 != QDF_STATUS_SUCCESS) { 4096 /* Defer the clean up to the call back context */ 4097 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); 4098 freedesc->free_ts = qdf_get_system_timestamp(); 4099 freedesc->resend_update_reo_cmd = true; 4100 qdf_list_insert_front(&soc->reo_desc_freelist, 4101 (qdf_list_node_t *)freedesc); 4102 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); 4103 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); 4104 dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE"); 4105 } 4106 4107 rx_tid->hw_qdesc_vaddr_unaligned = NULL; 4108 rx_tid->hw_qdesc_alloc_size = 0; 4109 rx_tid->hw_qdesc_paddr = 0; 4110 4111 return 0; 4112 } 4113 4114 #ifdef DP_LFR 4115 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) 4116 { 4117 int tid; 4118 4119 for (tid = 1; tid < DP_MAX_TIDS-1; tid++) { 4120 dp_rx_tid_setup_wifi3(peer, tid, 1, 0); 4121 dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d", 4122 tid, peer, peer->local_id); 4123 } 4124 } 4125 #else 4126 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}; 4127 #endif 4128 4129 #ifdef WLAN_FEATURE_11BE_MLO 4130 /** 4131 * dp_peer_rx_tids_init() - initialize each tids in peer 4132 * @peer: peer pointer 4133 * 4134 * Return: None 4135 */ 4136 static void dp_peer_rx_tids_init(struct dp_peer *peer) 4137 { 4138 int tid; 4139 struct dp_rx_tid *rx_tid; 4140 struct dp_rx_tid_defrag *rx_tid_defrag; 4141 4142 if (!IS_MLO_DP_LINK_PEER(peer)) { 4143 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 4144 rx_tid_defrag = &peer->txrx_peer->rx_tid[tid]; 4145 4146 rx_tid_defrag->array = &rx_tid_defrag->base; 4147 rx_tid_defrag->defrag_timeout_ms = 0; 4148 rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL; 4149 
rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL; 4150 rx_tid_defrag->base.head = NULL; 4151 rx_tid_defrag->base.tail = NULL; 4152 rx_tid_defrag->tid = tid; 4153 rx_tid_defrag->defrag_peer = peer->txrx_peer; 4154 } 4155 } 4156 4157 /* if not first assoc link peer, 4158 * not to initialize rx_tids again. 4159 */ 4160 if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link) 4161 return; 4162 4163 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 4164 rx_tid = &peer->rx_tid[tid]; 4165 rx_tid->tid = tid; 4166 rx_tid->ba_win_size = 0; 4167 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4168 } 4169 } 4170 #else 4171 static void dp_peer_rx_tids_init(struct dp_peer *peer) 4172 { 4173 int tid; 4174 struct dp_rx_tid *rx_tid; 4175 struct dp_rx_tid_defrag *rx_tid_defrag; 4176 4177 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 4178 rx_tid = &peer->rx_tid[tid]; 4179 4180 rx_tid_defrag = &peer->txrx_peer->rx_tid[tid]; 4181 rx_tid->tid = tid; 4182 rx_tid->ba_win_size = 0; 4183 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4184 4185 rx_tid_defrag->base.head = NULL; 4186 rx_tid_defrag->base.tail = NULL; 4187 rx_tid_defrag->tid = tid; 4188 rx_tid_defrag->array = &rx_tid_defrag->base; 4189 rx_tid_defrag->defrag_timeout_ms = 0; 4190 rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL; 4191 rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL; 4192 rx_tid_defrag->defrag_peer = peer->txrx_peer; 4193 } 4194 } 4195 #endif 4196 4197 /* 4198 * dp_peer_rx_init() – Initialize receive TID state 4199 * @pdev: Datapath pdev 4200 * @peer: Datapath peer 4201 * 4202 */ 4203 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer) 4204 { 4205 dp_peer_rx_tids_init(peer); 4206 4207 peer->active_ba_session_cnt = 0; 4208 peer->hw_buffer_size = 0; 4209 peer->kill_256_sessions = 0; 4210 4211 /* Setup default (non-qos) rx tid queue */ 4212 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0); 4213 4214 /* Setup rx tid queue for TID 0. 
4215 * Other queues will be setup on receiving first packet, which will cause 4216 * NULL REO queue error 4217 */ 4218 dp_rx_tid_setup_wifi3(peer, 0, 1, 0); 4219 4220 /* 4221 * Setup the rest of TID's to handle LFR 4222 */ 4223 dp_peer_setup_remaining_tids(peer); 4224 4225 /* 4226 * Set security defaults: no PN check, no security. The target may 4227 * send a HTT SEC_IND message to overwrite these defaults. 4228 */ 4229 if (peer->txrx_peer) 4230 peer->txrx_peer->security[dp_sec_ucast].sec_type = 4231 peer->txrx_peer->security[dp_sec_mcast].sec_type = 4232 cdp_sec_type_none; 4233 } 4234 4235 /* 4236 * dp_peer_rx_cleanup() – Cleanup receive TID state 4237 * @vdev: Datapath vdev 4238 * @peer: Datapath peer 4239 * 4240 */ 4241 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) 4242 { 4243 int tid; 4244 uint32_t tid_delete_mask = 0; 4245 4246 if (!peer->txrx_peer) 4247 return; 4248 4249 dp_info("Remove tids for peer: %pK", peer); 4250 4251 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 4252 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 4253 struct dp_rx_tid_defrag *defrag_rx_tid = 4254 &peer->txrx_peer->rx_tid[tid]; 4255 4256 qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock); 4257 if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) { 4258 /* Cleanup defrag related resource */ 4259 dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid); 4260 dp_rx_reorder_flush_frag(peer->txrx_peer, tid); 4261 } 4262 qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock); 4263 4264 qdf_spin_lock_bh(&rx_tid->tid_lock); 4265 if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) { 4266 dp_rx_tid_delete_wifi3(peer, tid); 4267 4268 tid_delete_mask |= (1 << tid); 4269 } 4270 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4271 } 4272 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */ 4273 if (soc->ol_ops->peer_rx_reorder_queue_remove) { 4274 soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc, 4275 peer->vdev->pdev->pdev_id, 4276 peer->vdev->vdev_id, 
peer->mac_addr.raw, 4277 tid_delete_mask); 4278 } 4279 #endif 4280 } 4281 4282 /* 4283 * dp_peer_cleanup() – Cleanup peer information 4284 * @vdev: Datapath vdev 4285 * @peer: Datapath peer 4286 * 4287 */ 4288 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) 4289 { 4290 enum wlan_op_mode vdev_opmode; 4291 uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE]; 4292 struct dp_pdev *pdev = vdev->pdev; 4293 struct dp_soc *soc = pdev->soc; 4294 4295 /* save vdev related member in case vdev freed */ 4296 vdev_opmode = vdev->opmode; 4297 4298 if (!IS_MLO_DP_MLD_PEER(peer)) 4299 dp_monitor_peer_tx_cleanup(vdev, peer); 4300 4301 if (vdev_opmode != wlan_op_mode_monitor) 4302 /* cleanup the Rx reorder queues for this peer */ 4303 dp_peer_rx_cleanup(vdev, peer); 4304 4305 dp_peer_rx_tids_destroy(peer); 4306 4307 if (IS_MLO_DP_LINK_PEER(peer)) 4308 dp_link_peer_del_mld_peer(peer); 4309 if (IS_MLO_DP_MLD_PEER(peer)) 4310 dp_mld_peer_deinit_link_peers_info(peer); 4311 4312 qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw, 4313 QDF_MAC_ADDR_SIZE); 4314 4315 if (soc->cdp_soc.ol_ops->peer_unref_delete) 4316 soc->cdp_soc.ol_ops->peer_unref_delete( 4317 soc->ctrl_psoc, 4318 vdev->pdev->pdev_id, 4319 peer->mac_addr.raw, vdev_mac_addr, 4320 vdev_opmode); 4321 } 4322 4323 /* dp_teardown_256_ba_session() - Teardown sessions using 256 4324 * window size when a request with 4325 * 64 window size is received. 4326 * This is done as a WAR since HW can 4327 * have only one setting per peer (64 or 256). 4328 * For HKv2, we use per tid buffersize setting 4329 * for 0 to per_tid_basize_max_tid. For tid 4330 * more than per_tid_basize_max_tid we use HKv1 4331 * method. 
4332 * @peer: Datapath peer 4333 * 4334 * Return: void 4335 */ 4336 static void dp_teardown_256_ba_sessions(struct dp_peer *peer) 4337 { 4338 uint8_t delba_rcode = 0; 4339 int tid; 4340 struct dp_rx_tid *rx_tid = NULL; 4341 4342 tid = peer->vdev->pdev->soc->per_tid_basize_max_tid; 4343 for (; tid < DP_MAX_TIDS; tid++) { 4344 rx_tid = &peer->rx_tid[tid]; 4345 qdf_spin_lock_bh(&rx_tid->tid_lock); 4346 4347 if (rx_tid->ba_win_size <= 64) { 4348 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4349 continue; 4350 } else { 4351 if (rx_tid->ba_status == DP_RX_BA_ACTIVE || 4352 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 4353 /* send delba */ 4354 if (!rx_tid->delba_tx_status) { 4355 rx_tid->delba_tx_retry++; 4356 rx_tid->delba_tx_status = 1; 4357 rx_tid->delba_rcode = 4358 IEEE80211_REASON_QOS_SETUP_REQUIRED; 4359 delba_rcode = rx_tid->delba_rcode; 4360 4361 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4362 if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) 4363 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( 4364 peer->vdev->pdev->soc->ctrl_psoc, 4365 peer->vdev->vdev_id, 4366 peer->mac_addr.raw, 4367 tid, delba_rcode, 4368 CDP_DELBA_REASON_NONE); 4369 } else { 4370 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4371 } 4372 } else { 4373 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4374 } 4375 } 4376 } 4377 } 4378 4379 /* 4380 * dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State 4381 * 4382 * @soc: Datapath soc handle 4383 * @peer_mac: Datapath peer mac address 4384 * @vdev_id: id of atapath vdev 4385 * @tid: TID number 4386 * @status: tx completion status 4387 * Return: 0 on success, error code on failure 4388 */ 4389 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, 4390 uint8_t *peer_mac, 4391 uint16_t vdev_id, 4392 uint8_t tid, int status) 4393 { 4394 struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find( 4395 (struct dp_soc *)cdp_soc, 4396 peer_mac, 0, vdev_id, 4397 DP_MOD_ID_CDP); 4398 struct dp_rx_tid *rx_tid = NULL; 4399 4400 if (!peer) { 4401 
dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4402 goto fail; 4403 } 4404 rx_tid = &peer->rx_tid[tid]; 4405 qdf_spin_lock_bh(&rx_tid->tid_lock); 4406 if (status) { 4407 rx_tid->num_addba_rsp_failed++; 4408 if (rx_tid->hw_qdesc_vaddr_unaligned) 4409 dp_rx_tid_update_wifi3(peer, tid, 1, 4410 IEEE80211_SEQ_MAX, false); 4411 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4412 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4413 dp_err("RxTid- %d addba rsp tx completion failed", tid); 4414 4415 goto success; 4416 } 4417 4418 rx_tid->num_addba_rsp_success++; 4419 if (rx_tid->ba_status == DP_RX_BA_INACTIVE) { 4420 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4421 dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS", 4422 cdp_soc, tid); 4423 goto fail; 4424 } 4425 4426 if (!qdf_atomic_read(&peer->is_default_route_set)) { 4427 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4428 dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT, 4429 cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 4430 goto fail; 4431 } 4432 4433 if (dp_rx_tid_update_wifi3(peer, tid, 4434 rx_tid->ba_win_size, 4435 rx_tid->startseqnum, 4436 false)) { 4437 dp_err("Failed update REO SSN"); 4438 } 4439 4440 dp_info("tid %u window_size %u start_seq_num %u", 4441 tid, rx_tid->ba_win_size, 4442 rx_tid->startseqnum); 4443 4444 /* First Session */ 4445 if (peer->active_ba_session_cnt == 0) { 4446 if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256) 4447 peer->hw_buffer_size = 256; 4448 else if (rx_tid->ba_win_size <= 1024 && 4449 rx_tid->ba_win_size > 256) 4450 peer->hw_buffer_size = 1024; 4451 else 4452 peer->hw_buffer_size = 64; 4453 } 4454 4455 rx_tid->ba_status = DP_RX_BA_ACTIVE; 4456 4457 peer->active_ba_session_cnt++; 4458 4459 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4460 4461 /* Kill any session having 256 buffer size 4462 * when 64 buffer size request is received. 4463 * Also, latch on to 64 as new buffer size. 
4464 */ 4465 if (peer->kill_256_sessions) { 4466 dp_teardown_256_ba_sessions(peer); 4467 peer->kill_256_sessions = 0; 4468 } 4469 4470 success: 4471 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4472 return QDF_STATUS_SUCCESS; 4473 4474 fail: 4475 if (peer) 4476 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4477 4478 return QDF_STATUS_E_FAILURE; 4479 } 4480 4481 /* 4482 * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer 4483 * 4484 * @soc: Datapath soc handle 4485 * @peer_mac: Datapath peer mac address 4486 * @vdev_id: id of atapath vdev 4487 * @tid: TID number 4488 * @dialogtoken: output dialogtoken 4489 * @statuscode: output dialogtoken 4490 * @buffersize: Output BA window size 4491 * @batimeout: Output BA timeout 4492 */ 4493 QDF_STATUS 4494 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 4495 uint16_t vdev_id, uint8_t tid, 4496 uint8_t *dialogtoken, uint16_t *statuscode, 4497 uint16_t *buffersize, uint16_t *batimeout) 4498 { 4499 struct dp_rx_tid *rx_tid = NULL; 4500 QDF_STATUS status = QDF_STATUS_SUCCESS; 4501 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, 4502 peer_mac, 0, vdev_id, 4503 DP_MOD_ID_CDP); 4504 4505 if (!peer) { 4506 dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4507 return QDF_STATUS_E_FAILURE; 4508 } 4509 rx_tid = &peer->rx_tid[tid]; 4510 qdf_spin_lock_bh(&rx_tid->tid_lock); 4511 rx_tid->num_of_addba_resp++; 4512 /* setup ADDBA response parameters */ 4513 *dialogtoken = rx_tid->dialogtoken; 4514 *statuscode = rx_tid->statuscode; 4515 *buffersize = rx_tid->ba_win_size; 4516 *batimeout = 0; 4517 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4518 4519 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4520 4521 return status; 4522 } 4523 4524 /* dp_check_ba_buffersize() - Check buffer size in request 4525 * and latch onto this size based on 4526 * size used in first active session. 
4527 * @peer: Datapath peer 4528 * @tid: Tid 4529 * @buffersize: Block ack window size 4530 * 4531 * Return: void 4532 */ 4533 static void dp_check_ba_buffersize(struct dp_peer *peer, 4534 uint16_t tid, 4535 uint16_t buffersize) 4536 { 4537 struct dp_rx_tid *rx_tid = NULL; 4538 struct dp_soc *soc = peer->vdev->pdev->soc; 4539 uint16_t max_ba_window; 4540 4541 max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid); 4542 dp_info("Input buffersize %d, max dp allowed %d", 4543 buffersize, max_ba_window); 4544 /* Adjust BA window size, restrict it to max DP allowed */ 4545 buffersize = QDF_MIN(buffersize, max_ba_window); 4546 4547 dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d", 4548 peer->mac_addr.raw, 4549 soc->per_tid_basize_max_tid, tid, buffersize, 4550 peer->hw_buffer_size); 4551 4552 rx_tid = &peer->rx_tid[tid]; 4553 if (soc->per_tid_basize_max_tid && 4554 tid < soc->per_tid_basize_max_tid) { 4555 rx_tid->ba_win_size = buffersize; 4556 goto out; 4557 } else { 4558 if (peer->active_ba_session_cnt == 0) { 4559 rx_tid->ba_win_size = buffersize; 4560 } else { 4561 if (peer->hw_buffer_size == 64) { 4562 if (buffersize <= 64) 4563 rx_tid->ba_win_size = buffersize; 4564 else 4565 rx_tid->ba_win_size = peer->hw_buffer_size; 4566 } else if (peer->hw_buffer_size == 256) { 4567 if (buffersize > 64) { 4568 rx_tid->ba_win_size = buffersize; 4569 } else { 4570 rx_tid->ba_win_size = buffersize; 4571 peer->hw_buffer_size = 64; 4572 peer->kill_256_sessions = 1; 4573 } 4574 } else if (buffersize <= 1024) { 4575 /** 4576 * Above checks are only for HK V2 4577 * Set incoming buffer size for others 4578 */ 4579 rx_tid->ba_win_size = buffersize; 4580 } else { 4581 dp_err("Invalid buffer size %d", buffersize); 4582 qdf_assert_always(0); 4583 } 4584 } 4585 } 4586 4587 out: 4588 dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d", 4589 rx_tid->ba_win_size, 4590 peer->hw_buffer_size, 4591 
peer->kill_256_sessions); 4592 } 4593 4594 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc, 4595 uint8_t *peer_mac, uint16_t vdev_id, 4596 uint8_t tid, uint16_t buffersize) 4597 { 4598 struct dp_rx_tid *rx_tid = NULL; 4599 struct dp_peer *peer; 4600 4601 peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc, 4602 peer_mac, 0, vdev_id, 4603 DP_MOD_ID_CDP); 4604 if (!peer) { 4605 dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4606 return QDF_STATUS_E_FAILURE; 4607 } 4608 4609 rx_tid = &peer->rx_tid[tid]; 4610 4611 qdf_spin_lock_bh(&rx_tid->tid_lock); 4612 rx_tid->ba_win_size = buffersize; 4613 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4614 4615 dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d", 4616 QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize); 4617 4618 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4619 4620 return QDF_STATUS_SUCCESS; 4621 } 4622 4623 #define DP_RX_BA_SESSION_DISABLE 1 4624 4625 /* 4626 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer 4627 * 4628 * @soc: Datapath soc handle 4629 * @peer_mac: Datapath peer mac address 4630 * @vdev_id: id of atapath vdev 4631 * @dialogtoken: dialogtoken from ADDBA frame 4632 * @tid: TID number 4633 * @batimeout: BA timeout 4634 * @buffersize: BA window size 4635 * @startseqnum: Start seq. 
number received in BA sequence control 4636 * 4637 * Return: 0 on success, error code on failure 4638 */ 4639 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, 4640 uint8_t *peer_mac, 4641 uint16_t vdev_id, 4642 uint8_t dialogtoken, 4643 uint16_t tid, uint16_t batimeout, 4644 uint16_t buffersize, 4645 uint16_t startseqnum) 4646 { 4647 QDF_STATUS status = QDF_STATUS_SUCCESS; 4648 struct dp_rx_tid *rx_tid = NULL; 4649 struct dp_peer *peer; 4650 4651 peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc, 4652 peer_mac, 4653 0, vdev_id, 4654 DP_MOD_ID_CDP); 4655 4656 if (!peer) { 4657 dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4658 return QDF_STATUS_E_FAILURE; 4659 } 4660 rx_tid = &peer->rx_tid[tid]; 4661 qdf_spin_lock_bh(&rx_tid->tid_lock); 4662 rx_tid->num_of_addba_req++; 4663 if ((rx_tid->ba_status == DP_RX_BA_ACTIVE && 4664 rx_tid->hw_qdesc_vaddr_unaligned)) { 4665 dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false); 4666 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4667 peer->active_ba_session_cnt--; 4668 dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup", 4669 cdp_soc, tid); 4670 } 4671 4672 if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 4673 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4674 status = QDF_STATUS_E_FAILURE; 4675 goto fail; 4676 } 4677 4678 if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) { 4679 dp_peer_info("%pK: disable BA session", 4680 cdp_soc); 4681 4682 buffersize = 1; 4683 } else if (rx_tid->rx_ba_win_size_override) { 4684 dp_peer_info("%pK: override BA win to %d", cdp_soc, 4685 rx_tid->rx_ba_win_size_override); 4686 4687 buffersize = rx_tid->rx_ba_win_size_override; 4688 } else { 4689 dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc, 4690 buffersize); 4691 } 4692 4693 dp_check_ba_buffersize(peer, tid, buffersize); 4694 4695 if (dp_rx_tid_setup_wifi3(peer, tid, 4696 rx_tid->ba_win_size, startseqnum)) { 4697 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4698 
qdf_spin_unlock_bh(&rx_tid->tid_lock); 4699 status = QDF_STATUS_E_FAILURE; 4700 goto fail; 4701 } 4702 rx_tid->ba_status = DP_RX_BA_IN_PROGRESS; 4703 4704 rx_tid->dialogtoken = dialogtoken; 4705 rx_tid->startseqnum = startseqnum; 4706 4707 if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS) 4708 rx_tid->statuscode = rx_tid->userstatuscode; 4709 else 4710 rx_tid->statuscode = IEEE80211_STATUS_SUCCESS; 4711 4712 if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) 4713 rx_tid->statuscode = IEEE80211_STATUS_REFUSED; 4714 4715 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4716 4717 fail: 4718 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4719 4720 return status; 4721 } 4722 4723 /* 4724 * dp_set_addba_response() – Set a user defined ADDBA response status code 4725 * 4726 * @soc: Datapath soc handle 4727 * @peer_mac: Datapath peer mac address 4728 * @vdev_id: id of atapath vdev 4729 * @tid: TID number 4730 * @statuscode: response status code to be set 4731 */ 4732 QDF_STATUS 4733 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 4734 uint16_t vdev_id, uint8_t tid, uint16_t statuscode) 4735 { 4736 struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find( 4737 (struct dp_soc *)cdp_soc, 4738 peer_mac, 0, vdev_id, 4739 DP_MOD_ID_CDP); 4740 struct dp_rx_tid *rx_tid; 4741 4742 if (!peer) { 4743 dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4744 return QDF_STATUS_E_FAILURE; 4745 } 4746 4747 rx_tid = &peer->rx_tid[tid]; 4748 qdf_spin_lock_bh(&rx_tid->tid_lock); 4749 rx_tid->userstatuscode = statuscode; 4750 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4751 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4752 4753 return QDF_STATUS_SUCCESS; 4754 } 4755 4756 /* 4757 * dp_rx_delba_process_wifi3() – Process DELBA from peer 4758 * @soc: Datapath soc handle 4759 * @peer_mac: Datapath peer mac address 4760 * @vdev_id: id of atapath vdev 4761 * @tid: TID number 4762 * @reasoncode: Reason code received in DELBA frame 4763 * 4764 * Return: 0 on success, error code on failure 
4765 */ 4766 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 4767 uint16_t vdev_id, int tid, uint16_t reasoncode) 4768 { 4769 QDF_STATUS status = QDF_STATUS_SUCCESS; 4770 struct dp_rx_tid *rx_tid; 4771 struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find( 4772 (struct dp_soc *)cdp_soc, 4773 peer_mac, 0, vdev_id, 4774 DP_MOD_ID_CDP); 4775 4776 if (!peer) { 4777 dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc); 4778 return QDF_STATUS_E_FAILURE; 4779 } 4780 rx_tid = &peer->rx_tid[tid]; 4781 qdf_spin_lock_bh(&rx_tid->tid_lock); 4782 if (rx_tid->ba_status == DP_RX_BA_INACTIVE || 4783 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 4784 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4785 status = QDF_STATUS_E_FAILURE; 4786 goto fail; 4787 } 4788 /* TODO: See if we can delete the existing REO queue descriptor and 4789 * replace with a new one without queue extenstion descript to save 4790 * memory 4791 */ 4792 rx_tid->delba_rcode = reasoncode; 4793 rx_tid->num_of_delba_req++; 4794 dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false); 4795 4796 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4797 peer->active_ba_session_cnt--; 4798 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4799 fail: 4800 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4801 4802 return status; 4803 } 4804 4805 /* 4806 * dp_rx_delba_tx_completion_wifi3() – Send Delba Request 4807 * 4808 * @soc: Datapath soc handle 4809 * @peer_mac: Datapath peer mac address 4810 * @vdev_id: id of atapath vdev 4811 * @tid: TID number 4812 * @status: tx completion status 4813 * Return: 0 on success, error code on failure 4814 */ 4815 4816 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 4817 uint16_t vdev_id, 4818 uint8_t tid, int status) 4819 { 4820 QDF_STATUS ret = QDF_STATUS_SUCCESS; 4821 struct dp_rx_tid *rx_tid = NULL; 4822 struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find( 4823 (struct dp_soc *)cdp_soc, 4824 peer_mac, 0, vdev_id, 4825 DP_MOD_ID_CDP); 4826 4827 if (!peer) { 4828 
dp_peer_debug("%pK: Peer is NULL!", cdp_soc); 4829 return QDF_STATUS_E_FAILURE; 4830 } 4831 rx_tid = &peer->rx_tid[tid]; 4832 qdf_spin_lock_bh(&rx_tid->tid_lock); 4833 if (status) { 4834 rx_tid->delba_tx_fail_cnt++; 4835 if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) { 4836 rx_tid->delba_tx_retry = 0; 4837 rx_tid->delba_tx_status = 0; 4838 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4839 } else { 4840 rx_tid->delba_tx_retry++; 4841 rx_tid->delba_tx_status = 1; 4842 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4843 if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) 4844 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( 4845 peer->vdev->pdev->soc->ctrl_psoc, 4846 peer->vdev->vdev_id, 4847 peer->mac_addr.raw, tid, 4848 rx_tid->delba_rcode, 4849 CDP_DELBA_REASON_NONE); 4850 } 4851 goto end; 4852 } else { 4853 rx_tid->delba_tx_success_cnt++; 4854 rx_tid->delba_tx_retry = 0; 4855 rx_tid->delba_tx_status = 0; 4856 } 4857 if (rx_tid->ba_status == DP_RX_BA_ACTIVE) { 4858 dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false); 4859 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4860 peer->active_ba_session_cnt--; 4861 } 4862 if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 4863 dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false); 4864 rx_tid->ba_status = DP_RX_BA_INACTIVE; 4865 } 4866 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4867 4868 end: 4869 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4870 4871 return ret; 4872 } 4873 4874 /** 4875 * dp_set_pn_check_wifi3() - enable PN check in REO for security 4876 * @soc: Datapath soc handle 4877 * @peer_mac: Datapath peer mac address 4878 * @vdev_id: id of atapath vdev 4879 * @vdev: Datapath vdev 4880 * @pdev - data path device instance 4881 * @sec_type - security type 4882 * @rx_pn - Receive pn starting number 4883 * 4884 */ 4885 4886 QDF_STATUS 4887 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 4888 uint8_t *peer_mac, enum cdp_sec_type sec_type, 4889 uint32_t *rx_pn) 4890 { 4891 struct dp_pdev *pdev; 4892 int i; 
4893 uint8_t pn_size; 4894 struct hal_reo_cmd_params params; 4895 struct dp_peer *peer = NULL; 4896 struct dp_vdev *vdev = NULL; 4897 4898 peer = dp_peer_find_hash_find((struct dp_soc *)soc, 4899 peer_mac, 0, vdev_id, 4900 DP_MOD_ID_CDP); 4901 4902 if (!peer) { 4903 dp_peer_debug("%pK: Peer is NULL!\n", soc); 4904 return QDF_STATUS_E_FAILURE; 4905 } 4906 4907 vdev = peer->vdev; 4908 4909 if (!vdev) { 4910 dp_peer_debug("%pK: VDEV is NULL!\n", soc); 4911 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4912 return QDF_STATUS_E_FAILURE; 4913 } 4914 4915 pdev = vdev->pdev; 4916 qdf_mem_zero(¶ms, sizeof(params)); 4917 4918 params.std.need_status = 1; 4919 params.u.upd_queue_params.update_pn_valid = 1; 4920 params.u.upd_queue_params.update_pn_size = 1; 4921 params.u.upd_queue_params.update_pn = 1; 4922 params.u.upd_queue_params.update_pn_check_needed = 1; 4923 params.u.upd_queue_params.update_svld = 1; 4924 params.u.upd_queue_params.svld = 0; 4925 4926 switch (sec_type) { 4927 case cdp_sec_type_tkip_nomic: 4928 case cdp_sec_type_aes_ccmp: 4929 case cdp_sec_type_aes_ccmp_256: 4930 case cdp_sec_type_aes_gcmp: 4931 case cdp_sec_type_aes_gcmp_256: 4932 params.u.upd_queue_params.pn_check_needed = 1; 4933 params.u.upd_queue_params.pn_size = PN_SIZE_48; 4934 pn_size = 48; 4935 break; 4936 case cdp_sec_type_wapi: 4937 params.u.upd_queue_params.pn_check_needed = 1; 4938 params.u.upd_queue_params.pn_size = PN_SIZE_128; 4939 pn_size = 128; 4940 if (vdev->opmode == wlan_op_mode_ap) { 4941 params.u.upd_queue_params.pn_even = 1; 4942 params.u.upd_queue_params.update_pn_even = 1; 4943 } else { 4944 params.u.upd_queue_params.pn_uneven = 1; 4945 params.u.upd_queue_params.update_pn_uneven = 1; 4946 } 4947 break; 4948 default: 4949 params.u.upd_queue_params.pn_check_needed = 0; 4950 pn_size = 0; 4951 break; 4952 } 4953 4954 4955 for (i = 0; i < DP_MAX_TIDS; i++) { 4956 struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; 4957 qdf_spin_lock_bh(&rx_tid->tid_lock); 4958 if 
(rx_tid->hw_qdesc_vaddr_unaligned) { 4959 params.std.addr_lo = 4960 rx_tid->hw_qdesc_paddr & 0xffffffff; 4961 params.std.addr_hi = 4962 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 4963 4964 if (pn_size) { 4965 dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x", 4966 soc, i, rx_pn[3], rx_pn[2], 4967 rx_pn[1], rx_pn[0]); 4968 params.u.upd_queue_params.update_pn_valid = 1; 4969 params.u.upd_queue_params.pn_31_0 = rx_pn[0]; 4970 params.u.upd_queue_params.pn_63_32 = rx_pn[1]; 4971 params.u.upd_queue_params.pn_95_64 = rx_pn[2]; 4972 params.u.upd_queue_params.pn_127_96 = rx_pn[3]; 4973 } 4974 rx_tid->pn_size = pn_size; 4975 if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc), 4976 CMD_UPDATE_RX_REO_QUEUE, 4977 ¶ms, dp_rx_tid_update_cb, 4978 rx_tid)) { 4979 dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE" 4980 "tid %d desc %pK", rx_tid->tid, 4981 (void *)(rx_tid->hw_qdesc_paddr)); 4982 DP_STATS_INC(cdp_soc_t_to_dp_soc(soc), 4983 rx.err.reo_cmd_send_fail, 1); 4984 } 4985 } else { 4986 dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i); 4987 } 4988 qdf_spin_unlock_bh(&rx_tid->tid_lock); 4989 } 4990 4991 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 4992 4993 return QDF_STATUS_SUCCESS; 4994 } 4995 4996 4997 /** 4998 * dp_set_key_sec_type_wifi3() - set security mode of key 4999 * @soc: Datapath soc handle 5000 * @peer_mac: Datapath peer mac address 5001 * @vdev_id: id of atapath vdev 5002 * @vdev: Datapath vdev 5003 * @pdev - data path device instance 5004 * @sec_type - security type 5005 * #is_unicast - key type 5006 * 5007 */ 5008 5009 QDF_STATUS 5010 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 5011 uint8_t *peer_mac, enum cdp_sec_type sec_type, 5012 bool is_unicast) 5013 { 5014 struct dp_peer *peer = 5015 dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc, 5016 peer_mac, 0, vdev_id, 5017 DP_MOD_ID_CDP); 5018 int sec_index; 5019 5020 if (!peer) { 5021 dp_peer_debug("%pK: Peer is NULL!\n", soc); 5022 return QDF_STATUS_E_FAILURE; 5023 } 5024 5025 if 
(!peer->txrx_peer) { 5026 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5027 dp_peer_debug("%pK: txrx peer is NULL!\n", soc); 5028 return QDF_STATUS_E_FAILURE; 5029 } 5030 5031 dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d", 5032 soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), 5033 is_unicast ? "ucast" : "mcast", sec_type); 5034 5035 sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast; 5036 peer->txrx_peer->security[sec_index].sec_type = sec_type; 5037 5038 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5039 5040 return QDF_STATUS_SUCCESS; 5041 } 5042 5043 void 5044 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, 5045 enum cdp_sec_type sec_type, int is_unicast, 5046 u_int32_t *michael_key, 5047 u_int32_t *rx_pn) 5048 { 5049 struct dp_peer *peer; 5050 struct dp_txrx_peer *txrx_peer; 5051 int sec_index; 5052 5053 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT); 5054 if (!peer) { 5055 dp_peer_err("Couldn't find peer from ID %d - skipping security inits", 5056 peer_id); 5057 return; 5058 } 5059 txrx_peer = dp_get_txrx_peer(peer); 5060 if (!txrx_peer) { 5061 dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits", 5062 peer_id); 5063 return; 5064 } 5065 5066 dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d", 5067 soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), 5068 is_unicast ? "ucast" : "mcast", sec_type); 5069 sec_index = is_unicast ? 
dp_sec_ucast : dp_sec_mcast; 5070 5071 peer->txrx_peer->security[sec_index].sec_type = sec_type; 5072 #ifdef notyet /* TODO: See if this is required for defrag support */ 5073 /* michael key only valid for TKIP, but for simplicity, 5074 * copy it anyway 5075 */ 5076 qdf_mem_copy( 5077 &peer->txrx_peer->security[sec_index].michael_key[0], 5078 michael_key, 5079 sizeof(peer->txrx_peer->security[sec_index].michael_key)); 5080 #ifdef BIG_ENDIAN_HOST 5081 OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0], 5082 sizeof(peer->txrx_peer->security[sec_index].michael_key)); 5083 #endif /* BIG_ENDIAN_HOST */ 5084 #endif 5085 5086 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */ 5087 if (sec_type != cdp_sec_type_wapi) { 5088 qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS); 5089 } else { 5090 for (i = 0; i < DP_MAX_TIDS; i++) { 5091 /* 5092 * Setting PN valid bit for WAPI sec_type, 5093 * since WAPI PN has to be started with predefined value 5094 */ 5095 peer->tids_last_pn_valid[i] = 1; 5096 qdf_mem_copy( 5097 (u_int8_t *) &peer->tids_last_pn[i], 5098 (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t)); 5099 peer->tids_last_pn[i].pn128[1] = 5100 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]); 5101 peer->tids_last_pn[i].pn128[0] = 5102 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]); 5103 } 5104 } 5105 #endif 5106 /* TODO: Update HW TID queue with PN check parameters (pn type for 5107 * all security types and last pn for WAPI) once REO command API 5108 * is available 5109 */ 5110 5111 dp_peer_unref_delete(peer, DP_MOD_ID_HTT); 5112 } 5113 5114 #ifdef QCA_PEER_EXT_STATS 5115 /* 5116 * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay 5117 * stats content 5118 * @soc: DP SoC context 5119 * @txrx_peer: DP txrx peer context 5120 * 5121 * Allocate the peer delay stats context 5122 * 5123 * Return: QDF_STATUS_SUCCESS if allocation is 5124 * successful 5125 */ 5126 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc, 5127 struct dp_txrx_peer 
*txrx_peer) 5128 { 5129 uint8_t tid, ctx_id; 5130 5131 if (!soc || !txrx_peer) { 5132 dp_warn("Null soc%pK or peer%pK", soc, txrx_peer); 5133 return QDF_STATUS_E_INVAL; 5134 } 5135 5136 if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)) 5137 return QDF_STATUS_SUCCESS; 5138 5139 /* 5140 * Allocate memory for peer extended stats. 5141 */ 5142 txrx_peer->delay_stats = 5143 qdf_mem_malloc(sizeof(struct dp_peer_delay_stats)); 5144 if (!txrx_peer->delay_stats) { 5145 dp_err("Peer extended stats obj alloc failed!!"); 5146 return QDF_STATUS_E_NOMEM; 5147 } 5148 5149 for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { 5150 for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) { 5151 struct cdp_delay_tx_stats *tx_delay = 5152 &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay; 5153 struct cdp_delay_rx_stats *rx_delay = 5154 &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay; 5155 5156 dp_hist_init(&tx_delay->tx_swq_delay, 5157 CDP_HIST_TYPE_SW_ENQEUE_DELAY); 5158 dp_hist_init(&tx_delay->hwtx_delay, 5159 CDP_HIST_TYPE_HW_COMP_DELAY); 5160 dp_hist_init(&rx_delay->to_stack_delay, 5161 CDP_HIST_TYPE_REAP_STACK); 5162 } 5163 } 5164 5165 return QDF_STATUS_SUCCESS; 5166 } 5167 5168 /* 5169 * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context 5170 * @txrx_peer: txrx DP peer context 5171 * 5172 * Free the peer delay stats context 5173 * 5174 * Return: Void 5175 */ 5176 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc, 5177 struct dp_txrx_peer *txrx_peer) 5178 { 5179 if (!txrx_peer) { 5180 dp_warn("peer_ext dealloc failed due to NULL peer object"); 5181 return; 5182 } 5183 5184 if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)) 5185 return; 5186 5187 if (!txrx_peer->delay_stats) 5188 return; 5189 5190 qdf_mem_free(txrx_peer->delay_stats); 5191 txrx_peer->delay_stats = NULL; 5192 } 5193 5194 /** 5195 * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer 5196 * 5197 * @txrx_peer: dp_txrx_peer handle 5198 * 
5199 * Return: void 5200 */ 5201 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer) 5202 { 5203 if (txrx_peer->delay_stats) 5204 qdf_mem_zero(txrx_peer->delay_stats, 5205 sizeof(struct dp_peer_delay_stats)); 5206 } 5207 #endif 5208 5209 #ifdef WLAN_PEER_JITTER 5210 /** 5211 * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer 5212 * 5213 * @soc: Datapath pdev handle 5214 * @txrx_peer: dp_txrx_peer handle 5215 * 5216 * Return: QDF_STATUS 5217 */ 5218 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev, 5219 struct dp_txrx_peer *txrx_peer) 5220 { 5221 if (!pdev || !txrx_peer) { 5222 dp_warn("Null pdev or peer"); 5223 return QDF_STATUS_E_INVAL; 5224 } 5225 5226 /* 5227 * Allocate memory for jitter stats only when 5228 * operating in offload enabled mode. 5229 */ 5230 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) 5231 return QDF_STATUS_SUCCESS; 5232 5233 txrx_peer->jitter_stats = 5234 qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS); 5235 if (!txrx_peer->jitter_stats) { 5236 dp_warn("Jitter stats obj alloc failed!!"); 5237 return QDF_STATUS_E_NOMEM; 5238 } 5239 5240 return QDF_STATUS_SUCCESS; 5241 } 5242 5243 /** 5244 * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context 5245 * 5246 * @pdev: Datapath pdev handle 5247 * @txrx_peer: dp_txrx_peer handle 5248 * 5249 * Return: void 5250 */ 5251 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev, 5252 struct dp_txrx_peer *txrx_peer) 5253 { 5254 if (!pdev || !txrx_peer) { 5255 dp_warn("Null pdev or peer"); 5256 return; 5257 } 5258 5259 /* Check for offload mode */ 5260 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) 5261 return; 5262 5263 if (txrx_peer->jitter_stats) { 5264 qdf_mem_free(txrx_peer->jitter_stats); 5265 txrx_peer->jitter_stats = NULL; 5266 } 5267 } 5268 5269 /** 5270 * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer 5271 * 5272 * @txrx_peer: dp_txrx_peer handle 5273 * 5274 * Return: 
void 5275 */ 5276 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer) 5277 { 5278 if (txrx_peer->jitter_stats) 5279 qdf_mem_zero(txrx_peer->jitter_stats, 5280 sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS); 5281 } 5282 #endif 5283 5284 QDF_STATUS 5285 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id, 5286 uint8_t tid, uint16_t win_sz) 5287 { 5288 struct dp_soc *soc = (struct dp_soc *)soc_handle; 5289 struct dp_peer *peer; 5290 struct dp_rx_tid *rx_tid; 5291 QDF_STATUS status = QDF_STATUS_SUCCESS; 5292 5293 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT); 5294 5295 if (!peer) { 5296 dp_peer_err("%pK: Couldn't find peer from ID %d", 5297 soc, peer_id); 5298 return QDF_STATUS_E_FAILURE; 5299 } 5300 5301 qdf_assert_always(tid < DP_MAX_TIDS); 5302 5303 rx_tid = &peer->rx_tid[tid]; 5304 5305 if (rx_tid->hw_qdesc_vaddr_unaligned) { 5306 if (!rx_tid->delba_tx_status) { 5307 dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ", 5308 soc, peer_id, tid, win_sz); 5309 5310 qdf_spin_lock_bh(&rx_tid->tid_lock); 5311 5312 rx_tid->delba_tx_status = 1; 5313 5314 rx_tid->rx_ba_win_size_override = 5315 qdf_min((uint16_t)63, win_sz); 5316 5317 rx_tid->delba_rcode = 5318 IEEE80211_REASON_QOS_SETUP_REQUIRED; 5319 5320 qdf_spin_unlock_bh(&rx_tid->tid_lock); 5321 5322 if (soc->cdp_soc.ol_ops->send_delba) 5323 soc->cdp_soc.ol_ops->send_delba( 5324 peer->vdev->pdev->soc->ctrl_psoc, 5325 peer->vdev->vdev_id, 5326 peer->mac_addr.raw, 5327 tid, 5328 rx_tid->delba_rcode, 5329 CDP_DELBA_REASON_NONE); 5330 } 5331 } else { 5332 dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid); 5333 status = QDF_STATUS_E_FAILURE; 5334 } 5335 5336 dp_peer_unref_delete(peer, DP_MOD_ID_HTT); 5337 5338 return status; 5339 } 5340 5341 #ifdef DP_PEER_EXTENDED_API 5342 /** 5343 * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer 5344 * @soc: DP soc handle 5345 * @txrx_peer: Core txrx_peer handle 5346 * @set_bw: enum of bandwidth to be set for this 
peer connection 5347 * 5348 * Return: None 5349 */ 5350 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer, 5351 enum cdp_peer_bw set_bw) 5352 { 5353 if (!txrx_peer) 5354 return; 5355 5356 txrx_peer->bw = set_bw; 5357 5358 switch (set_bw) { 5359 case CDP_160_MHZ: 5360 case CDP_320_MHZ: 5361 txrx_peer->mpdu_retry_threshold = 5362 soc->wlan_cfg_ctx->mpdu_retry_threshold_2; 5363 break; 5364 case CDP_20_MHZ: 5365 case CDP_40_MHZ: 5366 case CDP_80_MHZ: 5367 default: 5368 txrx_peer->mpdu_retry_threshold = 5369 soc->wlan_cfg_ctx->mpdu_retry_threshold_1; 5370 break; 5371 } 5372 5373 dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u", 5374 txrx_peer->peer_id, txrx_peer->bw, 5375 txrx_peer->mpdu_retry_threshold); 5376 } 5377 5378 #ifdef WLAN_FEATURE_11BE_MLO 5379 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5380 struct ol_txrx_desc_type *sta_desc) 5381 { 5382 struct dp_peer *peer; 5383 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5384 5385 peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes, 5386 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 5387 5388 if (!peer) 5389 return QDF_STATUS_E_FAULT; 5390 5391 qdf_spin_lock_bh(&peer->peer_info_lock); 5392 peer->state = OL_TXRX_PEER_STATE_CONN; 5393 qdf_spin_unlock_bh(&peer->peer_info_lock); 5394 5395 dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw); 5396 5397 dp_rx_flush_rx_cached(peer, false); 5398 5399 if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) { 5400 dp_peer_info("register for mld peer" QDF_MAC_ADDR_FMT, 5401 QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw)); 5402 qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock); 5403 peer->mld_peer->state = peer->state; 5404 qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock); 5405 dp_rx_flush_rx_cached(peer->mld_peer, false); 5406 } 5407 5408 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5409 5410 return QDF_STATUS_SUCCESS; 5411 } 5412 5413 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 5414 enum 
ol_txrx_peer_state state) 5415 { 5416 struct dp_peer *peer; 5417 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5418 5419 peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL, 5420 DP_MOD_ID_CDP); 5421 if (!peer) { 5422 dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]", 5423 soc, QDF_MAC_ADDR_REF(peer_mac)); 5424 return QDF_STATUS_E_FAILURE; 5425 } 5426 peer->state = state; 5427 peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0; 5428 5429 if (peer->txrx_peer) 5430 peer->txrx_peer->authorize = peer->authorize; 5431 5432 dp_peer_info("peer" QDF_MAC_ADDR_FMT "state %d", 5433 QDF_MAC_ADDR_REF(peer->mac_addr.raw), 5434 peer->state); 5435 5436 if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) { 5437 peer->mld_peer->state = peer->state; 5438 peer->mld_peer->txrx_peer->authorize = peer->authorize; 5439 dp_peer_info("mld peer" QDF_MAC_ADDR_FMT "state %d", 5440 QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw), 5441 peer->mld_peer->state); 5442 } 5443 5444 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 5445 * Decrement it here. 
5446 */ 5447 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5448 5449 return QDF_STATUS_SUCCESS; 5450 } 5451 #else 5452 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5453 struct ol_txrx_desc_type *sta_desc) 5454 { 5455 struct dp_peer *peer; 5456 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5457 5458 peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes, 5459 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 5460 5461 if (!peer) 5462 return QDF_STATUS_E_FAULT; 5463 5464 qdf_spin_lock_bh(&peer->peer_info_lock); 5465 peer->state = OL_TXRX_PEER_STATE_CONN; 5466 qdf_spin_unlock_bh(&peer->peer_info_lock); 5467 5468 dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw); 5469 5470 dp_rx_flush_rx_cached(peer, false); 5471 5472 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5473 5474 return QDF_STATUS_SUCCESS; 5475 } 5476 5477 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 5478 enum ol_txrx_peer_state state) 5479 { 5480 struct dp_peer *peer; 5481 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5482 5483 peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL, 5484 DP_MOD_ID_CDP); 5485 if (!peer) { 5486 dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]", 5487 soc, QDF_MAC_ADDR_REF(peer_mac)); 5488 return QDF_STATUS_E_FAILURE; 5489 } 5490 peer->state = state; 5491 peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0; 5492 5493 if (peer->txrx_peer) 5494 peer->txrx_peer->authorize = peer->authorize; 5495 5496 dp_info("peer %pK state %d", peer, peer->state); 5497 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 5498 * Decrement it here. 
5499 */ 5500 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5501 5502 return QDF_STATUS_SUCCESS; 5503 } 5504 #endif 5505 5506 QDF_STATUS 5507 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5508 struct qdf_mac_addr peer_addr) 5509 { 5510 struct dp_peer *peer; 5511 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5512 5513 peer = dp_peer_find_hash_find(soc, peer_addr.bytes, 5514 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 5515 if (!peer || !peer->valid) 5516 return QDF_STATUS_E_FAULT; 5517 5518 dp_clear_peer_internal(soc, peer); 5519 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5520 return QDF_STATUS_SUCCESS; 5521 } 5522 5523 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 5524 uint8_t *vdev_id) 5525 { 5526 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5527 struct dp_peer *peer = 5528 dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL, 5529 DP_MOD_ID_CDP); 5530 5531 if (!peer) 5532 return QDF_STATUS_E_FAILURE; 5533 5534 dp_info("peer %pK vdev %pK vdev id %d", 5535 peer, peer->vdev, peer->vdev->vdev_id); 5536 *vdev_id = peer->vdev->vdev_id; 5537 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 5538 * Decrement it here. 
5539 */ 5540 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5541 5542 return QDF_STATUS_SUCCESS; 5543 } 5544 5545 struct cdp_vdev * 5546 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 5547 struct qdf_mac_addr peer_addr) 5548 { 5549 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 5550 struct dp_peer *peer = NULL; 5551 struct cdp_vdev *vdev = NULL; 5552 5553 if (!pdev) { 5554 dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT, 5555 QDF_MAC_ADDR_REF(peer_addr.bytes)); 5556 return NULL; 5557 } 5558 5559 peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0, 5560 DP_VDEV_ALL, DP_MOD_ID_CDP); 5561 if (!peer) { 5562 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 5563 "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT, 5564 QDF_MAC_ADDR_REF(peer_addr.bytes)); 5565 return NULL; 5566 } 5567 5568 vdev = (struct cdp_vdev *)peer->vdev; 5569 5570 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5571 return vdev; 5572 } 5573 5574 /** 5575 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs 5576 * @peer - peer instance 5577 * 5578 * Get virtual interface instance which peer belongs 5579 * 5580 * Return: virtual interface instance pointer 5581 * NULL in case cannot find 5582 */ 5583 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle) 5584 { 5585 struct dp_peer *peer = peer_handle; 5586 5587 DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev); 5588 return (struct cdp_vdev *)peer->vdev; 5589 } 5590 5591 /** 5592 * dp_peer_get_peer_mac_addr() - Get peer mac address 5593 * @peer - peer instance 5594 * 5595 * Get peer mac address 5596 * 5597 * Return: peer mac address pointer 5598 * NULL in case cannot find 5599 */ 5600 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle) 5601 { 5602 struct dp_peer *peer = peer_handle; 5603 uint8_t *mac; 5604 5605 mac = peer->mac_addr.raw; 5606 dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", 5607 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 5608 return peer->mac_addr.raw; 
5609 } 5610 5611 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5612 uint8_t *peer_mac) 5613 { 5614 enum ol_txrx_peer_state peer_state; 5615 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5616 struct cdp_peer_info peer_info = { 0 }; 5617 struct dp_peer *peer; 5618 struct dp_peer *tgt_peer; 5619 5620 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, 5621 false, CDP_WILD_PEER_TYPE); 5622 5623 peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP); 5624 5625 if (!peer) 5626 return OL_TXRX_PEER_STATE_INVALID; 5627 5628 DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state); 5629 5630 tgt_peer = dp_get_tgt_peer_from_peer(peer); 5631 peer_state = tgt_peer->state; 5632 5633 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5634 5635 return peer_state; 5636 } 5637 5638 /** 5639 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 5640 * @pdev - data path device instance 5641 * 5642 * local peer id pool alloc for physical device 5643 * 5644 * Return: none 5645 */ 5646 void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 5647 { 5648 int i; 5649 5650 /* point the freelist to the first ID */ 5651 pdev->local_peer_ids.freelist = 0; 5652 5653 /* link each ID to the next one */ 5654 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) { 5655 pdev->local_peer_ids.pool[i] = i + 1; 5656 pdev->local_peer_ids.map[i] = NULL; 5657 } 5658 5659 /* link the last ID to itself, to mark the end of the list */ 5660 i = OL_TXRX_NUM_LOCAL_PEER_IDS; 5661 pdev->local_peer_ids.pool[i] = i; 5662 5663 qdf_spinlock_create(&pdev->local_peer_ids.lock); 5664 DP_TRACE(INFO, "Peer pool init"); 5665 } 5666 5667 /** 5668 * dp_local_peer_id_alloc() - allocate local peer id 5669 * @pdev - data path device instance 5670 * @peer - new peer instance 5671 * 5672 * allocate local peer id 5673 * 5674 * Return: none 5675 */ 5676 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 5677 { 5678 int i; 5679 5680 
qdf_spin_lock_bh(&pdev->local_peer_ids.lock); 5681 i = pdev->local_peer_ids.freelist; 5682 if (pdev->local_peer_ids.pool[i] == i) { 5683 /* the list is empty, except for the list-end marker */ 5684 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID; 5685 } else { 5686 /* take the head ID and advance the freelist */ 5687 peer->local_id = i; 5688 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i]; 5689 pdev->local_peer_ids.map[i] = peer; 5690 } 5691 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); 5692 dp_info("peer %pK, local id %d", peer, peer->local_id); 5693 } 5694 5695 /** 5696 * dp_local_peer_id_free() - remove local peer id 5697 * @pdev - data path device instance 5698 * @peer - peer instance should be removed 5699 * 5700 * remove local peer id 5701 * 5702 * Return: none 5703 */ 5704 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 5705 { 5706 int i = peer->local_id; 5707 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) || 5708 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) { 5709 return; 5710 } 5711 5712 /* put this ID on the head of the freelist */ 5713 qdf_spin_lock_bh(&pdev->local_peer_ids.lock); 5714 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist; 5715 pdev->local_peer_ids.freelist = i; 5716 pdev->local_peer_ids.map[i] = NULL; 5717 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); 5718 } 5719 5720 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, 5721 uint8_t vdev_id, uint8_t *peer_addr) 5722 { 5723 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5724 struct dp_peer *peer = NULL; 5725 5726 peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id, 5727 DP_MOD_ID_CDP); 5728 if (!peer) 5729 return false; 5730 5731 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5732 5733 return true; 5734 } 5735 5736 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 5737 uint8_t vdev_id, uint8_t *peer_addr, 5738 uint16_t max_bssid) 5739 { 5740 int i; 5741 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5742 struct dp_peer *peer = 
NULL; 5743 5744 for (i = 0; i < max_bssid; i++) { 5745 /* Need to check vdevs other than the vdev_id */ 5746 if (vdev_id == i) 5747 continue; 5748 peer = dp_peer_find_hash_find(soc, peer_addr, 0, i, 5749 DP_MOD_ID_CDP); 5750 if (peer) { 5751 dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d", 5752 QDF_MAC_ADDR_REF(peer_addr), i); 5753 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5754 return true; 5755 } 5756 } 5757 5758 return false; 5759 } 5760 5761 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 5762 uint8_t *peer_addr) 5763 { 5764 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5765 struct dp_peer *peer = NULL; 5766 5767 peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL, 5768 DP_MOD_ID_CDP); 5769 if (peer) { 5770 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5771 return true; 5772 } 5773 5774 return false; 5775 } 5776 5777 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 5778 uint8_t *peer_mac, bool val) 5779 { 5780 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 5781 struct dp_peer *peer = NULL; 5782 5783 peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id, 5784 DP_MOD_ID_CDP); 5785 if (!peer) { 5786 dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT, 5787 QDF_MAC_ADDR_REF(peer_mac)); 5788 return; 5789 } 5790 5791 dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT, 5792 val, QDF_MAC_ADDR_REF(peer_mac)); 5793 peer->is_tdls_peer = val; 5794 5795 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5796 } 5797 #endif 5798 5799 #ifdef IPA_OFFLOAD 5800 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer, 5801 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb) 5802 { 5803 struct dp_soc *soc = peer->vdev->pdev->soc; 5804 struct hal_reo_cmd_params params; 5805 int i; 5806 int stats_cmd_sent_cnt = 0; 5807 QDF_STATUS status; 5808 uint16_t peer_id = peer->peer_id; 5809 unsigned long comb_peer_id_tid; 5810 5811 if (!dp_stats_cmd_cb) 5812 return stats_cmd_sent_cnt; 5813 5814 qdf_mem_zero(¶ms, sizeof(params)); 5815 
for (i = 0; i < DP_MAX_TIDS; i++) { 5816 struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; 5817 5818 if (rx_tid->hw_qdesc_vaddr_unaligned) { 5819 params.std.need_status = 1; 5820 params.std.addr_lo = 5821 rx_tid->hw_qdesc_paddr & 0xffffffff; 5822 params.std.addr_hi = 5823 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 5824 params.u.stats_params.clear = 1; 5825 comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT) 5826 | peer_id); 5827 status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, 5828 ¶ms, dp_stats_cmd_cb, 5829 (void *)comb_peer_id_tid); 5830 if (QDF_IS_STATUS_SUCCESS(status)) 5831 stats_cmd_sent_cnt++; 5832 5833 /* Flush REO descriptor from HW cache to update stats 5834 * in descriptor memory. This is to help debugging 5835 */ 5836 qdf_mem_zero(¶ms, sizeof(params)); 5837 params.std.need_status = 0; 5838 params.std.addr_lo = 5839 rx_tid->hw_qdesc_paddr & 0xffffffff; 5840 params.std.addr_hi = 5841 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 5842 params.u.fl_cache_params.flush_no_inval = 1; 5843 dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, 5844 NULL); 5845 } 5846 } 5847 5848 return stats_cmd_sent_cnt; 5849 } 5850 5851 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa); 5852 5853 #endif 5854 /** 5855 * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW 5856 * @peer: DP peer handle 5857 * @dp_stats_cmd_cb: REO command callback function 5858 * @cb_ctxt: Callback context 5859 * 5860 * Return: count of tid stats cmd send succeeded 5861 */ 5862 int dp_peer_rxtid_stats(struct dp_peer *peer, 5863 dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, 5864 void *cb_ctxt) 5865 { 5866 struct dp_soc *soc = peer->vdev->pdev->soc; 5867 struct hal_reo_cmd_params params; 5868 int i; 5869 int stats_cmd_sent_cnt = 0; 5870 QDF_STATUS status; 5871 5872 if (!dp_stats_cmd_cb) 5873 return stats_cmd_sent_cnt; 5874 5875 qdf_mem_zero(¶ms, sizeof(params)); 5876 for (i = 0; i < DP_MAX_TIDS; i++) { 5877 struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; 5878 if (rx_tid->hw_qdesc_vaddr_unaligned) { 5879 
params.std.need_status = 1; 5880 params.std.addr_lo = 5881 rx_tid->hw_qdesc_paddr & 0xffffffff; 5882 params.std.addr_hi = 5883 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 5884 5885 if (cb_ctxt) { 5886 status = dp_reo_send_cmd( 5887 soc, CMD_GET_QUEUE_STATS, 5888 ¶ms, dp_stats_cmd_cb, 5889 cb_ctxt); 5890 } else { 5891 status = dp_reo_send_cmd( 5892 soc, CMD_GET_QUEUE_STATS, 5893 ¶ms, dp_stats_cmd_cb, 5894 rx_tid); 5895 } 5896 5897 if (QDF_IS_STATUS_SUCCESS(status)) 5898 stats_cmd_sent_cnt++; 5899 5900 /* Flush REO descriptor from HW cache to update stats 5901 * in descriptor memory. This is to help debugging */ 5902 qdf_mem_zero(¶ms, sizeof(params)); 5903 params.std.need_status = 0; 5904 params.std.addr_lo = 5905 rx_tid->hw_qdesc_paddr & 0xffffffff; 5906 params.std.addr_hi = 5907 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 5908 params.u.fl_cache_params.flush_no_inval = 1; 5909 dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, 5910 NULL); 5911 } 5912 } 5913 5914 return stats_cmd_sent_cnt; 5915 } 5916 5917 QDF_STATUS 5918 dp_set_michael_key(struct cdp_soc_t *soc, 5919 uint8_t vdev_id, 5920 uint8_t *peer_mac, 5921 bool is_unicast, uint32_t *key) 5922 { 5923 uint8_t sec_index = is_unicast ? 
1 : 0; 5924 struct dp_peer *peer = 5925 dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc, 5926 peer_mac, 0, vdev_id, 5927 DP_MOD_ID_CDP); 5928 5929 if (!peer) { 5930 dp_peer_err("%pK: peer not found ", soc); 5931 return QDF_STATUS_E_FAILURE; 5932 } 5933 5934 qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0], 5935 key, IEEE80211_WEP_MICLEN); 5936 5937 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 5938 5939 return QDF_STATUS_SUCCESS; 5940 } 5941 5942 5943 /** 5944 * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev 5945 * @soc: DP soc 5946 * @vdev: vdev 5947 * @mod_id: id of module requesting reference 5948 * 5949 * Return: VDEV BSS peer 5950 */ 5951 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc, 5952 struct dp_vdev *vdev, 5953 enum dp_mod_id mod_id) 5954 { 5955 struct dp_peer *peer = NULL; 5956 5957 qdf_spin_lock_bh(&vdev->peer_list_lock); 5958 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 5959 if (peer->bss_peer) 5960 break; 5961 } 5962 5963 if (!peer) { 5964 qdf_spin_unlock_bh(&vdev->peer_list_lock); 5965 return NULL; 5966 } 5967 5968 if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) { 5969 qdf_spin_unlock_bh(&vdev->peer_list_lock); 5970 return peer; 5971 } 5972 5973 qdf_spin_unlock_bh(&vdev->peer_list_lock); 5974 return peer; 5975 } 5976 5977 /** 5978 * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev 5979 * @soc: DP soc 5980 * @vdev: vdev 5981 * @mod_id: id of module requesting reference 5982 * 5983 * Return: VDEV self peer 5984 */ 5985 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc, 5986 struct dp_vdev *vdev, 5987 enum dp_mod_id mod_id) 5988 { 5989 struct dp_peer *peer; 5990 5991 if (vdev->opmode != wlan_op_mode_sta) 5992 return NULL; 5993 5994 qdf_spin_lock_bh(&vdev->peer_list_lock); 5995 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { 5996 if (peer->sta_self_peer) 5997 break; 5998 } 5999 6000 if (!peer) { 6001 qdf_spin_unlock_bh(&vdev->peer_list_lock); 6002 return 
NULL; 6003 } 6004 6005 if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) { 6006 qdf_spin_unlock_bh(&vdev->peer_list_lock); 6007 return peer; 6008 } 6009 6010 qdf_spin_unlock_bh(&vdev->peer_list_lock); 6011 return peer; 6012 } 6013 6014 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR 6015 void dp_dump_rx_reo_queue_info( 6016 struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status) 6017 { 6018 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; 6019 6020 if (!rx_tid) 6021 return; 6022 6023 if (reo_status->fl_cache_status.header.status != 6024 HAL_REO_CMD_SUCCESS) { 6025 dp_err_rl("Rx tid REO HW desc flush failed(%d)", 6026 reo_status->rx_queue_status.header.status); 6027 return; 6028 } 6029 qdf_spin_lock_bh(&rx_tid->tid_lock); 6030 hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned); 6031 qdf_spin_unlock_bh(&rx_tid->tid_lock); 6032 } 6033 6034 void dp_send_cache_flush_for_rx_tid( 6035 struct dp_soc *soc, struct dp_peer *peer) 6036 { 6037 int i; 6038 struct dp_rx_tid *rx_tid; 6039 struct hal_reo_cmd_params params; 6040 6041 if (!peer) { 6042 dp_err_rl("Peer is NULL"); 6043 return; 6044 } 6045 6046 for (i = 0; i < DP_MAX_TIDS; i++) { 6047 rx_tid = &peer->rx_tid[i]; 6048 if (!rx_tid) 6049 continue; 6050 qdf_spin_lock_bh(&rx_tid->tid_lock); 6051 if (rx_tid->hw_qdesc_vaddr_aligned) { 6052 qdf_mem_zero(¶ms, sizeof(params)); 6053 params.std.need_status = 1; 6054 params.std.addr_lo = 6055 rx_tid->hw_qdesc_paddr & 0xffffffff; 6056 params.std.addr_hi = 6057 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 6058 params.u.fl_cache_params.flush_no_inval = 0; 6059 if (QDF_STATUS_SUCCESS != 6060 dp_reo_send_cmd( 6061 soc, CMD_FLUSH_CACHE, 6062 ¶ms, dp_dump_rx_reo_queue_info, 6063 (void *)rx_tid)) { 6064 dp_err_rl("cache flush send failed tid %d", 6065 rx_tid->tid); 6066 qdf_spin_unlock_bh(&rx_tid->tid_lock); 6067 break; 6068 } 6069 } 6070 qdf_spin_unlock_bh(&rx_tid->tid_lock); 6071 } 6072 } 6073 6074 void dp_get_rx_reo_queue_info( 6075 struct cdp_soc_t *soc_hdl, 
uint8_t vdev_id) 6076 { 6077 struct dp_soc *soc = (struct dp_soc *)soc_hdl; 6078 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, 6079 DP_MOD_ID_GENERIC_STATS); 6080 struct dp_peer *peer = NULL; 6081 6082 if (!vdev) { 6083 dp_err_rl("vdev is null for vdev_id: %u", vdev_id); 6084 goto failed; 6085 } 6086 6087 peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS); 6088 6089 if (!peer) { 6090 dp_err_rl("Peer is NULL"); 6091 goto failed; 6092 } 6093 dp_send_cache_flush_for_rx_tid(soc, peer); 6094 failed: 6095 if (peer) 6096 dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS); 6097 if (vdev) 6098 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS); 6099 } 6100 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */ 6101 6102 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 6103 uint8_t *peer_mac) 6104 { 6105 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); 6106 struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, 6107 vdev_id, 6108 DP_MOD_ID_CDP); 6109 struct dp_txrx_peer *txrx_peer; 6110 uint8_t tid; 6111 struct dp_rx_tid_defrag *defrag_rx_tid; 6112 6113 if (!peer) 6114 return; 6115 6116 if (!peer->txrx_peer) 6117 goto fail; 6118 6119 dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT, 6120 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 6121 6122 txrx_peer = peer->txrx_peer; 6123 6124 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 6125 defrag_rx_tid = &txrx_peer->rx_tid[tid]; 6126 6127 qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock); 6128 dp_rx_defrag_waitlist_remove(txrx_peer, tid); 6129 dp_rx_reorder_flush_frag(txrx_peer, tid); 6130 qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock); 6131 } 6132 fail: 6133 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 6134 } 6135 6136 /* 6137 * dp_peer_find_by_id_valid - check if peer exists for given id 6138 * @soc: core DP soc context 6139 * @peer_id: peer id from peer object can be retrieved 6140 * 6141 * Return: true if peer exists of false otherwise 6142 */ 6143 bool 
dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_HTT);

	if (!peer)
		return false;

	/*
	 * Decrement the peer ref which is taken as part of
	 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return true;
}

qdf_export_symbol(dp_peer_find_by_id_valid);