1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 #ifndef _DP_PEER_H_ 20 #define _DP_PEER_H_ 21 22 #include <qdf_types.h> 23 #include <qdf_lock.h> 24 #include "dp_types.h" 25 #include "dp_internal.h" 26 27 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR 28 #include "hal_reo.h" 29 #endif 30 31 #define DP_INVALID_PEER_ID 0xffff 32 33 #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */ 34 #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */ 35 36 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000 37 38 #define DP_PEER_HASH_LOAD_MULT 2 39 #define DP_PEER_HASH_LOAD_SHIFT 0 40 41 /* Threshold for peer's cached buf queue beyond which frames are dropped */ 42 #define DP_RX_CACHED_BUFQ_THRESH 64 43 44 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params) 45 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params) 46 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params) 47 #define dp_peer_info(params...) \ 48 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params) 49 #define dp_peer_debug(params...) 
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params) 50 51 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 52 /** 53 * enum dp_bands - WiFi Band 54 * 55 * @DP_BAND_INVALID: Invalid band 56 * @DP_BAND_2GHZ: 2GHz link 57 * @DP_BAND_5GHZ: 5GHz link 58 * @DP_BAND_6GHZ: 6GHz link 59 * @DP_BAND_UNKNOWN: Unknown band 60 */ 61 enum dp_bands { 62 DP_BAND_INVALID = 0, 63 DP_BAND_2GHZ = 1, 64 DP_BAND_5GHZ = 2, 65 DP_BAND_6GHZ = 3, 66 DP_BAND_UNKNOWN = 4, 67 }; 68 #endif 69 70 void check_free_list_for_invalid_flush(struct dp_soc *soc); 71 72 static inline 73 void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid, 74 struct dp_peer *peer, void *hw_qdesc_vaddr) 75 { 76 uint32_t max_list_size; 77 unsigned long curr_ts = qdf_get_system_timestamp(); 78 uint32_t qref_index = soc->free_addr_list_idx; 79 80 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 81 82 if (max_list_size == 0) 83 return; 84 85 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr = 86 rx_tid->hw_qdesc_paddr; 87 soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts; 88 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align = 89 hw_qdesc_vaddr; 90 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign = 91 rx_tid->hw_qdesc_vaddr_unaligned; 92 soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id; 93 soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid; 94 soc->alloc_addr_list_idx++; 95 96 if (soc->alloc_addr_list_idx == max_list_size) 97 soc->alloc_addr_list_idx = 0; 98 } 99 100 static inline 101 void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid) 102 { 103 uint32_t max_list_size; 104 unsigned long curr_ts = qdf_get_system_timestamp(); 105 uint32_t qref_index = soc->free_addr_list_idx; 106 107 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 108 109 if (max_list_size == 0) 110 return; 111 112 soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts; 113 soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr 
= 114 rx_tid->hw_qdesc_paddr; 115 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align = 116 rx_tid->hw_qdesc_vaddr_aligned; 117 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign = 118 rx_tid->hw_qdesc_vaddr_unaligned; 119 soc->free_addr_list_idx++; 120 121 if (soc->free_addr_list_idx == max_list_size) 122 soc->free_addr_list_idx = 0; 123 } 124 125 static inline 126 void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer, 127 uint32_t tid) 128 { 129 uint32_t max_list_size; 130 unsigned long curr_ts = qdf_get_system_timestamp(); 131 132 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 133 134 if (max_list_size == 0) 135 return; 136 137 soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts; 138 soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id; 139 soc->reo_write_list[soc->write_paddr_list_idx].paddr = 140 peer->rx_tid[tid].hw_qdesc_paddr; 141 soc->reo_write_list[soc->write_paddr_list_idx].tid = tid; 142 soc->write_paddr_list_idx++; 143 144 if (soc->write_paddr_list_idx == max_list_size) 145 soc->write_paddr_list_idx = 0; 146 } 147 148 #ifdef REO_QDESC_HISTORY 149 enum reo_qdesc_event_type { 150 REO_QDESC_UPDATE_CB = 0, 151 REO_QDESC_FREE, 152 }; 153 154 struct reo_qdesc_event { 155 qdf_dma_addr_t qdesc_addr; 156 uint64_t ts; 157 enum reo_qdesc_event_type type; 158 uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; 159 }; 160 #endif 161 162 struct ast_del_ctxt { 163 bool age; 164 int del_count; 165 }; 166 167 #ifdef QCA_SUPPORT_WDS_EXTENDED 168 /** 169 * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer 170 * 171 * @peer: DP peer context 172 * 173 * This API checks whether the peer is WDS_EXT peer or not 174 * 175 * Return: true in the wds_ext peer else flase 176 */ 177 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer) 178 { 179 return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init); 180 } 181 #else 182 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer) 183 { 
184 return false; 185 } 186 #endif 187 188 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer, 189 void *arg); 190 /** 191 * dp_peer_unref_delete() - unref and delete peer 192 * @peer: Datapath peer handle 193 * @id: ID of module releasing reference 194 * 195 */ 196 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id); 197 198 /** 199 * dp_txrx_peer_unref_delete() - unref and delete peer 200 * @handle: Datapath txrx ref handle 201 * @id: Module ID of the caller 202 * 203 */ 204 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id); 205 206 /** 207 * dp_peer_find_hash_find() - returns legacy or mlo link peer from 208 * peer_hash_table matching vdev_id and mac_address 209 * @soc: soc handle 210 * @peer_mac_addr: peer mac address 211 * @mac_addr_is_aligned: is mac addr aligned 212 * @vdev_id: vdev_id 213 * @mod_id: id of module requesting reference 214 * 215 * return: peer in success 216 * NULL in failure 217 */ 218 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, 219 uint8_t *peer_mac_addr, 220 int mac_addr_is_aligned, 221 uint8_t vdev_id, 222 enum dp_mod_id mod_id); 223 224 /** 225 * dp_peer_find_by_id_valid - check if peer exists for given id 226 * @soc: core DP soc context 227 * @peer_id: peer id from peer object can be retrieved 228 * 229 * Return: true if peer exists of false otherwise 230 */ 231 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id); 232 233 /** 234 * dp_peer_get_ref() - Returns peer object given the peer id 235 * 236 * @soc: core DP soc context 237 * @peer: DP peer 238 * @mod_id: id of module requesting the reference 239 * 240 * Return: QDF_STATUS_SUCCESS if reference held successfully 241 * else QDF_STATUS_E_INVAL 242 */ 243 static inline 244 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc, 245 struct dp_peer *peer, 246 enum dp_mod_id mod_id) 247 { 248 if (!qdf_atomic_inc_not_zero(&peer->ref_cnt)) 249 return QDF_STATUS_E_INVAL; 250 251 if (mod_id > DP_MOD_ID_RX) 
252 qdf_atomic_inc(&peer->mod_refs[mod_id]); 253 254 return QDF_STATUS_SUCCESS; 255 } 256 257 /** 258 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id 259 * 260 * @soc: core DP soc context 261 * @peer_id: peer id from peer object can be retrieved 262 * @mod_id: module id 263 * 264 * Return: struct dp_peer*: Pointer to DP peer object 265 */ 266 static inline struct dp_peer * 267 __dp_peer_get_ref_by_id(struct dp_soc *soc, 268 uint16_t peer_id, 269 enum dp_mod_id mod_id) 270 271 { 272 struct dp_peer *peer; 273 274 qdf_spin_lock_bh(&soc->peer_map_lock); 275 peer = (peer_id >= soc->max_peer_id) ? NULL : 276 soc->peer_id_to_obj_map[peer_id]; 277 if (!peer || 278 (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) { 279 qdf_spin_unlock_bh(&soc->peer_map_lock); 280 return NULL; 281 } 282 283 qdf_spin_unlock_bh(&soc->peer_map_lock); 284 return peer; 285 } 286 287 /** 288 * dp_peer_get_ref_by_id() - Returns peer object given the peer id 289 * if peer state is active 290 * 291 * @soc: core DP soc context 292 * @peer_id: peer id from peer object can be retrieved 293 * @mod_id: ID of module requesting reference 294 * 295 * Return: struct dp_peer*: Pointer to DP peer object 296 */ 297 static inline 298 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc, 299 uint16_t peer_id, 300 enum dp_mod_id mod_id) 301 { 302 struct dp_peer *peer; 303 304 qdf_spin_lock_bh(&soc->peer_map_lock); 305 peer = (peer_id >= soc->max_peer_id) ? 
NULL : 306 soc->peer_id_to_obj_map[peer_id]; 307 308 if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE || 309 (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) { 310 qdf_spin_unlock_bh(&soc->peer_map_lock); 311 return NULL; 312 } 313 314 qdf_spin_unlock_bh(&soc->peer_map_lock); 315 316 return peer; 317 } 318 319 /** 320 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id 321 * 322 * @soc: core DP soc context 323 * @peer_id: peer id from peer object can be retrieved 324 * @handle: reference handle 325 * @mod_id: ID of module requesting reference 326 * 327 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object 328 */ 329 static inline struct dp_txrx_peer * 330 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc, 331 uint16_t peer_id, 332 dp_txrx_ref_handle *handle, 333 enum dp_mod_id mod_id) 334 335 { 336 struct dp_peer *peer; 337 338 peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id); 339 if (!peer) 340 return NULL; 341 342 if (!peer->txrx_peer) { 343 dp_peer_unref_delete(peer, mod_id); 344 return NULL; 345 } 346 347 *handle = (dp_txrx_ref_handle)peer; 348 return peer->txrx_peer; 349 } 350 351 #ifdef PEER_CACHE_RX_PKTS 352 /** 353 * dp_rx_flush_rx_cached() - flush cached rx frames 354 * @peer: peer 355 * @drop: set flag to drop frames 356 * 357 * Return: None 358 */ 359 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop); 360 #else 361 static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) 362 { 363 } 364 #endif 365 366 static inline void 367 dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer) 368 { 369 qdf_spin_lock_bh(&peer->peer_info_lock); 370 peer->state = OL_TXRX_PEER_STATE_DISC; 371 qdf_spin_unlock_bh(&peer->peer_info_lock); 372 373 dp_rx_flush_rx_cached(peer, true); 374 } 375 376 /** 377 * dp_vdev_iterate_peer() - API to iterate through vdev peer list 378 * 379 * @vdev: DP vdev context 380 * @func: function to be called for each peer 381 * @arg: argument need to be passed 
to func 382 * @mod_id: module_id 383 * 384 * Return: void 385 */ 386 static inline void 387 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg, 388 enum dp_mod_id mod_id) 389 { 390 struct dp_peer *peer; 391 struct dp_peer *tmp_peer; 392 struct dp_soc *soc = NULL; 393 394 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 395 return; 396 397 soc = vdev->pdev->soc; 398 399 qdf_spin_lock_bh(&vdev->peer_list_lock); 400 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 401 peer_list_elem, 402 tmp_peer) { 403 if (dp_peer_get_ref(soc, peer, mod_id) == 404 QDF_STATUS_SUCCESS) { 405 (*func)(soc, peer, arg); 406 dp_peer_unref_delete(peer, mod_id); 407 } 408 } 409 qdf_spin_unlock_bh(&vdev->peer_list_lock); 410 } 411 412 /** 413 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev 414 * 415 * @pdev: DP pdev context 416 * @func: function to be called for each peer 417 * @arg: argument need to be passed to func 418 * @mod_id: module_id 419 * 420 * Return: void 421 */ 422 static inline void 423 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg, 424 enum dp_mod_id mod_id) 425 { 426 struct dp_vdev *vdev; 427 428 if (!pdev) 429 return; 430 431 qdf_spin_lock_bh(&pdev->vdev_list_lock); 432 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) 433 dp_vdev_iterate_peer(vdev, func, arg, mod_id); 434 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 435 } 436 437 /** 438 * dp_soc_iterate_peer() - API to iterate through all peers of soc 439 * 440 * @soc: DP soc context 441 * @func: function to be called for each peer 442 * @arg: argument need to be passed to func 443 * @mod_id: module_id 444 * 445 * Return: void 446 */ 447 static inline void 448 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg, 449 enum dp_mod_id mod_id) 450 { 451 struct dp_pdev *pdev; 452 int i; 453 454 if (!soc) 455 return; 456 457 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { 458 pdev = soc->pdev_list[i]; 459 dp_pdev_iterate_peer(pdev, func, arg, 
mod_id); 460 } 461 } 462 463 /** 464 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list 465 * 466 * This API will cache the peers in local allocated memory and calls 467 * iterate function outside the lock. 468 * 469 * As this API is allocating new memory it is suggested to use this 470 * only when lock cannot be held 471 * 472 * @vdev: DP vdev context 473 * @func: function to be called for each peer 474 * @arg: argument need to be passed to func 475 * @mod_id: module_id 476 * 477 * Return: void 478 */ 479 static inline void 480 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev, 481 dp_peer_iter_func *func, 482 void *arg, 483 enum dp_mod_id mod_id) 484 { 485 struct dp_peer *peer; 486 struct dp_peer *tmp_peer; 487 struct dp_soc *soc = NULL; 488 struct dp_peer **peer_array = NULL; 489 int i = 0; 490 uint32_t num_peers = 0; 491 492 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 493 return; 494 495 num_peers = vdev->num_peers; 496 497 soc = vdev->pdev->soc; 498 499 peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *)); 500 if (!peer_array) 501 return; 502 503 qdf_spin_lock_bh(&vdev->peer_list_lock); 504 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 505 peer_list_elem, 506 tmp_peer) { 507 if (i >= num_peers) 508 break; 509 510 if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) { 511 peer_array[i] = peer; 512 i = (i + 1); 513 } 514 } 515 qdf_spin_unlock_bh(&vdev->peer_list_lock); 516 517 for (i = 0; i < num_peers; i++) { 518 peer = peer_array[i]; 519 520 if (!peer) 521 continue; 522 523 (*func)(soc, peer, arg); 524 dp_peer_unref_delete(peer, mod_id); 525 } 526 527 qdf_mem_free(peer_array); 528 } 529 530 /** 531 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev 532 * 533 * This API will cache the peers in local allocated memory and calls 534 * iterate function outside the lock. 
535 * 536 * As this API is allocating new memory it is suggested to use this 537 * only when lock cannot be held 538 * 539 * @pdev: DP pdev context 540 * @func: function to be called for each peer 541 * @arg: argument need to be passed to func 542 * @mod_id: module_id 543 * 544 * Return: void 545 */ 546 static inline void 547 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev, 548 dp_peer_iter_func *func, 549 void *arg, 550 enum dp_mod_id mod_id) 551 { 552 struct dp_peer *peer; 553 struct dp_peer *tmp_peer; 554 struct dp_soc *soc = NULL; 555 struct dp_vdev *vdev = NULL; 556 struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0}; 557 int i = 0; 558 int j = 0; 559 uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0}; 560 561 if (!pdev || !pdev->soc) 562 return; 563 564 soc = pdev->soc; 565 566 qdf_spin_lock_bh(&pdev->vdev_list_lock); 567 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 568 num_peers[i] = vdev->num_peers; 569 peer_array[i] = qdf_mem_malloc(num_peers[i] * 570 sizeof(struct dp_peer *)); 571 if (!peer_array[i]) 572 break; 573 574 qdf_spin_lock_bh(&vdev->peer_list_lock); 575 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 576 peer_list_elem, 577 tmp_peer) { 578 if (j >= num_peers[i]) 579 break; 580 581 if (dp_peer_get_ref(soc, peer, mod_id) == 582 QDF_STATUS_SUCCESS) { 583 peer_array[i][j] = peer; 584 585 j = (j + 1); 586 } 587 } 588 qdf_spin_unlock_bh(&vdev->peer_list_lock); 589 i = (i + 1); 590 } 591 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 592 593 for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) { 594 if (!peer_array[i]) 595 break; 596 597 for (j = 0; j < num_peers[i]; j++) { 598 peer = peer_array[i][j]; 599 600 if (!peer) 601 continue; 602 603 (*func)(soc, peer, arg); 604 dp_peer_unref_delete(peer, mod_id); 605 } 606 607 qdf_mem_free(peer_array[i]); 608 } 609 } 610 611 /** 612 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc 613 * 614 * This API will cache the peers in local allocated memory and calls 615 * iterate function outside the lock. 
616 * 617 * As this API is allocating new memory it is suggested to use this 618 * only when lock cannot be held 619 * 620 * @soc: DP soc context 621 * @func: function to be called for each peer 622 * @arg: argument need to be passed to func 623 * @mod_id: module_id 624 * 625 * Return: void 626 */ 627 static inline void 628 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc, 629 dp_peer_iter_func *func, 630 void *arg, 631 enum dp_mod_id mod_id) 632 { 633 struct dp_pdev *pdev; 634 int i; 635 636 if (!soc) 637 return; 638 639 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { 640 pdev = soc->pdev_list[i]; 641 dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id); 642 } 643 } 644 645 #ifdef DP_PEER_STATE_DEBUG 646 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \ 647 do { \ 648 if (!(_condition)) { \ 649 dp_alert("Invalid state shift from %u to %u peer " \ 650 QDF_MAC_ADDR_FMT, \ 651 (_peer)->peer_state, (_new_state), \ 652 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \ 653 QDF_ASSERT(0); \ 654 } \ 655 } while (0) 656 657 #else 658 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \ 659 do { \ 660 if (!(_condition)) { \ 661 dp_alert("Invalid state shift from %u to %u peer " \ 662 QDF_MAC_ADDR_FMT, \ 663 (_peer)->peer_state, (_new_state), \ 664 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \ 665 } \ 666 } while (0) 667 #endif 668 669 /** 670 * dp_peer_state_cmp() - compare dp peer state 671 * 672 * @peer: DP peer 673 * @state: state 674 * 675 * Return: true if state matches with peer state 676 * false if it does not match 677 */ 678 static inline bool 679 dp_peer_state_cmp(struct dp_peer *peer, 680 enum dp_peer_state state) 681 { 682 bool is_status_equal = false; 683 684 qdf_spin_lock_bh(&peer->peer_state_lock); 685 is_status_equal = (peer->peer_state == state); 686 qdf_spin_unlock_bh(&peer->peer_state_lock); 687 688 return is_status_equal; 689 } 690 691 /** 692 * dp_print_ast_stats() - Dump AST table contents 693 * @soc: Datapath soc handle 694 * 
695 * Return: void 696 */ 697 void dp_print_ast_stats(struct dp_soc *soc); 698 699 /** 700 * dp_rx_peer_map_handler() - handle peer map event from firmware 701 * @soc: generic soc handle 702 * @peer_id: peer_id from firmware 703 * @hw_peer_id: ast index for this peer 704 * @vdev_id: vdev ID 705 * @peer_mac_addr: mac address of the peer 706 * @ast_hash: ast hash value 707 * @is_wds: flag to indicate peer map event for WDS ast entry 708 * 709 * associate the peer_id that firmware provided with peer entry 710 * and update the ast table in the host with the hw_peer_id. 711 * 712 * Return: QDF_STATUS code 713 */ 714 715 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, 716 uint16_t hw_peer_id, uint8_t vdev_id, 717 uint8_t *peer_mac_addr, uint16_t ast_hash, 718 uint8_t is_wds); 719 720 /** 721 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware 722 * @soc: generic soc handle 723 * @peer_id: peer_id from firmware 724 * @vdev_id: vdev ID 725 * @peer_mac_addr: mac address of the peer or wds entry 726 * @is_wds: flag to indicate peer map event for WDS ast entry 727 * @free_wds_count: number of wds entries freed by FW with peer delete 728 * 729 * Return: none 730 */ 731 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id, 732 uint8_t vdev_id, uint8_t *peer_mac_addr, 733 uint8_t is_wds, uint32_t free_wds_count); 734 735 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 736 /** 737 * dp_rx_peer_ext_evt() - handle peer extended event from firmware 738 * @soc: DP soc handle 739 * @info: extended evt info 740 * 741 * 742 * Return: QDF_STATUS 743 */ 744 745 QDF_STATUS 746 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info); 747 #endif 748 #ifdef DP_RX_UDP_OVER_PEER_ROAM 749 /** 750 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev 751 * @soc: dp soc pointer 752 * @vdev_id: vdev id 753 * @peer_mac_addr: mac address of the peer 754 * 755 * This function resets the roamed peer 
auth status and mac address 756 * after peer map indication of same peer is received from firmware. 757 * 758 * Return: None 759 */ 760 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id, 761 uint8_t *peer_mac_addr); 762 #else 763 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id, 764 uint8_t *peer_mac_addr) 765 { 766 } 767 #endif 768 769 #ifdef WLAN_FEATURE_11BE_MLO 770 /** 771 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware 772 * @soc: generic soc handle 773 * @peer_id: ML peer_id from firmware 774 * @peer_mac_addr: mac address of the peer 775 * @mlo_flow_info: MLO AST flow info 776 * @mlo_link_info: MLO link info 777 * 778 * associate the ML peer_id that firmware provided with peer entry 779 * and update the ast table in the host with the hw_peer_id. 780 * 781 * Return: QDF_STATUS code 782 */ 783 QDF_STATUS 784 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, 785 uint8_t *peer_mac_addr, 786 struct dp_mlo_flow_override_info *mlo_flow_info, 787 struct dp_mlo_link_info *mlo_link_info); 788 789 /** 790 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware 791 * @soc: generic soc handle 792 * @peer_id: peer_id from firmware 793 * 794 * Return: none 795 */ 796 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id); 797 #endif 798 799 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, 800 enum cdp_sec_type sec_type, int is_unicast, 801 u_int32_t *michael_key, u_int32_t *rx_pn); 802 803 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, 804 uint16_t peer_id, uint8_t *peer_mac); 805 806 /** 807 * dp_peer_add_ast() - Allocate and add AST entry into peer list 808 * @soc: SoC handle 809 * @peer: peer to which ast node belongs 810 * @mac_addr: MAC address of ast node 811 * @type: AST entry type 812 * @flags: AST configuration flags 813 * 814 * This API is used by WDS source port learning function to 815 * add a new AST 
entry into peer AST list 816 * 817 * Return: QDF_STATUS code 818 */ 819 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, 820 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, 821 uint32_t flags); 822 823 /** 824 * dp_peer_del_ast() - Delete and free AST entry 825 * @soc: SoC handle 826 * @ast_entry: AST entry of the node 827 * 828 * This function removes the AST entry from peer and soc tables 829 * It assumes caller has taken the ast lock to protect the access to these 830 * tables 831 * 832 * Return: None 833 */ 834 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry); 835 836 void dp_peer_ast_unmap_handler(struct dp_soc *soc, 837 struct dp_ast_entry *ast_entry); 838 839 /** 840 * dp_peer_update_ast() - Delete and free AST entry 841 * @soc: SoC handle 842 * @peer: peer to which ast node belongs 843 * @ast_entry: AST entry of the node 844 * @flags: wds or hmwds 845 * 846 * This function update the AST entry to the roamed peer and soc tables 847 * It assumes caller has taken the ast lock to protect the access to these 848 * tables 849 * 850 * Return: 0 if ast entry is updated successfully 851 * -1 failure 852 */ 853 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, 854 struct dp_ast_entry *ast_entry, uint32_t flags); 855 856 /** 857 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address 858 * @soc: SoC handle 859 * @ast_mac_addr: Mac address 860 * @pdev_id: pdev Id 861 * 862 * It assumes caller has taken the ast lock to protect the access to 863 * AST hash table 864 * 865 * Return: AST entry 866 */ 867 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, 868 uint8_t *ast_mac_addr, 869 uint8_t pdev_id); 870 871 /** 872 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address 873 * @soc: SoC handle 874 * @ast_mac_addr: Mac address 875 * @vdev_id: vdev Id 876 * 877 * It assumes caller has taken the ast lock to protect the access to 878 * AST hash table 879 * 880 * 
Return: AST entry 881 */ 882 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc, 883 uint8_t *ast_mac_addr, 884 uint8_t vdev_id); 885 886 /** 887 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address 888 * @soc: SoC handle 889 * @ast_mac_addr: Mac address 890 * 891 * It assumes caller has taken the ast lock to protect the access to 892 * AST hash table 893 * 894 * Return: AST entry 895 */ 896 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, 897 uint8_t *ast_mac_addr); 898 899 /** 900 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry 901 * @soc: SoC handle 902 * @ast_entry: AST entry of the node 903 * 904 * This function gets the pdev_id from the ast entry. 905 * 906 * Return: (uint8_t) pdev_id 907 */ 908 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, 909 struct dp_ast_entry *ast_entry); 910 911 912 /** 913 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry 914 * @soc: SoC handle 915 * @ast_entry: AST entry of the node 916 * 917 * This function gets the next hop from the ast entry. 918 * 919 * Return: (uint8_t) next_hop 920 */ 921 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 922 struct dp_ast_entry *ast_entry); 923 924 /** 925 * dp_peer_ast_set_type() - set type from the ast entry 926 * @soc: SoC handle 927 * @ast_entry: AST entry of the node 928 * @type: AST entry type 929 * 930 * This function sets the type in the ast entry. 
931 * 932 * Return: 933 */ 934 void dp_peer_ast_set_type(struct dp_soc *soc, 935 struct dp_ast_entry *ast_entry, 936 enum cdp_txrx_ast_entry_type type); 937 938 void dp_peer_ast_send_wds_del(struct dp_soc *soc, 939 struct dp_ast_entry *ast_entry, 940 struct dp_peer *peer); 941 942 #ifdef WLAN_FEATURE_MULTI_AST_DEL 943 void dp_peer_ast_send_multi_wds_del( 944 struct dp_soc *soc, uint8_t vdev_id, 945 struct peer_del_multi_wds_entries *wds_list); 946 #endif 947 948 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 949 struct cdp_soc *dp_soc, 950 void *cookie, 951 enum cdp_ast_free_status status); 952 953 /** 954 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table 955 * @soc: SoC handle 956 * @ase: Address search entry 957 * 958 * This function removes the AST entry from soc AST hash table 959 * It assumes caller has taken the ast lock to protect the access to this table 960 * 961 * Return: None 962 */ 963 void dp_peer_ast_hash_remove(struct dp_soc *soc, 964 struct dp_ast_entry *ase); 965 966 /** 967 * dp_peer_free_ast_entry() - Free up the ast entry memory 968 * @soc: SoC handle 969 * @ast_entry: Address search entry 970 * 971 * This API is used to free up the memory associated with 972 * AST entry. 973 * 974 * Return: None 975 */ 976 void dp_peer_free_ast_entry(struct dp_soc *soc, 977 struct dp_ast_entry *ast_entry); 978 979 /** 980 * dp_peer_unlink_ast_entry() - Free up the ast entry memory 981 * @soc: SoC handle 982 * @ast_entry: Address search entry 983 * @peer: peer 984 * 985 * This API is used to remove/unlink AST entry from the peer list 986 * and hash list. 
987 * 988 * Return: None 989 */ 990 void dp_peer_unlink_ast_entry(struct dp_soc *soc, 991 struct dp_ast_entry *ast_entry, 992 struct dp_peer *peer); 993 994 /** 995 * dp_peer_mec_detach_entry() - Detach the MEC entry 996 * @soc: SoC handle 997 * @mecentry: MEC entry of the node 998 * @ptr: pointer to free list 999 * 1000 * The MEC entry is detached from MEC table and added to free_list 1001 * to free the object outside lock 1002 * 1003 * Return: None 1004 */ 1005 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry, 1006 void *ptr); 1007 1008 /** 1009 * dp_peer_mec_free_list() - free the MEC entry from free_list 1010 * @soc: SoC handle 1011 * @ptr: pointer to free list 1012 * 1013 * Return: None 1014 */ 1015 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr); 1016 1017 /** 1018 * dp_peer_mec_add_entry() 1019 * @soc: SoC handle 1020 * @vdev: vdev to which mec node belongs 1021 * @mac_addr: MAC address of mec node 1022 * 1023 * This function allocates and adds MEC entry to MEC table. 
1024 * It assumes caller has taken the mec lock to protect the access to these 1025 * tables 1026 * 1027 * Return: QDF_STATUS 1028 */ 1029 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc, 1030 struct dp_vdev *vdev, 1031 uint8_t *mac_addr); 1032 1033 /** 1034 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id 1035 * within pdev 1036 * @soc: SoC handle 1037 * @pdev_id: pdev Id 1038 * @mec_mac_addr: MAC address of mec node 1039 * 1040 * It assumes caller has taken the mec_lock to protect the access to 1041 * MEC hash table 1042 * 1043 * Return: MEC entry 1044 */ 1045 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc, 1046 uint8_t pdev_id, 1047 uint8_t *mec_mac_addr); 1048 1049 #define DP_AST_ASSERT(_condition) \ 1050 do { \ 1051 if (!(_condition)) { \ 1052 dp_print_ast_stats(soc);\ 1053 QDF_BUG(_condition); \ 1054 } \ 1055 } while (0) 1056 1057 /** 1058 * dp_peer_update_inactive_time() - Update inactive time for peer 1059 * @pdev: pdev object 1060 * @tag_type: htt_tlv_tag type 1061 * @tag_buf: buf message 1062 */ 1063 void 1064 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type, 1065 uint32_t *tag_buf); 1066 1067 #ifndef QCA_MULTIPASS_SUPPORT 1068 static inline 1069 /** 1070 * dp_peer_set_vlan_id() - set vlan_id for this peer 1071 * @cdp_soc: soc handle 1072 * @vdev_id: id of vdev object 1073 * @peer_mac: mac address 1074 * @vlan_id: vlan id for peer 1075 * 1076 * Return: void 1077 */ 1078 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, 1079 uint8_t vdev_id, uint8_t *peer_mac, 1080 uint16_t vlan_id) 1081 { 1082 } 1083 1084 /** 1085 * dp_set_vlan_groupkey() - set vlan map for vdev 1086 * @soc_hdl: pointer to soc 1087 * @vdev_id: id of vdev handle 1088 * @vlan_id: vlan_id 1089 * @group_key: group key for vlan 1090 * 1091 * Return: set success/failure 1092 */ 1093 static inline 1094 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 1095 uint16_t vlan_id, uint16_t group_key) 1096 { 1097 
return QDF_STATUS_SUCCESS; 1098 } 1099 1100 /** 1101 * dp_peer_multipass_list_init() - initialize multipass peer list 1102 * @vdev: pointer to vdev 1103 * 1104 * Return: void 1105 */ 1106 static inline 1107 void dp_peer_multipass_list_init(struct dp_vdev *vdev) 1108 { 1109 } 1110 1111 /** 1112 * dp_peer_multipass_list_remove() - remove peer from special peer list 1113 * @peer: peer handle 1114 * 1115 * Return: void 1116 */ 1117 static inline 1118 void dp_peer_multipass_list_remove(struct dp_peer *peer) 1119 { 1120 } 1121 #else 1122 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, 1123 uint8_t vdev_id, uint8_t *peer_mac, 1124 uint16_t vlan_id); 1125 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id, 1126 uint16_t vlan_id, uint16_t group_key); 1127 void dp_peer_multipass_list_init(struct dp_vdev *vdev); 1128 void dp_peer_multipass_list_remove(struct dp_peer *peer); 1129 #endif 1130 1131 1132 #ifndef QCA_PEER_MULTIQ_SUPPORT 1133 /** 1134 * dp_peer_reset_flowq_map() - reset peer flowq map table 1135 * @peer: dp peer handle 1136 * 1137 * Return: none 1138 */ 1139 static inline 1140 void dp_peer_reset_flowq_map(struct dp_peer *peer) 1141 { 1142 } 1143 1144 /** 1145 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map 1146 * @soc_hdl: generic soc handle 1147 * @is_wds: flag to indicate if peer is wds 1148 * @peer_id: peer_id from htt peer map message 1149 * @peer_mac_addr: mac address of the peer 1150 * @ast_info: ast flow override information from peer map 1151 * 1152 * Return: none 1153 */ 1154 static inline 1155 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, 1156 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, 1157 struct dp_ast_flow_override_info *ast_info) 1158 { 1159 } 1160 #else 1161 void dp_peer_reset_flowq_map(struct dp_peer *peer); 1162 1163 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, 1164 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, 1165 struct 
dp_ast_flow_override_info *ast_info); 1166 #endif 1167 1168 #ifdef QCA_PEER_EXT_STATS 1169 /** 1170 * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content 1171 * @soc: DP SoC context 1172 * @txrx_peer: DP txrx peer context 1173 * 1174 * Allocate the peer delay stats context 1175 * 1176 * Return: QDF_STATUS_SUCCESS if allocation is 1177 * successful 1178 */ 1179 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc, 1180 struct dp_txrx_peer *txrx_peer); 1181 1182 /** 1183 * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context 1184 * @soc: DP SoC context 1185 * @txrx_peer: txrx DP peer context 1186 * 1187 * Free the peer delay stats context 1188 * 1189 * Return: Void 1190 */ 1191 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc, 1192 struct dp_txrx_peer *txrx_peer); 1193 1194 /** 1195 * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer 1196 * @txrx_peer: dp_txrx_peer handle 1197 * 1198 * Return: void 1199 */ 1200 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer); 1201 #else 1202 static inline 1203 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc, 1204 struct dp_txrx_peer *txrx_peer) 1205 { 1206 return QDF_STATUS_SUCCESS; 1207 } 1208 1209 static inline 1210 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc, 1211 struct dp_txrx_peer *txrx_peer) 1212 { 1213 } 1214 1215 static inline 1216 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer) 1217 { 1218 } 1219 #endif 1220 1221 #ifdef WLAN_PEER_JITTER 1222 /** 1223 * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer 1224 * @pdev: Datapath pdev handle 1225 * @txrx_peer: dp_txrx_peer handle 1226 * 1227 * Return: QDF_STATUS 1228 */ 1229 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev, 1230 struct dp_txrx_peer *txrx_peer); 1231 1232 /** 1233 * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context 1234 * @pdev: Datapath pdev handle 1235 * @txrx_peer: dp_txrx_peer 
handle 1236 * 1237 * Return: void 1238 */ 1239 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev, 1240 struct dp_txrx_peer *txrx_peer); 1241 1242 /** 1243 * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer 1244 * @txrx_peer: dp_txrx_peer handle 1245 * 1246 * Return: void 1247 */ 1248 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer); 1249 #else 1250 static inline 1251 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev, 1252 struct dp_txrx_peer *txrx_peer) 1253 { 1254 return QDF_STATUS_SUCCESS; 1255 } 1256 1257 static inline 1258 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev, 1259 struct dp_txrx_peer *txrx_peer) 1260 { 1261 } 1262 1263 static inline 1264 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer) 1265 { 1266 } 1267 #endif 1268 1269 #ifndef CONFIG_SAWF_DEF_QUEUES 1270 static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc, 1271 struct dp_peer *peer) 1272 { 1273 return QDF_STATUS_SUCCESS; 1274 } 1275 1276 static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc, 1277 struct dp_peer *peer) 1278 { 1279 return QDF_STATUS_SUCCESS; 1280 } 1281 1282 #endif 1283 1284 #ifndef CONFIG_SAWF 1285 static inline 1286 QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc, 1287 struct dp_txrx_peer *txrx_peer) 1288 { 1289 return QDF_STATUS_SUCCESS; 1290 } 1291 1292 static inline 1293 QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc, 1294 struct dp_txrx_peer *txrx_peer) 1295 { 1296 return QDF_STATUS_SUCCESS; 1297 } 1298 #endif 1299 1300 /** 1301 * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev 1302 * @soc: DP soc 1303 * @vdev: vdev 1304 * @mod_id: id of module requesting reference 1305 * 1306 * Return: VDEV BSS peer 1307 */ 1308 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc, 1309 struct dp_vdev *vdev, 1310 enum dp_mod_id mod_id); 1311 1312 /** 1313 * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev 1314 * @soc: DP soc 1315 * @vdev: 
vdev 1316 * @mod_id: id of module requesting reference 1317 * 1318 * Return: VDEV self peer 1319 */ 1320 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc, 1321 struct dp_vdev *vdev, 1322 enum dp_mod_id mod_id); 1323 1324 void dp_peer_ast_table_detach(struct dp_soc *soc); 1325 1326 /** 1327 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map 1328 * @soc: soc handle 1329 * 1330 * Return: none 1331 */ 1332 void dp_peer_find_map_detach(struct dp_soc *soc); 1333 1334 void dp_soc_wds_detach(struct dp_soc *soc); 1335 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc); 1336 1337 /** 1338 * dp_find_peer_by_macaddr() - Finding the peer from mac address provided. 1339 * @soc: soc handle 1340 * @mac_addr: MAC address to be used to find peer 1341 * @vdev_id: VDEV id 1342 * @mod_id: MODULE ID 1343 * 1344 * Return: struct dp_peer 1345 */ 1346 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr, 1347 uint8_t vdev_id, enum dp_mod_id mod_id); 1348 /** 1349 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table 1350 * @soc: SoC handle 1351 * 1352 * Return: QDF_STATUS 1353 */ 1354 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc); 1355 1356 /** 1357 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table 1358 * @soc: SoC handle 1359 * 1360 * Return: QDF_STATUS 1361 */ 1362 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc); 1363 1364 /** 1365 * dp_del_wds_entry_wrapper() - delete a WDS AST entry 1366 * @soc: DP soc structure pointer 1367 * @vdev_id: vdev_id 1368 * @wds_macaddr: MAC address of ast node 1369 * @type: type from enum cdp_txrx_ast_entry_type 1370 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw 1371 * 1372 * This API is used to delete an AST entry from fw 1373 * 1374 * Return: None 1375 */ 1376 void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id, 1377 uint8_t *wds_macaddr, uint8_t type, 1378 uint8_t delete_in_fw); 1379 1380 void 
dp_soc_wds_attach(struct dp_soc *soc); 1381 1382 /** 1383 * dp_peer_mec_hash_detach() - Free MEC Hash table 1384 * @soc: SoC handle 1385 * 1386 * Return: None 1387 */ 1388 void dp_peer_mec_hash_detach(struct dp_soc *soc); 1389 1390 /** 1391 * dp_peer_ast_hash_detach() - Free AST Hash table 1392 * @soc: SoC handle 1393 * 1394 * Return: None 1395 */ 1396 void dp_peer_ast_hash_detach(struct dp_soc *soc); 1397 1398 #ifdef FEATURE_AST 1399 /** 1400 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer 1401 * @soc: datapath soc handle 1402 * @peer: datapath peer handle 1403 * 1404 * Delete the AST entries belonging to a peer 1405 */ 1406 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc, 1407 struct dp_peer *peer) 1408 { 1409 struct dp_ast_entry *ast_entry, *temp_ast_entry; 1410 1411 dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry); 1412 /* 1413 * Delete peer self ast entry. This is done to handle scenarios 1414 * where peer is freed before peer map is received(for ex in case 1415 * of auth disallow due to ACL) in such cases self ast is not added 1416 * to peer->ast_list. 
/**
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address (union overlay of raw bytes)
 * @mac_addr2: second MAC address
 *
 * Compares the two addresses as one 4-byte and one 2-byte word via the
 * align4 view instead of a byte-by-byte memcmp.
 *
 * Return: 0 if the addresses are equal, non-zero otherwise
 * (i.e. memcmp-style "0 means match" semantics).
 */
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
soc->arch_ops.mlo_get_chip_id(soc); 1554 1555 return 0; 1556 } 1557 1558 static inline struct dp_peer * 1559 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc, 1560 uint8_t *peer_mac_addr, 1561 int mac_addr_is_aligned, 1562 uint8_t vdev_id, 1563 uint8_t chip_id, 1564 enum dp_mod_id mod_id) 1565 { 1566 if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id) 1567 return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id 1568 (soc, peer_mac_addr, 1569 mac_addr_is_aligned, 1570 vdev_id, chip_id, 1571 mod_id); 1572 1573 return NULL; 1574 } 1575 #else 1576 static inline uint8_t dp_get_chip_id(struct dp_soc *soc) 1577 { 1578 return 0; 1579 } 1580 1581 static inline struct dp_peer * 1582 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc, 1583 uint8_t *peer_mac_addr, 1584 int mac_addr_is_aligned, 1585 uint8_t vdev_id, 1586 uint8_t chip_id, 1587 enum dp_mod_id mod_id) 1588 { 1589 return dp_peer_find_hash_find(soc, peer_mac_addr, 1590 mac_addr_is_aligned, 1591 vdev_id, mod_id); 1592 } 1593 #endif 1594 1595 /** 1596 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table 1597 * matching mac_address 1598 * @soc: soc handle 1599 * @peer_mac_addr: mld peer mac address 1600 * @mac_addr_is_aligned: is mac addr aligned 1601 * @vdev_id: vdev_id 1602 * @mod_id: id of module requesting reference 1603 * 1604 * Return: peer in success 1605 * NULL in failure 1606 */ 1607 static inline 1608 struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc, 1609 uint8_t *peer_mac_addr, 1610 int mac_addr_is_aligned, 1611 uint8_t vdev_id, 1612 enum dp_mod_id mod_id) 1613 { 1614 if (soc->arch_ops.mlo_peer_find_hash_find) 1615 return soc->arch_ops.mlo_peer_find_hash_find(soc, 1616 peer_mac_addr, 1617 mac_addr_is_aligned, 1618 mod_id, vdev_id); 1619 return NULL; 1620 } 1621 1622 /** 1623 * dp_peer_hash_find_wrapper() - find link peer or mld per according to 1624 * peer_type 1625 * @soc: DP SOC handle 1626 * @peer_info: peer information for hash find 1627 * @mod_id: ID 
of module requesting reference 1628 * 1629 * Return: peer handle 1630 */ 1631 static inline 1632 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc, 1633 struct cdp_peer_info *peer_info, 1634 enum dp_mod_id mod_id) 1635 { 1636 struct dp_peer *peer = NULL; 1637 1638 if (peer_info->peer_type == CDP_LINK_PEER_TYPE || 1639 peer_info->peer_type == CDP_WILD_PEER_TYPE) { 1640 peer = dp_peer_find_hash_find(soc, peer_info->mac_addr, 1641 peer_info->mac_addr_is_aligned, 1642 peer_info->vdev_id, 1643 mod_id); 1644 if (peer) 1645 return peer; 1646 } 1647 if (peer_info->peer_type == CDP_MLD_PEER_TYPE || 1648 peer_info->peer_type == CDP_WILD_PEER_TYPE) 1649 peer = dp_mld_peer_find_hash_find( 1650 soc, peer_info->mac_addr, 1651 peer_info->mac_addr_is_aligned, 1652 peer_info->vdev_id, 1653 mod_id); 1654 return peer; 1655 } 1656 1657 /** 1658 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer, 1659 * increase mld peer ref_cnt 1660 * @link_peer: link peer pointer 1661 * @mld_peer: mld peer pointer 1662 * 1663 * Return: none 1664 */ 1665 static inline 1666 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer, 1667 struct dp_peer *mld_peer) 1668 { 1669 /* increase mld_peer ref_cnt */ 1670 dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP); 1671 link_peer->mld_peer = mld_peer; 1672 } 1673 1674 /** 1675 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer, 1676 * decrease mld peer ref_cnt 1677 * @link_peer: link peer pointer 1678 * 1679 * Return: None 1680 */ 1681 static inline 1682 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer) 1683 { 1684 dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP); 1685 link_peer->mld_peer = NULL; 1686 } 1687 1688 /** 1689 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer 1690 * @mld_peer: mld peer pointer 1691 * 1692 * Return: None 1693 */ 1694 static inline 1695 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer) 1696 { 1697 int i; 1698 1699 
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Records the link peer's MAC, vdev_id and chip_id in the first free
 * slot of @mld_peer->link_peers under link_peers_info_lock. If all
 * DP_MAX_MLO_LINKS slots are occupied the addition silently fails and
 * is reported only via the log/cfg-event below.
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	/* Scan for the first invalid (free) slot */
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no free slot was found;
	 * the loop index is reused below as both slot id and
	 * success/failure indicator.
	 */
	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}
1 : 0); 1803 1804 return num_links; 1805 } 1806 1807 /** 1808 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and 1809 * increase link peers ref_cnt 1810 * @soc: dp_soc handle 1811 * @mld_peer: dp mld peer pointer 1812 * @mld_link_peers: structure that hold links peers pointer array and number 1813 * @mod_id: id of module requesting reference 1814 * 1815 * Return: None 1816 */ 1817 static inline 1818 void dp_get_link_peers_ref_from_mld_peer( 1819 struct dp_soc *soc, 1820 struct dp_peer *mld_peer, 1821 struct dp_mld_link_peers *mld_link_peers, 1822 enum dp_mod_id mod_id) 1823 { 1824 struct dp_peer *peer; 1825 uint8_t i = 0, j = 0; 1826 struct dp_peer_link_info *link_peer_info; 1827 1828 qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers)); 1829 qdf_spin_lock_bh(&mld_peer->link_peers_info_lock); 1830 for (i = 0; i < DP_MAX_MLO_LINKS; i++) { 1831 link_peer_info = &mld_peer->link_peers[i]; 1832 if (link_peer_info->is_valid) { 1833 peer = dp_link_peer_hash_find_by_chip_id( 1834 soc, 1835 link_peer_info->mac_addr.raw, 1836 true, 1837 link_peer_info->vdev_id, 1838 link_peer_info->chip_id, 1839 mod_id); 1840 if (peer) 1841 mld_link_peers->link_peers[j++] = peer; 1842 } 1843 } 1844 qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock); 1845 1846 mld_link_peers->num_links = j; 1847 } 1848 1849 /** 1850 * dp_release_link_peers_ref() - release all link peers reference 1851 * @mld_link_peers: structure that hold links peers pointer array and number 1852 * @mod_id: id of module requesting reference 1853 * 1854 * Return: None. 
1855 */ 1856 static inline 1857 void dp_release_link_peers_ref( 1858 struct dp_mld_link_peers *mld_link_peers, 1859 enum dp_mod_id mod_id) 1860 { 1861 struct dp_peer *peer; 1862 uint8_t i; 1863 1864 for (i = 0; i < mld_link_peers->num_links; i++) { 1865 peer = mld_link_peers->link_peers[i]; 1866 if (peer) 1867 dp_peer_unref_delete(peer, mod_id); 1868 mld_link_peers->link_peers[i] = NULL; 1869 } 1870 1871 mld_link_peers->num_links = 0; 1872 } 1873 1874 /** 1875 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id 1876 * @soc: Datapath soc handle 1877 * @peer_id: peer id 1878 * @lmac_id: lmac id to find the link peer on given lmac 1879 * 1880 * Return: peer_id of link peer if found 1881 * else return HTT_INVALID_PEER 1882 */ 1883 static inline 1884 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id, 1885 uint8_t lmac_id) 1886 { 1887 uint8_t i; 1888 struct dp_peer *peer; 1889 struct dp_peer *link_peer; 1890 struct dp_soc *link_peer_soc; 1891 struct dp_mld_link_peers link_peers_info; 1892 uint16_t link_peer_id = HTT_INVALID_PEER; 1893 1894 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP); 1895 1896 if (!peer) 1897 return HTT_INVALID_PEER; 1898 1899 if (IS_MLO_DP_MLD_PEER(peer)) { 1900 /* get link peers with reference */ 1901 dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info, 1902 DP_MOD_ID_CDP); 1903 1904 for (i = 0; i < link_peers_info.num_links; i++) { 1905 link_peer = link_peers_info.link_peers[i]; 1906 link_peer_soc = link_peer->vdev->pdev->soc; 1907 if ((link_peer_soc == soc) && 1908 (link_peer->vdev->pdev->lmac_id == lmac_id)) { 1909 link_peer_id = link_peer->peer_id; 1910 break; 1911 } 1912 } 1913 /* release link peers reference */ 1914 dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP); 1915 } else { 1916 link_peer_id = peer_id; 1917 } 1918 1919 dp_peer_unref_delete(peer, DP_MOD_ID_CDP); 1920 1921 return link_peer_id; 1922 } 1923 1924 /** 1925 * dp_peer_get_tgt_peer_hash_find() 
- get dp_peer handle 1926 * @soc: soc handle 1927 * @peer_mac: peer mac address 1928 * @mac_addr_is_aligned: is mac addr aligned 1929 * @vdev_id: vdev_id 1930 * @mod_id: id of module requesting reference 1931 * 1932 * for MLO connection, get corresponding MLD peer, 1933 * otherwise get link peer for non-MLO case. 1934 * 1935 * Return: peer in success 1936 * NULL in failure 1937 */ 1938 static inline 1939 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc, 1940 uint8_t *peer_mac, 1941 int mac_addr_is_aligned, 1942 uint8_t vdev_id, 1943 enum dp_mod_id mod_id) 1944 { 1945 struct dp_peer *ta_peer = NULL; 1946 struct dp_peer *peer = dp_peer_find_hash_find(soc, 1947 peer_mac, 0, vdev_id, 1948 mod_id); 1949 1950 if (peer) { 1951 /* mlo connection link peer, get mld peer with reference */ 1952 if (IS_MLO_DP_LINK_PEER(peer)) { 1953 /* increase mld peer ref_cnt */ 1954 if (QDF_STATUS_SUCCESS == 1955 dp_peer_get_ref(soc, peer->mld_peer, mod_id)) 1956 ta_peer = peer->mld_peer; 1957 else 1958 ta_peer = NULL; 1959 1960 /* release peer reference that added by hash find */ 1961 dp_peer_unref_delete(peer, mod_id); 1962 } else { 1963 /* mlo MLD peer or non-mlo link peer */ 1964 ta_peer = peer; 1965 } 1966 } else { 1967 dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT, 1968 QDF_MAC_ADDR_REF(peer_mac)); 1969 } 1970 1971 return ta_peer; 1972 } 1973 1974 /** 1975 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id 1976 * @soc: core DP soc context 1977 * @peer_id: peer id from peer object can be retrieved 1978 * @mod_id: ID of module requesting reference 1979 * 1980 * for MLO connection, get corresponding MLD peer, 1981 * otherwise get link peer for non-MLO case. 
1982 * 1983 * Return: peer in success 1984 * NULL in failure 1985 */ 1986 static inline 1987 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc, 1988 uint16_t peer_id, 1989 enum dp_mod_id mod_id) 1990 { 1991 struct dp_peer *ta_peer = NULL; 1992 struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id); 1993 1994 if (peer) { 1995 /* mlo connection link peer, get mld peer with reference */ 1996 if (IS_MLO_DP_LINK_PEER(peer)) { 1997 /* increase mld peer ref_cnt */ 1998 if (QDF_STATUS_SUCCESS == 1999 dp_peer_get_ref(soc, peer->mld_peer, mod_id)) 2000 ta_peer = peer->mld_peer; 2001 else 2002 ta_peer = NULL; 2003 2004 /* release peer reference that added by hash find */ 2005 dp_peer_unref_delete(peer, mod_id); 2006 } else { 2007 /* mlo MLD peer or non-mlo link peer */ 2008 ta_peer = peer; 2009 } 2010 } 2011 2012 return ta_peer; 2013 } 2014 2015 /** 2016 * dp_peer_mlo_delete() - peer MLO related delete operation 2017 * @peer: DP peer handle 2018 * Return: None 2019 */ 2020 static inline 2021 void dp_peer_mlo_delete(struct dp_peer *peer) 2022 { 2023 struct dp_peer *ml_peer; 2024 struct dp_soc *soc; 2025 2026 dp_info("peer " QDF_MAC_ADDR_FMT " type %d", 2027 QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type); 2028 2029 /* MLO connection link peer */ 2030 if (IS_MLO_DP_LINK_PEER(peer)) { 2031 ml_peer = peer->mld_peer; 2032 soc = ml_peer->vdev->pdev->soc; 2033 2034 /* if last link peer deletion, delete MLD peer */ 2035 if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0) 2036 dp_peer_delete(soc, peer->mld_peer, NULL); 2037 } 2038 } 2039 2040 /** 2041 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization 2042 * @soc: Soc handle 2043 * @peer: DP peer handle 2044 * @vdev_id: Vdev ID 2045 * @setup_info: peer setup information for MLO 2046 */ 2047 QDF_STATUS dp_peer_mlo_setup( 2048 struct dp_soc *soc, 2049 struct dp_peer *peer, 2050 uint8_t vdev_id, 2051 struct cdp_peer_setup_info *setup_info); 2052 2053 /** 2054 * 
/**
 * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
 * peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * For an MLD peer, scans its link peers for the one flagged
 * primary_link, takes a dedicated reference on it, then releases the
 * scan references and the MLD reference. For a legacy peer the
 * reference taken by the id lookup is handed to the caller directly.
 * The caller owns one @mod_id reference on the returned peer.
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
2177 *handle = (dp_txrx_ref_handle)peer; 2178 return txrx_peer; 2179 } 2180 2181 dp_peer_unref_delete(peer, mod_id); 2182 return NULL; 2183 } 2184 2185 /** 2186 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers 2187 * 2188 * @soc: core DP soc context 2189 * 2190 * Return: void 2191 */ 2192 void dp_print_mlo_ast_stats_be(struct dp_soc *soc); 2193 2194 #else 2195 2196 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false 2197 2198 #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */ 2199 /* is legacy peer */ 2200 #define IS_DP_LEGACY_PEER(_peer) true 2201 #define IS_MLO_DP_LINK_PEER(_peer) false 2202 #define IS_MLO_DP_MLD_PEER(_peer) false 2203 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL 2204 2205 static inline 2206 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc, 2207 struct cdp_peer_info *peer_info, 2208 enum dp_mod_id mod_id) 2209 { 2210 return dp_peer_find_hash_find(soc, peer_info->mac_addr, 2211 peer_info->mac_addr_is_aligned, 2212 peer_info->vdev_id, 2213 mod_id); 2214 } 2215 2216 static inline 2217 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc, 2218 uint8_t *peer_mac, 2219 int mac_addr_is_aligned, 2220 uint8_t vdev_id, 2221 enum dp_mod_id mod_id) 2222 { 2223 return dp_peer_find_hash_find(soc, peer_mac, 2224 mac_addr_is_aligned, vdev_id, 2225 mod_id); 2226 } 2227 2228 static inline 2229 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc, 2230 uint16_t peer_id, 2231 enum dp_mod_id mod_id) 2232 { 2233 return dp_peer_get_ref_by_id(soc, peer_id, mod_id); 2234 } 2235 2236 static inline 2237 QDF_STATUS dp_peer_mlo_setup( 2238 struct dp_soc *soc, 2239 struct dp_peer *peer, 2240 uint8_t vdev_id, 2241 struct cdp_peer_setup_info *setup_info) 2242 { 2243 return QDF_STATUS_SUCCESS; 2244 } 2245 2246 static inline 2247 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer) 2248 { 2249 } 2250 2251 static inline 2252 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer) 2253 { 2254 } 2255 2256 static 
inline 2257 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer) 2258 { 2259 } 2260 2261 static inline 2262 void dp_peer_mlo_delete(struct dp_peer *peer) 2263 { 2264 } 2265 2266 static inline 2267 void dp_mlo_peer_authorize(struct dp_soc *soc, 2268 struct dp_peer *link_peer) 2269 { 2270 } 2271 2272 static inline uint8_t dp_get_chip_id(struct dp_soc *soc) 2273 { 2274 return 0; 2275 } 2276 2277 static inline struct dp_peer * 2278 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc, 2279 uint8_t *peer_mac_addr, 2280 int mac_addr_is_aligned, 2281 uint8_t vdev_id, 2282 uint8_t chip_id, 2283 enum dp_mod_id mod_id) 2284 { 2285 return dp_peer_find_hash_find(soc, peer_mac_addr, 2286 mac_addr_is_aligned, 2287 vdev_id, mod_id); 2288 } 2289 2290 static inline 2291 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer) 2292 { 2293 return peer; 2294 } 2295 2296 static inline 2297 struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc, 2298 uint16_t peer_id, 2299 enum dp_mod_id mod_id) 2300 { 2301 return dp_peer_get_ref_by_id(soc, peer_id, mod_id); 2302 } 2303 2304 static inline 2305 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer) 2306 { 2307 return peer->txrx_peer; 2308 } 2309 2310 static inline 2311 bool dp_peer_is_primary_link_peer(struct dp_peer *peer) 2312 { 2313 return true; 2314 } 2315 2316 /** 2317 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id 2318 * 2319 * @soc: core DP soc context 2320 * @peer_id: peer id from peer object can be retrieved 2321 * @handle: reference handle 2322 * @mod_id: ID of module requesting reference 2323 * 2324 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object 2325 */ 2326 static inline struct dp_txrx_peer * 2327 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc, 2328 uint16_t peer_id, 2329 dp_txrx_ref_handle *handle, 2330 enum dp_mod_id mod_id) 2331 2332 { 2333 return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id); 2334 } 2335 2336 static inline 2337 
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id, 2338 uint8_t lmac_id) 2339 { 2340 return peer_id; 2341 } 2342 2343 static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc) 2344 { 2345 } 2346 #endif /* WLAN_FEATURE_11BE_MLO */ 2347 2348 static inline 2349 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer) 2350 { 2351 uint8_t i; 2352 2353 qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS * 2354 sizeof(struct dp_rx_tid_defrag)); 2355 2356 for (i = 0; i < DP_MAX_TIDS; i++) 2357 qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock); 2358 } 2359 2360 static inline 2361 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer) 2362 { 2363 uint8_t i; 2364 2365 for (i = 0; i < DP_MAX_TIDS; i++) 2366 qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock); 2367 } 2368 2369 #ifdef PEER_CACHE_RX_PKTS 2370 static inline 2371 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer) 2372 { 2373 qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock); 2374 txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH; 2375 qdf_list_create(&txrx_peer->bufq_info.cached_bufq, 2376 DP_RX_CACHED_BUFQ_THRESH); 2377 } 2378 2379 static inline 2380 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer) 2381 { 2382 qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq); 2383 qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock); 2384 } 2385 2386 #else 2387 static inline 2388 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer) 2389 { 2390 } 2391 2392 static inline 2393 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer) 2394 { 2395 } 2396 #endif 2397 2398 /** 2399 * dp_peer_update_state() - update dp peer state 2400 * 2401 * @soc: core DP soc context 2402 * @peer: DP peer 2403 * @state: new state 2404 * 2405 * Return: None 2406 */ 2407 static inline void 2408 dp_peer_update_state(struct dp_soc *soc, 2409 struct dp_peer *peer, 2410 enum dp_peer_state state) 2411 { 2412 uint8_t 
peer_state; 2413 2414 qdf_spin_lock_bh(&peer->peer_state_lock); 2415 peer_state = peer->peer_state; 2416 2417 switch (state) { 2418 case DP_PEER_STATE_INIT: 2419 DP_PEER_STATE_ASSERT 2420 (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) || 2421 (peer_state != DP_PEER_STATE_LOGICAL_DELETE)); 2422 break; 2423 2424 case DP_PEER_STATE_ACTIVE: 2425 DP_PEER_STATE_ASSERT(peer, state, 2426 (peer_state == DP_PEER_STATE_INIT)); 2427 break; 2428 2429 case DP_PEER_STATE_LOGICAL_DELETE: 2430 DP_PEER_STATE_ASSERT(peer, state, 2431 (peer_state == DP_PEER_STATE_ACTIVE) || 2432 (peer_state == DP_PEER_STATE_INIT)); 2433 break; 2434 2435 case DP_PEER_STATE_INACTIVE: 2436 if (IS_MLO_DP_MLD_PEER(peer)) 2437 DP_PEER_STATE_ASSERT 2438 (peer, state, 2439 (peer_state == DP_PEER_STATE_ACTIVE)); 2440 else 2441 DP_PEER_STATE_ASSERT 2442 (peer, state, 2443 (peer_state == DP_PEER_STATE_LOGICAL_DELETE)); 2444 break; 2445 2446 case DP_PEER_STATE_FREED: 2447 if (peer->sta_self_peer) 2448 DP_PEER_STATE_ASSERT 2449 (peer, state, (peer_state == DP_PEER_STATE_INIT)); 2450 else 2451 DP_PEER_STATE_ASSERT 2452 (peer, state, 2453 (peer_state == DP_PEER_STATE_INACTIVE) || 2454 (peer_state == DP_PEER_STATE_LOGICAL_DELETE)); 2455 break; 2456 2457 default: 2458 qdf_spin_unlock_bh(&peer->peer_state_lock); 2459 dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT, 2460 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2461 return; 2462 } 2463 peer->peer_state = state; 2464 qdf_spin_unlock_bh(&peer->peer_state_lock); 2465 dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n", 2466 peer_state, state, 2467 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2468 } 2469 2470 /** 2471 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer 2472 * list based on type of peer (Legacy or MLD peer) 2473 * 2474 * @vdev: DP vdev context 2475 * @func: function to be called for each peer 2476 * @arg: argument need to be passed to func 2477 * @mod_id: module_id 2478 * @peer_type: type of peer - MLO 
Link Peer or Legacy Peer 2479 * 2480 * Return: void 2481 */ 2482 static inline void 2483 dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev, 2484 dp_peer_iter_func *func, 2485 void *arg, enum dp_mod_id mod_id, 2486 enum dp_peer_type peer_type) 2487 { 2488 struct dp_peer *peer; 2489 struct dp_peer *tmp_peer; 2490 struct dp_soc *soc = NULL; 2491 2492 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 2493 return; 2494 2495 soc = vdev->pdev->soc; 2496 2497 qdf_spin_lock_bh(&vdev->peer_list_lock); 2498 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 2499 peer_list_elem, 2500 tmp_peer) { 2501 if (dp_peer_get_ref(soc, peer, mod_id) == 2502 QDF_STATUS_SUCCESS) { 2503 if ((peer_type == DP_PEER_TYPE_LEGACY && 2504 (IS_DP_LEGACY_PEER(peer))) || 2505 (peer_type == DP_PEER_TYPE_MLO_LINK && 2506 (IS_MLO_DP_LINK_PEER(peer)))) { 2507 (*func)(soc, peer, arg); 2508 } 2509 dp_peer_unref_delete(peer, mod_id); 2510 } 2511 } 2512 qdf_spin_unlock_bh(&vdev->peer_list_lock); 2513 } 2514 2515 #ifdef REO_SHARED_QREF_TABLE_EN 2516 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc, 2517 struct dp_peer *peer); 2518 #else 2519 static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc, 2520 struct dp_peer *peer) {} 2521 #endif 2522 2523 /** 2524 * dp_peer_check_wds_ext_peer() - Check WDS ext peer 2525 * 2526 * @peer: DP peer 2527 * 2528 * Return: True for WDS ext peer, false otherwise 2529 */ 2530 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer); 2531 2532 /** 2533 * dp_gen_ml_peer_id() - Generate MLD peer id for DP 2534 * 2535 * @soc: DP soc context 2536 * @peer_id: mld peer id 2537 * 2538 * Return: DP MLD peer id 2539 */ 2540 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id); 2541 2542 #ifdef FEATURE_AST 2543 /** 2544 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index 2545 * @soc: SoC handle 2546 * @peer_id: peer id from firmware 2547 * @mac_addr: MAC address of ast node 2548 * @hw_peer_id: HW AST Index returned by target in peer map event 
2549 * @vdev_id: vdev id for VAP to which the peer belongs to 2550 * @ast_hash: ast hash value in HW 2551 * @is_wds: flag to indicate peer map event for WDS ast entry 2552 * 2553 * Return: QDF_STATUS code 2554 */ 2555 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id, 2556 uint8_t *mac_addr, uint16_t hw_peer_id, 2557 uint8_t vdev_id, uint16_t ast_hash, 2558 uint8_t is_wds); 2559 #endif 2560 2561 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2562 /** 2563 * dp_map_link_id_band: Set link id to band mapping in txrx_peer 2564 * @peer: dp peer pointer 2565 * 2566 * Return: None 2567 */ 2568 void dp_map_link_id_band(struct dp_peer *peer); 2569 #else 2570 static inline 2571 void dp_map_link_id_band(struct dp_peer *peer) 2572 { 2573 } 2574 #endif 2575 #endif /* _DP_PEER_H_ */ 2576