/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"
#include "dp_internal.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

/* Peer id carried in HTT/REO messages when no valid peer exists */
#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

/* Peer hash sizing: entries = max_peers * MULT >> SHIFT */
#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/* Per-severity trace wrappers for the DP_PEER module */
#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) \
QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params) 50 51 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 52 /** 53 * enum dp_bands - WiFi Band 54 * 55 * @DP_BAND_INVALID: Invalid band 56 * @DP_BAND_2GHZ: 2GHz link 57 * @DP_BAND_5GHZ: 5GHz link 58 * @DP_BAND_6GHZ: 6GHz link 59 * @DP_BAND_UNKNOWN: Unknown band 60 */ 61 enum dp_bands { 62 DP_BAND_INVALID = 0, 63 DP_BAND_2GHZ = 1, 64 DP_BAND_5GHZ = 2, 65 DP_BAND_6GHZ = 3, 66 DP_BAND_UNKNOWN = 4, 67 }; 68 #endif 69 70 void check_free_list_for_invalid_flush(struct dp_soc *soc); 71 72 static inline 73 void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid, 74 struct dp_peer *peer, void *hw_qdesc_vaddr) 75 { 76 uint32_t max_list_size; 77 unsigned long curr_ts = qdf_get_system_timestamp(); 78 uint32_t qref_index = soc->free_addr_list_idx; 79 80 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 81 82 if (max_list_size == 0) 83 return; 84 85 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr = 86 rx_tid->hw_qdesc_paddr; 87 soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts; 88 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align = 89 hw_qdesc_vaddr; 90 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign = 91 rx_tid->hw_qdesc_vaddr_unaligned; 92 soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id; 93 soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid; 94 soc->alloc_addr_list_idx++; 95 96 if (soc->alloc_addr_list_idx == max_list_size) 97 soc->alloc_addr_list_idx = 0; 98 } 99 100 static inline 101 void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid) 102 { 103 uint32_t max_list_size; 104 unsigned long curr_ts = qdf_get_system_timestamp(); 105 uint32_t qref_index = soc->free_addr_list_idx; 106 107 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 108 109 if (max_list_size == 0) 110 return; 111 112 soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts; 113 soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr 
= 114 rx_tid->hw_qdesc_paddr; 115 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align = 116 rx_tid->hw_qdesc_vaddr_aligned; 117 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign = 118 rx_tid->hw_qdesc_vaddr_unaligned; 119 soc->free_addr_list_idx++; 120 121 if (soc->free_addr_list_idx == max_list_size) 122 soc->free_addr_list_idx = 0; 123 } 124 125 static inline 126 void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer, 127 uint32_t tid) 128 { 129 uint32_t max_list_size; 130 unsigned long curr_ts = qdf_get_system_timestamp(); 131 132 max_list_size = soc->wlan_cfg_ctx->qref_control_size; 133 134 if (max_list_size == 0) 135 return; 136 137 soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts; 138 soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id; 139 soc->reo_write_list[soc->write_paddr_list_idx].paddr = 140 peer->rx_tid[tid].hw_qdesc_paddr; 141 soc->reo_write_list[soc->write_paddr_list_idx].tid = tid; 142 soc->write_paddr_list_idx++; 143 144 if (soc->write_paddr_list_idx == max_list_size) 145 soc->write_paddr_list_idx = 0; 146 } 147 148 #ifdef REO_QDESC_HISTORY 149 enum reo_qdesc_event_type { 150 REO_QDESC_UPDATE_CB = 0, 151 REO_QDESC_FREE, 152 }; 153 154 struct reo_qdesc_event { 155 qdf_dma_addr_t qdesc_addr; 156 uint64_t ts; 157 enum reo_qdesc_event_type type; 158 uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; 159 }; 160 #endif 161 162 struct ast_del_ctxt { 163 bool age; 164 int del_count; 165 }; 166 167 #ifdef QCA_SUPPORT_WDS_EXTENDED 168 /** 169 * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer 170 * 171 * @peer: DP peer context 172 * 173 * This API checks whether the peer is WDS_EXT peer or not 174 * 175 * Return: true in the wds_ext peer else flase 176 */ 177 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer) 178 { 179 return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init); 180 } 181 #else 182 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer) 183 { 
	return false;
}
#endif

/* Iterator callback: invoked once per peer while a reference is held */
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
/**
 * dp_peer_unref_delete() - unref and delete peer
 * @peer: Datapath peer handle
 * @id: ID of module releasing reference
 *
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_txrx_peer_unref_delete() - unref and delete peer
 * @handle: Datapath txrx ref handle
 * @id: Module ID of the caller
 *
 */
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);

/**
 * dp_peer_find_hash_find() - returns legacy or mlo link peer from
 *			      peer_hash_table matching vdev_id and mac_address
 * @soc: soc handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer in success
 *         NULL in failure
 */
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id mod_id);

/**
 * dp_peer_find_by_id_valid - check if peer exists for given id
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 *
 * Return: true if peer exists of false otherwise
 */
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);

/**
 * dp_peer_get_ref() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @mod_id: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	/* inc_not_zero makes check-and-take atomic: fail if the peer's
	 * refcount has already dropped to zero (deletion in progress).
	 */
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	/* Per-module reference accounting is skipped for low mod ids
	 * (hot path); only track modules above DP_MOD_ID_RX.
	 */
	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @mod_id: module id
 *
 * Looks up the id->object map under peer_map_lock and takes a
 * reference before releasing the lock, regardless of peer state.
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)

{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * Same as __dp_peer_get_ref_by_id() but additionally refuses peers
 * that have reached (or passed) LOGICAL_DELETE state.
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}

/**
 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * The reference is held on the owning dp_peer (returned via @handle);
 * release it with dp_txrx_peer_unref_delete().
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			   uint16_t peer_id,
			   dp_txrx_ref_handle *handle,
			   enum dp_mod_id mod_id)

{
	struct dp_peer *peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	/* peer exists but has no txrx context: drop the ref just taken */
	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, mod_id);
		return NULL;
	}

	*handle = (dp_txrx_ref_handle)peer;
	return peer->txrx_peer;
}

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

/* Mark peer disconnected and drop any rx frames cached for it */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument need to be passed
to func 382 * @mod_id: module_id 383 * 384 * Return: void 385 */ 386 static inline void 387 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg, 388 enum dp_mod_id mod_id) 389 { 390 struct dp_peer *peer; 391 struct dp_peer *tmp_peer; 392 struct dp_soc *soc = NULL; 393 394 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 395 return; 396 397 soc = vdev->pdev->soc; 398 399 qdf_spin_lock_bh(&vdev->peer_list_lock); 400 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 401 peer_list_elem, 402 tmp_peer) { 403 if (dp_peer_get_ref(soc, peer, mod_id) == 404 QDF_STATUS_SUCCESS) { 405 (*func)(soc, peer, arg); 406 dp_peer_unref_delete(peer, mod_id); 407 } 408 } 409 qdf_spin_unlock_bh(&vdev->peer_list_lock); 410 } 411 412 /** 413 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev 414 * 415 * @pdev: DP pdev context 416 * @func: function to be called for each peer 417 * @arg: argument need to be passed to func 418 * @mod_id: module_id 419 * 420 * Return: void 421 */ 422 static inline void 423 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg, 424 enum dp_mod_id mod_id) 425 { 426 struct dp_vdev *vdev; 427 428 if (!pdev) 429 return; 430 431 qdf_spin_lock_bh(&pdev->vdev_list_lock); 432 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) 433 dp_vdev_iterate_peer(vdev, func, arg, mod_id); 434 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 435 } 436 437 /** 438 * dp_soc_iterate_peer() - API to iterate through all peers of soc 439 * 440 * @soc: DP soc context 441 * @func: function to be called for each peer 442 * @arg: argument need to be passed to func 443 * @mod_id: module_id 444 * 445 * Return: void 446 */ 447 static inline void 448 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg, 449 enum dp_mod_id mod_id) 450 { 451 struct dp_pdev *pdev; 452 int i; 453 454 if (!soc) 455 return; 456 457 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { 458 pdev = soc->pdev_list[i]; 459 dp_pdev_iterate_peer(pdev, func, arg, 
mod_id); 460 } 461 } 462 463 /** 464 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list 465 * 466 * This API will cache the peers in local allocated memory and calls 467 * iterate function outside the lock. 468 * 469 * As this API is allocating new memory it is suggested to use this 470 * only when lock cannot be held 471 * 472 * @vdev: DP vdev context 473 * @func: function to be called for each peer 474 * @arg: argument need to be passed to func 475 * @mod_id: module_id 476 * 477 * Return: void 478 */ 479 static inline void 480 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev, 481 dp_peer_iter_func *func, 482 void *arg, 483 enum dp_mod_id mod_id) 484 { 485 struct dp_peer *peer; 486 struct dp_peer *tmp_peer; 487 struct dp_soc *soc = NULL; 488 struct dp_peer **peer_array = NULL; 489 int i = 0; 490 uint32_t num_peers = 0; 491 492 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 493 return; 494 495 num_peers = vdev->num_peers; 496 497 soc = vdev->pdev->soc; 498 499 peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *)); 500 if (!peer_array) 501 return; 502 503 qdf_spin_lock_bh(&vdev->peer_list_lock); 504 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 505 peer_list_elem, 506 tmp_peer) { 507 if (i >= num_peers) 508 break; 509 510 if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) { 511 peer_array[i] = peer; 512 i = (i + 1); 513 } 514 } 515 qdf_spin_unlock_bh(&vdev->peer_list_lock); 516 517 for (i = 0; i < num_peers; i++) { 518 peer = peer_array[i]; 519 520 if (!peer) 521 continue; 522 523 (*func)(soc, peer, arg); 524 dp_peer_unref_delete(peer, mod_id); 525 } 526 527 qdf_mem_free(peer_array); 528 } 529 530 /** 531 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev 532 * 533 * This API will cache the peers in local allocated memory and calls 534 * iterate function outside the lock. 
535 * 536 * As this API is allocating new memory it is suggested to use this 537 * only when lock cannot be held 538 * 539 * @pdev: DP pdev context 540 * @func: function to be called for each peer 541 * @arg: argument need to be passed to func 542 * @mod_id: module_id 543 * 544 * Return: void 545 */ 546 static inline void 547 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev, 548 dp_peer_iter_func *func, 549 void *arg, 550 enum dp_mod_id mod_id) 551 { 552 struct dp_peer *peer; 553 struct dp_peer *tmp_peer; 554 struct dp_soc *soc = NULL; 555 struct dp_vdev *vdev = NULL; 556 struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0}; 557 int i = 0; 558 int j = 0; 559 uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0}; 560 561 if (!pdev || !pdev->soc) 562 return; 563 564 soc = pdev->soc; 565 566 qdf_spin_lock_bh(&pdev->vdev_list_lock); 567 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { 568 num_peers[i] = vdev->num_peers; 569 peer_array[i] = qdf_mem_malloc(num_peers[i] * 570 sizeof(struct dp_peer *)); 571 if (!peer_array[i]) 572 break; 573 574 qdf_spin_lock_bh(&vdev->peer_list_lock); 575 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 576 peer_list_elem, 577 tmp_peer) { 578 if (j >= num_peers[i]) 579 break; 580 581 if (dp_peer_get_ref(soc, peer, mod_id) == 582 QDF_STATUS_SUCCESS) { 583 peer_array[i][j] = peer; 584 585 j = (j + 1); 586 } 587 } 588 qdf_spin_unlock_bh(&vdev->peer_list_lock); 589 i = (i + 1); 590 } 591 qdf_spin_unlock_bh(&pdev->vdev_list_lock); 592 593 for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) { 594 if (!peer_array[i]) 595 break; 596 597 for (j = 0; j < num_peers[i]; j++) { 598 peer = peer_array[i][j]; 599 600 if (!peer) 601 continue; 602 603 (*func)(soc, peer, arg); 604 dp_peer_unref_delete(peer, mod_id); 605 } 606 607 qdf_mem_free(peer_array[i]); 608 } 609 } 610 611 /** 612 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc 613 * 614 * This API will cache the peers in local allocated memory and calls 615 * iterate function outside the lock. 
616 * 617 * As this API is allocating new memory it is suggested to use this 618 * only when lock cannot be held 619 * 620 * @soc: DP soc context 621 * @func: function to be called for each peer 622 * @arg: argument need to be passed to func 623 * @mod_id: module_id 624 * 625 * Return: void 626 */ 627 static inline void 628 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc, 629 dp_peer_iter_func *func, 630 void *arg, 631 enum dp_mod_id mod_id) 632 { 633 struct dp_pdev *pdev; 634 int i; 635 636 if (!soc) 637 return; 638 639 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { 640 pdev = soc->pdev_list[i]; 641 dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id); 642 } 643 } 644 645 #ifdef DP_PEER_STATE_DEBUG 646 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \ 647 do { \ 648 if (!(_condition)) { \ 649 dp_alert("Invalid state shift from %u to %u peer " \ 650 QDF_MAC_ADDR_FMT, \ 651 (_peer)->peer_state, (_new_state), \ 652 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \ 653 QDF_ASSERT(0); \ 654 } \ 655 } while (0) 656 657 #else 658 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \ 659 do { \ 660 if (!(_condition)) { \ 661 dp_alert("Invalid state shift from %u to %u peer " \ 662 QDF_MAC_ADDR_FMT, \ 663 (_peer)->peer_state, (_new_state), \ 664 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \ 665 } \ 666 } while (0) 667 #endif 668 669 /** 670 * dp_peer_state_cmp() - compare dp peer state 671 * 672 * @peer: DP peer 673 * @state: state 674 * 675 * Return: true if state matches with peer state 676 * false if it does not match 677 */ 678 static inline bool 679 dp_peer_state_cmp(struct dp_peer *peer, 680 enum dp_peer_state state) 681 { 682 bool is_status_equal = false; 683 684 qdf_spin_lock_bh(&peer->peer_state_lock); 685 is_status_equal = (peer->peer_state == state); 686 qdf_spin_unlock_bh(&peer->peer_state_lock); 687 688 return is_status_equal; 689 } 690 691 /** 692 * dp_print_ast_stats() - Dump AST table contents 693 * @soc: Datapath soc handle 694 * 
695 * Return: void 696 */ 697 void dp_print_ast_stats(struct dp_soc *soc); 698 699 /** 700 * dp_rx_peer_map_handler() - handle peer map event from firmware 701 * @soc: generic soc handle 702 * @peer_id: peer_id from firmware 703 * @hw_peer_id: ast index for this peer 704 * @vdev_id: vdev ID 705 * @peer_mac_addr: mac address of the peer 706 * @ast_hash: ast hash value 707 * @is_wds: flag to indicate peer map event for WDS ast entry 708 * 709 * associate the peer_id that firmware provided with peer entry 710 * and update the ast table in the host with the hw_peer_id. 711 * 712 * Return: QDF_STATUS code 713 */ 714 715 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, 716 uint16_t hw_peer_id, uint8_t vdev_id, 717 uint8_t *peer_mac_addr, uint16_t ast_hash, 718 uint8_t is_wds); 719 720 /** 721 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware 722 * @soc: generic soc handle 723 * @peer_id: peer_id from firmware 724 * @vdev_id: vdev ID 725 * @peer_mac_addr: mac address of the peer or wds entry 726 * @is_wds: flag to indicate peer map event for WDS ast entry 727 * @free_wds_count: number of wds entries freed by FW with peer delete 728 * 729 * Return: none 730 */ 731 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id, 732 uint8_t vdev_id, uint8_t *peer_mac_addr, 733 uint8_t is_wds, uint32_t free_wds_count); 734 735 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 736 /** 737 * dp_rx_peer_ext_evt() - handle peer extended event from firmware 738 * @soc: DP soc handle 739 * @info: extended evt info 740 * 741 * 742 * Return: QDF_STATUS 743 */ 744 745 QDF_STATUS 746 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info); 747 #endif 748 #ifdef DP_RX_UDP_OVER_PEER_ROAM 749 /** 750 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev 751 * @soc: dp soc pointer 752 * @vdev_id: vdev id 753 * @peer_mac_addr: mac address of the peer 754 * 755 * This function resets the roamed peer 
auth status and mac address 756 * after peer map indication of same peer is received from firmware. 757 * 758 * Return: None 759 */ 760 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id, 761 uint8_t *peer_mac_addr); 762 #else 763 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id, 764 uint8_t *peer_mac_addr) 765 { 766 } 767 #endif 768 769 #ifdef WLAN_FEATURE_11BE_MLO 770 /** 771 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware 772 * @soc: generic soc handle 773 * @peer_id: ML peer_id from firmware 774 * @peer_mac_addr: mac address of the peer 775 * @mlo_flow_info: MLO AST flow info 776 * @mlo_link_info: MLO link info 777 * 778 * associate the ML peer_id that firmware provided with peer entry 779 * and update the ast table in the host with the hw_peer_id. 780 * 781 * Return: QDF_STATUS code 782 */ 783 QDF_STATUS 784 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, 785 uint8_t *peer_mac_addr, 786 struct dp_mlo_flow_override_info *mlo_flow_info, 787 struct dp_mlo_link_info *mlo_link_info); 788 789 /** 790 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware 791 * @soc: generic soc handle 792 * @peer_id: peer_id from firmware 793 * 794 * Return: none 795 */ 796 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id); 797 #endif 798 799 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, 800 enum cdp_sec_type sec_type, int is_unicast, 801 u_int32_t *michael_key, u_int32_t *rx_pn); 802 803 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, 804 uint16_t peer_id, uint8_t *peer_mac); 805 806 /** 807 * dp_peer_add_ast() - Allocate and add AST entry into peer list 808 * @soc: SoC handle 809 * @peer: peer to which ast node belongs 810 * @mac_addr: MAC address of ast node 811 * @type: AST entry type 812 * @flags: AST configuration flags 813 * 814 * This API is used by WDS source port learning function to 815 * add a new AST 
entry into peer AST list 816 * 817 * Return: QDF_STATUS code 818 */ 819 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, 820 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, 821 uint32_t flags); 822 823 /** 824 * dp_peer_del_ast() - Delete and free AST entry 825 * @soc: SoC handle 826 * @ast_entry: AST entry of the node 827 * 828 * This function removes the AST entry from peer and soc tables 829 * It assumes caller has taken the ast lock to protect the access to these 830 * tables 831 * 832 * Return: None 833 */ 834 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry); 835 836 void dp_peer_ast_unmap_handler(struct dp_soc *soc, 837 struct dp_ast_entry *ast_entry); 838 839 /** 840 * dp_peer_update_ast() - Delete and free AST entry 841 * @soc: SoC handle 842 * @peer: peer to which ast node belongs 843 * @ast_entry: AST entry of the node 844 * @flags: wds or hmwds 845 * 846 * This function update the AST entry to the roamed peer and soc tables 847 * It assumes caller has taken the ast lock to protect the access to these 848 * tables 849 * 850 * Return: 0 if ast entry is updated successfully 851 * -1 failure 852 */ 853 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, 854 struct dp_ast_entry *ast_entry, uint32_t flags); 855 856 /** 857 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address 858 * @soc: SoC handle 859 * @ast_mac_addr: Mac address 860 * @pdev_id: pdev Id 861 * 862 * It assumes caller has taken the ast lock to protect the access to 863 * AST hash table 864 * 865 * Return: AST entry 866 */ 867 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, 868 uint8_t *ast_mac_addr, 869 uint8_t pdev_id); 870 871 /** 872 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address 873 * @soc: SoC handle 874 * @ast_mac_addr: Mac address 875 * @vdev_id: vdev Id 876 * 877 * It assumes caller has taken the ast lock to protect the access to 878 * AST hash table 879 * 880 * 
Return: AST entry 881 */ 882 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc, 883 uint8_t *ast_mac_addr, 884 uint8_t vdev_id); 885 886 /** 887 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address 888 * @soc: SoC handle 889 * @ast_mac_addr: Mac address 890 * 891 * It assumes caller has taken the ast lock to protect the access to 892 * AST hash table 893 * 894 * Return: AST entry 895 */ 896 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, 897 uint8_t *ast_mac_addr); 898 899 /** 900 * dp_peer_ast_hash_find_soc_by_type() - Find AST entry by MAC address 901 * and AST type 902 * @soc: SoC handle 903 * @ast_mac_addr: Mac address 904 * @type: AST entry type 905 * 906 * It assumes caller has taken the ast lock to protect the access to 907 * AST hash table 908 * 909 * Return: AST entry 910 */ 911 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type( 912 struct dp_soc *soc, 913 uint8_t *ast_mac_addr, 914 enum cdp_txrx_ast_entry_type type); 915 916 /** 917 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry 918 * @soc: SoC handle 919 * @ast_entry: AST entry of the node 920 * 921 * This function gets the pdev_id from the ast entry. 922 * 923 * Return: (uint8_t) pdev_id 924 */ 925 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, 926 struct dp_ast_entry *ast_entry); 927 928 929 /** 930 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry 931 * @soc: SoC handle 932 * @ast_entry: AST entry of the node 933 * 934 * This function gets the next hop from the ast entry. 935 * 936 * Return: (uint8_t) next_hop 937 */ 938 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 939 struct dp_ast_entry *ast_entry); 940 941 /** 942 * dp_peer_ast_set_type() - set type from the ast entry 943 * @soc: SoC handle 944 * @ast_entry: AST entry of the node 945 * @type: AST entry type 946 * 947 * This function sets the type in the ast entry. 
948 * 949 * Return: 950 */ 951 void dp_peer_ast_set_type(struct dp_soc *soc, 952 struct dp_ast_entry *ast_entry, 953 enum cdp_txrx_ast_entry_type type); 954 955 void dp_peer_ast_send_wds_del(struct dp_soc *soc, 956 struct dp_ast_entry *ast_entry, 957 struct dp_peer *peer); 958 959 #ifdef WLAN_FEATURE_MULTI_AST_DEL 960 void dp_peer_ast_send_multi_wds_del( 961 struct dp_soc *soc, uint8_t vdev_id, 962 struct peer_del_multi_wds_entries *wds_list); 963 #endif 964 965 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, 966 struct cdp_soc *dp_soc, 967 void *cookie, 968 enum cdp_ast_free_status status); 969 970 /** 971 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table 972 * @soc: SoC handle 973 * @ase: Address search entry 974 * 975 * This function removes the AST entry from soc AST hash table 976 * It assumes caller has taken the ast lock to protect the access to this table 977 * 978 * Return: None 979 */ 980 void dp_peer_ast_hash_remove(struct dp_soc *soc, 981 struct dp_ast_entry *ase); 982 983 /** 984 * dp_peer_free_ast_entry() - Free up the ast entry memory 985 * @soc: SoC handle 986 * @ast_entry: Address search entry 987 * 988 * This API is used to free up the memory associated with 989 * AST entry. 990 * 991 * Return: None 992 */ 993 void dp_peer_free_ast_entry(struct dp_soc *soc, 994 struct dp_ast_entry *ast_entry); 995 996 /** 997 * dp_peer_unlink_ast_entry() - Free up the ast entry memory 998 * @soc: SoC handle 999 * @ast_entry: Address search entry 1000 * @peer: peer 1001 * 1002 * This API is used to remove/unlink AST entry from the peer list 1003 * and hash list. 
1004 * 1005 * Return: None 1006 */ 1007 void dp_peer_unlink_ast_entry(struct dp_soc *soc, 1008 struct dp_ast_entry *ast_entry, 1009 struct dp_peer *peer); 1010 1011 /** 1012 * dp_peer_mec_detach_entry() - Detach the MEC entry 1013 * @soc: SoC handle 1014 * @mecentry: MEC entry of the node 1015 * @ptr: pointer to free list 1016 * 1017 * The MEC entry is detached from MEC table and added to free_list 1018 * to free the object outside lock 1019 * 1020 * Return: None 1021 */ 1022 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry, 1023 void *ptr); 1024 1025 /** 1026 * dp_peer_mec_free_list() - free the MEC entry from free_list 1027 * @soc: SoC handle 1028 * @ptr: pointer to free list 1029 * 1030 * Return: None 1031 */ 1032 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr); 1033 1034 /** 1035 * dp_peer_mec_add_entry() 1036 * @soc: SoC handle 1037 * @vdev: vdev to which mec node belongs 1038 * @mac_addr: MAC address of mec node 1039 * 1040 * This function allocates and adds MEC entry to MEC table. 
1041 * It assumes caller has taken the mec lock to protect the access to these 1042 * tables 1043 * 1044 * Return: QDF_STATUS 1045 */ 1046 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc, 1047 struct dp_vdev *vdev, 1048 uint8_t *mac_addr); 1049 1050 /** 1051 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id 1052 * within pdev 1053 * @soc: SoC handle 1054 * @pdev_id: pdev Id 1055 * @mec_mac_addr: MAC address of mec node 1056 * 1057 * It assumes caller has taken the mec_lock to protect the access to 1058 * MEC hash table 1059 * 1060 * Return: MEC entry 1061 */ 1062 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc, 1063 uint8_t pdev_id, 1064 uint8_t *mec_mac_addr); 1065 1066 #define DP_AST_ASSERT(_condition) \ 1067 do { \ 1068 if (!(_condition)) { \ 1069 dp_print_ast_stats(soc);\ 1070 QDF_BUG(_condition); \ 1071 } \ 1072 } while (0) 1073 1074 /** 1075 * dp_peer_update_inactive_time() - Update inactive time for peer 1076 * @pdev: pdev object 1077 * @tag_type: htt_tlv_tag type 1078 * @tag_buf: buf message 1079 */ 1080 void 1081 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type, 1082 uint32_t *tag_buf); 1083 1084 #ifndef QCA_MULTIPASS_SUPPORT 1085 static inline 1086 /** 1087 * dp_peer_set_vlan_id() - set vlan_id for this peer 1088 * @cdp_soc: soc handle 1089 * @vdev_id: id of vdev object 1090 * @peer_mac: mac address 1091 * @vlan_id: vlan id for peer 1092 * 1093 * Return: void 1094 */ 1095 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, 1096 uint8_t vdev_id, uint8_t *peer_mac, 1097 uint16_t vlan_id) 1098 { 1099 } 1100 1101 /** 1102 * dp_set_vlan_groupkey() - set vlan map for vdev 1103 * @soc_hdl: pointer to soc 1104 * @vdev_id: id of vdev handle 1105 * @vlan_id: vlan_id 1106 * @group_key: group key for vlan 1107 * 1108 * Return: set success/failure 1109 */ 1110 static inline 1111 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 1112 uint16_t vlan_id, uint16_t group_key) 1113 { 1114 
return QDF_STATUS_SUCCESS; 1115 } 1116 1117 /** 1118 * dp_peer_multipass_list_init() - initialize multipass peer list 1119 * @vdev: pointer to vdev 1120 * 1121 * Return: void 1122 */ 1123 static inline 1124 void dp_peer_multipass_list_init(struct dp_vdev *vdev) 1125 { 1126 } 1127 1128 /** 1129 * dp_peer_multipass_list_remove() - remove peer from special peer list 1130 * @peer: peer handle 1131 * 1132 * Return: void 1133 */ 1134 static inline 1135 void dp_peer_multipass_list_remove(struct dp_peer *peer) 1136 { 1137 } 1138 #else 1139 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, 1140 uint8_t vdev_id, uint8_t *peer_mac, 1141 uint16_t vlan_id); 1142 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id, 1143 uint16_t vlan_id, uint16_t group_key); 1144 void dp_peer_multipass_list_init(struct dp_vdev *vdev); 1145 void dp_peer_multipass_list_remove(struct dp_peer *peer); 1146 #endif 1147 1148 1149 #ifndef QCA_PEER_MULTIQ_SUPPORT 1150 /** 1151 * dp_peer_reset_flowq_map() - reset peer flowq map table 1152 * @peer: dp peer handle 1153 * 1154 * Return: none 1155 */ 1156 static inline 1157 void dp_peer_reset_flowq_map(struct dp_peer *peer) 1158 { 1159 } 1160 1161 /** 1162 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map 1163 * @soc_hdl: generic soc handle 1164 * @is_wds: flag to indicate if peer is wds 1165 * @peer_id: peer_id from htt peer map message 1166 * @peer_mac_addr: mac address of the peer 1167 * @ast_info: ast flow override information from peer map 1168 * 1169 * Return: none 1170 */ 1171 static inline 1172 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, 1173 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, 1174 struct dp_ast_flow_override_info *ast_info) 1175 { 1176 } 1177 #else 1178 void dp_peer_reset_flowq_map(struct dp_peer *peer); 1179 1180 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, 1181 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, 1182 struct 
		dp_ast_flow_override_info *ast_info);
#endif

#ifdef QCA_PEER_EXT_STATS
/**
 * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
 * @soc: DP SoC context
 * @txrx_peer: DP txrx peer context
 *
 * Allocate the peer delay stats context
 *
 * Return: QDF_STATUS_SUCCESS if allocation is
 *	   successful
 */
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
 * @soc: DP SoC context
 * @txrx_peer: txrx DP peer context
 *
 * Free the peer delay stats context
 *
 * Return: Void
 */
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: void
 */
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* No-op stubs when QCA_PEER_EXT_STATS is compiled out */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
/**
 * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
 * @pdev: Datapath pdev handle
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
 * @pdev: Datapath pdev handle
 * @txrx_peer: dp_txrx_peer
 * handle
 *
 * Return: void
 */
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: void
 */
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* No-op stubs when WLAN_PEER_JITTER is compiled out */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
/* No-op stubs when SAWF default queues are compiled out */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

#endif

#ifndef CONFIG_SAWF
/* No-op stubs when SAWF stats are compiled out */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
 * @soc: DP soc
 * @vdev: vdev
 * @mod_id: id of module requesting reference
 *
 * Return: VDEV BSS peer
 */
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);

/**
 * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
 * @soc: DP soc
 * @vdev: vdev
 * @mod_id: id of module requesting reference
 *
 * Return: VDEV self peer
 */
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);

/**
 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * Return: none
 */
void dp_peer_find_map_detach(struct dp_soc *soc);

void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);

/**
 * dp_find_peer_by_macaddr() - Finding the peer from mac address provided.
 * @soc: soc handle
 * @mac_addr: MAC address to be used to find peer
 * @vdev_id: VDEV id
 * @mod_id: MODULE ID
 *
 * Return: struct dp_peer
 */
struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
					uint8_t vdev_id, enum dp_mod_id mod_id);
/**
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);

/**
 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);

/**
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from fw
 *
 * Return: None
 */
void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *wds_macaddr, uint8_t type,
			      uint8_t delete_in_fw);

void
dp_soc_wds_attach(struct dp_soc *soc);

/**
 * dp_peer_mec_hash_detach() - Free MEC Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_mec_hash_detach(struct dp_soc *soc);

/**
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* safe-iteration macro: each entry may be freed by del_ast */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}

/**
 * dp_print_peer_ast_entries() - Dump AST entries of peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer
 * @arg: argument to iterate function
 *
 * Return: void
 */
void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
#else
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

/**
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the addresses are equal, non-zero otherwise
 *	   (memcmp-style convention)
 */
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}

/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

/**
 * dp_mlo_peer_delete() - delete MLO DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);

#ifdef WLAN_FEATURE_11BE_MLO

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
/* Multi-chip MLO: chip id comes from the arch ops hook when provided */
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	if (soc->arch_ops.mlo_get_chip_id)
		return
soc->arch_ops.mlo_get_chip_id(soc);

	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
		return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
				(soc, peer_mac_addr,
				 mac_addr_is_aligned,
				 vdev_id, chip_id,
				 mod_id);

	return NULL;
}
#else
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single-chip build: chip_id is ignored, fall back to plain hash find */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif

/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *				  matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer in success
 *	   NULL in failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
					   uint8_t *peer_mac_addr,
					   int mac_addr_is_aligned,
					   uint8_t vdev_id,
					   enum dp_mod_id mod_id)
{
	/* NOTE: arch ops callback takes mod_id before vdev_id */
	if (soc->arch_ops.mlo_peer_find_hash_find)
		return soc->arch_ops.mlo_peer_find_hash_find(soc,
					      peer_mac_addr,
					      mac_addr_is_aligned,
					      mod_id, vdev_id);
	return NULL;
}

/**
 * dp_peer_hash_find_wrapper() - find link peer or mld per according to
 *				 peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	struct dp_peer *peer = NULL;

	/* for CDP_WILD_PEER_TYPE, try the link peer table first */
	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
					      peer_info->mac_addr_is_aligned,
					      peer_info->vdev_id,
					      mod_id);
		if (peer)
			return peer;
	}
	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
		peer = dp_mld_peer_find_hash_find(
					soc, peer_info->mac_addr,
					peer_info->mac_addr_is_aligned,
					peer_info->vdev_id,
					mod_id);
	return peer;
}

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Records the link peer's MAC/vdev/chip info in the first free slot of
 * @mld_peer->link_peers under link_peers_info_lock. If all
 * DP_MAX_MLO_LINKS slots are occupied the addition fails (logged as
 * "Failed", i == DP_MAX_MLO_LINKS).
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Looks up the slot matching @link_peer's MAC address under
 * link_peers_info_lock and invalidates it; a miss is logged as "Failed"
 * (i == DP_MAX_MLO_LINKS).
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ?
						1 : 0);

	return num_links;
}

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Caller must balance with dp_release_link_peers_ref() to drop the
 * references taken here.
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers reference
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		/* non-MLD peer: the input id already identifies the link */
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find()
- get dp_peer handle 1943 * @soc: soc handle 1944 * @peer_mac: peer mac address 1945 * @mac_addr_is_aligned: is mac addr aligned 1946 * @vdev_id: vdev_id 1947 * @mod_id: id of module requesting reference 1948 * 1949 * for MLO connection, get corresponding MLD peer, 1950 * otherwise get link peer for non-MLO case. 1951 * 1952 * Return: peer in success 1953 * NULL in failure 1954 */ 1955 static inline 1956 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc, 1957 uint8_t *peer_mac, 1958 int mac_addr_is_aligned, 1959 uint8_t vdev_id, 1960 enum dp_mod_id mod_id) 1961 { 1962 struct dp_peer *ta_peer = NULL; 1963 struct dp_peer *peer = dp_peer_find_hash_find(soc, 1964 peer_mac, 0, vdev_id, 1965 mod_id); 1966 1967 if (peer) { 1968 /* mlo connection link peer, get mld peer with reference */ 1969 if (IS_MLO_DP_LINK_PEER(peer)) { 1970 /* increase mld peer ref_cnt */ 1971 if (QDF_STATUS_SUCCESS == 1972 dp_peer_get_ref(soc, peer->mld_peer, mod_id)) 1973 ta_peer = peer->mld_peer; 1974 else 1975 ta_peer = NULL; 1976 1977 /* release peer reference that added by hash find */ 1978 dp_peer_unref_delete(peer, mod_id); 1979 } else { 1980 /* mlo MLD peer or non-mlo link peer */ 1981 ta_peer = peer; 1982 } 1983 } else { 1984 dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT " vdev_id: %u", 1985 QDF_MAC_ADDR_REF(peer_mac), vdev_id); 1986 } 1987 1988 return ta_peer; 1989 } 1990 1991 /** 1992 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id 1993 * @soc: core DP soc context 1994 * @peer_id: peer id from peer object can be retrieved 1995 * @mod_id: ID of module requesting reference 1996 * 1997 * for MLO connection, get corresponding MLD peer, 1998 * otherwise get link peer for non-MLO case. 
1999 * 2000 * Return: peer in success 2001 * NULL in failure 2002 */ 2003 static inline 2004 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc, 2005 uint16_t peer_id, 2006 enum dp_mod_id mod_id) 2007 { 2008 struct dp_peer *ta_peer = NULL; 2009 struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id); 2010 2011 if (peer) { 2012 /* mlo connection link peer, get mld peer with reference */ 2013 if (IS_MLO_DP_LINK_PEER(peer)) { 2014 /* increase mld peer ref_cnt */ 2015 if (QDF_STATUS_SUCCESS == 2016 dp_peer_get_ref(soc, peer->mld_peer, mod_id)) 2017 ta_peer = peer->mld_peer; 2018 else 2019 ta_peer = NULL; 2020 2021 /* release peer reference that added by hash find */ 2022 dp_peer_unref_delete(peer, mod_id); 2023 } else { 2024 /* mlo MLD peer or non-mlo link peer */ 2025 ta_peer = peer; 2026 } 2027 } 2028 2029 return ta_peer; 2030 } 2031 2032 /** 2033 * dp_peer_mlo_delete() - peer MLO related delete operation 2034 * @peer: DP peer handle 2035 * Return: None 2036 */ 2037 static inline 2038 void dp_peer_mlo_delete(struct dp_peer *peer) 2039 { 2040 struct dp_peer *ml_peer; 2041 struct dp_soc *soc; 2042 2043 dp_info("peer " QDF_MAC_ADDR_FMT " type %d", 2044 QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type); 2045 2046 /* MLO connection link peer */ 2047 if (IS_MLO_DP_LINK_PEER(peer)) { 2048 ml_peer = peer->mld_peer; 2049 soc = ml_peer->vdev->pdev->soc; 2050 2051 /* if last link peer deletion, delete MLD peer */ 2052 if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0) 2053 dp_peer_delete(soc, peer->mld_peer, NULL); 2054 } 2055 } 2056 2057 /** 2058 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization 2059 * @soc: Soc handle 2060 * @peer: DP peer handle 2061 * @vdev_id: Vdev ID 2062 * @setup_info: peer setup information for MLO 2063 */ 2064 QDF_STATUS dp_peer_mlo_setup( 2065 struct dp_soc *soc, 2066 struct dp_peer *peer, 2067 uint8_t vdev_id, 2068 struct cdp_peer_setup_info *setup_info); 2069 2070 /** 2071 * 
 dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer
 *	   Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
 *				     peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				if (QDF_STATUS_SUCCESS ==
				    dp_peer_get_ref(NULL, link_peer, mod_id))
					primary_peer = link_peer;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		/* drop MLD ref; the extra ref taken above keeps the result */
		dp_peer_unref_delete(peer, mod_id);
	} else {
		/* legacy peer: transfer the lookup reference to the caller */
		primary_peer = peer;
	}

	return primary_peer;
}

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
				peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)

{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		/* hand the dp_peer reference back to the caller via handle */
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

/**
 * dp_get_peer_link_id() - Get Link peer Link ID
 * @peer: Datapath peer
 *
 * Return: Link peer Link ID
 */
uint8_t dp_get_peer_link_id(struct dp_peer *peer);
#else
/* Non-MLO build: every peer is treated as a legacy link peer */

#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc, 2353 uint16_t peer_id, 2354 dp_txrx_ref_handle *handle, 2355 enum dp_mod_id mod_id) 2356 2357 { 2358 return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id); 2359 } 2360 2361 static inline 2362 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id, 2363 uint8_t lmac_id) 2364 { 2365 return peer_id; 2366 } 2367 2368 static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc) 2369 { 2370 } 2371 2372 static inline uint8_t dp_get_peer_link_id(struct dp_peer *peer) 2373 { 2374 return 0; 2375 } 2376 #endif /* WLAN_FEATURE_11BE_MLO */ 2377 2378 static inline 2379 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer) 2380 { 2381 uint8_t i; 2382 2383 qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS * 2384 sizeof(struct dp_rx_tid_defrag)); 2385 2386 for (i = 0; i < DP_MAX_TIDS; i++) 2387 qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock); 2388 } 2389 2390 static inline 2391 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer) 2392 { 2393 uint8_t i; 2394 2395 for (i = 0; i < DP_MAX_TIDS; i++) 2396 qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock); 2397 } 2398 2399 #ifdef PEER_CACHE_RX_PKTS 2400 static inline 2401 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer) 2402 { 2403 qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock); 2404 txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH; 2405 qdf_list_create(&txrx_peer->bufq_info.cached_bufq, 2406 DP_RX_CACHED_BUFQ_THRESH); 2407 } 2408 2409 static inline 2410 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer) 2411 { 2412 qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq); 2413 qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock); 2414 } 2415 2416 #else 2417 static inline 2418 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer) 2419 { 2420 } 2421 2422 static inline 2423 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer) 2424 { 2425 
} 2426 #endif 2427 2428 /** 2429 * dp_peer_update_state() - update dp peer state 2430 * 2431 * @soc: core DP soc context 2432 * @peer: DP peer 2433 * @state: new state 2434 * 2435 * Return: None 2436 */ 2437 static inline void 2438 dp_peer_update_state(struct dp_soc *soc, 2439 struct dp_peer *peer, 2440 enum dp_peer_state state) 2441 { 2442 uint8_t peer_state; 2443 2444 qdf_spin_lock_bh(&peer->peer_state_lock); 2445 peer_state = peer->peer_state; 2446 2447 switch (state) { 2448 case DP_PEER_STATE_INIT: 2449 DP_PEER_STATE_ASSERT 2450 (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) || 2451 (peer_state != DP_PEER_STATE_LOGICAL_DELETE)); 2452 break; 2453 2454 case DP_PEER_STATE_ACTIVE: 2455 DP_PEER_STATE_ASSERT(peer, state, 2456 (peer_state == DP_PEER_STATE_INIT)); 2457 break; 2458 2459 case DP_PEER_STATE_LOGICAL_DELETE: 2460 DP_PEER_STATE_ASSERT(peer, state, 2461 (peer_state == DP_PEER_STATE_ACTIVE) || 2462 (peer_state == DP_PEER_STATE_INIT)); 2463 break; 2464 2465 case DP_PEER_STATE_INACTIVE: 2466 if (IS_MLO_DP_MLD_PEER(peer)) 2467 DP_PEER_STATE_ASSERT 2468 (peer, state, 2469 (peer_state == DP_PEER_STATE_ACTIVE)); 2470 else 2471 DP_PEER_STATE_ASSERT 2472 (peer, state, 2473 (peer_state == DP_PEER_STATE_LOGICAL_DELETE)); 2474 break; 2475 2476 case DP_PEER_STATE_FREED: 2477 if (peer->sta_self_peer) 2478 DP_PEER_STATE_ASSERT 2479 (peer, state, (peer_state == DP_PEER_STATE_INIT)); 2480 else 2481 DP_PEER_STATE_ASSERT 2482 (peer, state, 2483 (peer_state == DP_PEER_STATE_INACTIVE) || 2484 (peer_state == DP_PEER_STATE_LOGICAL_DELETE)); 2485 break; 2486 2487 default: 2488 qdf_spin_unlock_bh(&peer->peer_state_lock); 2489 dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT, 2490 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 2491 return; 2492 } 2493 peer->peer_state = state; 2494 qdf_spin_unlock_bh(&peer->peer_state_lock); 2495 dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n", 2496 peer_state, state, 2497 QDF_MAC_ADDR_REF(peer->mac_addr.raw)); 
2498 } 2499 2500 /** 2501 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer 2502 * list based on type of peer (Legacy or MLD peer) 2503 * 2504 * @vdev: DP vdev context 2505 * @func: function to be called for each peer 2506 * @arg: argument need to be passed to func 2507 * @mod_id: module_id 2508 * @peer_type: type of peer - MLO Link Peer or Legacy Peer 2509 * 2510 * Return: void 2511 */ 2512 static inline void 2513 dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev, 2514 dp_peer_iter_func *func, 2515 void *arg, enum dp_mod_id mod_id, 2516 enum dp_peer_type peer_type) 2517 { 2518 struct dp_peer *peer; 2519 struct dp_peer *tmp_peer; 2520 struct dp_soc *soc = NULL; 2521 2522 if (!vdev || !vdev->pdev || !vdev->pdev->soc) 2523 return; 2524 2525 soc = vdev->pdev->soc; 2526 2527 qdf_spin_lock_bh(&vdev->peer_list_lock); 2528 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, 2529 peer_list_elem, 2530 tmp_peer) { 2531 if (dp_peer_get_ref(soc, peer, mod_id) == 2532 QDF_STATUS_SUCCESS) { 2533 if ((peer_type == DP_PEER_TYPE_LEGACY && 2534 (IS_DP_LEGACY_PEER(peer))) || 2535 (peer_type == DP_PEER_TYPE_MLO_LINK && 2536 (IS_MLO_DP_LINK_PEER(peer)))) { 2537 (*func)(soc, peer, arg); 2538 } 2539 dp_peer_unref_delete(peer, mod_id); 2540 } 2541 } 2542 qdf_spin_unlock_bh(&vdev->peer_list_lock); 2543 } 2544 2545 #ifdef REO_SHARED_QREF_TABLE_EN 2546 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc, 2547 struct dp_peer *peer); 2548 #else 2549 static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc, 2550 struct dp_peer *peer) {} 2551 #endif 2552 2553 /** 2554 * dp_peer_check_wds_ext_peer() - Check WDS ext peer 2555 * 2556 * @peer: DP peer 2557 * 2558 * Return: True for WDS ext peer, false otherwise 2559 */ 2560 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer); 2561 2562 /** 2563 * dp_gen_ml_peer_id() - Generate MLD peer id for DP 2564 * 2565 * @soc: DP soc context 2566 * @peer_id: mld peer id 2567 * 2568 * Return: DP MLD peer id 2569 */ 
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#ifdef FEATURE_AST
/**
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds);
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_map_link_id_band() - Set link id to band mapping in txrx_peer
 * @peer: dp peer pointer
 *
 * Return: None
 */
void dp_map_link_id_band(struct dp_peer *peer);
#else
/* Per-band MLO link stats not compiled in: mapping is a no-op */
static inline
void dp_map_link_id_band(struct dp_peer *peer)
{
}
#endif
#endif /* _DP_PEER_H_ */