/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include <hal_api.h>
#include <hal_reo.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#include <cds_api.h>
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

#ifdef DP_LFR
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Setting SSN valid bit to %d",
		  __func__, valid);
}
#else
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid) {}
#endif

static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&: because the operands
		 * are binary rather than generic boolean, the functionality
		 * is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		&
		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}

static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max peer id %d ====>", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: peer map memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}
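/*
 * Sizing note (illustrative, not from the original source): each element of
 * peer_id_to_obj_map is a struct dp_peer pointer, so a configuration with
 * max_peers = 1024 on a 64-bit host allocates 1024 * 8 = 8 KB for the map.
 */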
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}

static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}

static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
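/*
 * Worked example (illustrative, not from the original source): with
 * max_peers = 33 and DP_PEER_HASH_LOAD_MULT = 2, hash_elems starts at 66;
 * dp_log2_ceil(66) walks 7 bits (66, 33, 16, 8, 4, 2, 1) giving log2 = 6,
 * then rounds up to 7 since 1 << 6 != 66. The table therefore gets 128 bins,
 * with peer_hash.mask = 0x7f and idx_bits = 7.
 */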
#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	/* allocate an array of TAILQ AST entry lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->ast_hash.bins);
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function removes the AST entry from the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the bin is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}
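/*
 * Usage sketch (illustrative): every caller must hold soc->ast_lock across
 * the lookup and any use of the returned entry, as dp_peer_add_ast() below
 * does, e.g.:
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
 *	if (ast_entry)
 *		ast_entry->is_active = TRUE;
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */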
/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	bool ast_entry_found = FALSE;

	if (!peer) {
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		  mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);
	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
				  DP_MAC_ADDR_LEN))) {
			ast_entry->ast_idx = hw_peer_id;
			soc->ast_table[hw_peer_id] = ast_entry;
			ast_entry->is_active = TRUE;
			peer_type = ast_entry->type;
			ast_entry_found = TRUE;
		}
	}

	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
					soc->ctrl_psoc, peer->peer_ids[0],
					hw_peer_id, vdev_id,
					mac_addr, peer_type);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			uint32_t flags)
{
	struct dp_ast_entry *ast_entry;
	struct dp_vdev *vdev = peer->vdev;
	uint8_t next_node_mac[6];
	int ret = -1;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer's vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
		  mac_addr[3], mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* If an AST entry already exists, just return from here */
	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);

	if (ast_entry) {
		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) {
			ast_entry->is_active = TRUE;
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}

		/*
		 * WAR for HK 1.x AST issue:
		 * If an AST entry with the same MAC address already exists
		 * and is mapped to a different radio, and if the current
		 * radio is the primary radio, delete the existing AST entry
		 * and return.
		 *
		 * A new AST entry will be created again on the next
		 * SA_invalid frame.
		 */
		if ((ast_entry->pdev_id != vdev->pdev->pdev_id) &&
		    vdev->pdev->is_primary) {
			qdf_print("Deleting ast_pdev=%d pdev=%d addr=%pM\n",
				  ast_entry->pdev_id,
				  vdev->pdev->pdev_id, mac_addr);
			dp_peer_del_ast(soc, ast_entry);
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
		return 0;
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->peer = peer;
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.added, 1);
	dp_peer_ast_hash_add(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
		if (QDF_STATUS_SUCCESS ==
				soc->cdp_soc.ol_ops->peer_add_wds_entry(
				peer->vdev->osif_vdev,
				mac_addr,
				next_node_mac,
				flags))
			return 0;
	}

	return ret;
}
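/*
 * Caller sketch (illustrative, hypothetical names): WDS source port learning
 * would register a station seen behind a remote repeater roughly as:
 *
 *	if (dp_peer_add_ast(soc, ta_peer, wds_src_mac,
 *			    CDP_TXRX_AST_TYPE_WDS, flags))
 *		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 *			  "WDS AST add failed");
 *
 * where ta_peer is the transmitting (direct) peer and wds_src_mac is the
 * source MAC address learned from the received frame.
 */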
/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from the peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;

	if (ast_entry->next_hop)
		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						ast_entry->mac_addr.raw);

	soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	qdf_mem_free(ast_entry);
}

/*
 * dp_peer_update_ast() - Update AST entry for a roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer in the peer and
 * soc tables. It assumes the caller has taken the ast lock to protect
 * access to these tables.
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 on failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF))
		return 0;

	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
			peer->vdev->osif_vdev,
			ast_entry->mac_addr.raw,
			peer->mac_addr.raw,
			flags);

	return ret;
}
/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	return;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
#endif

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of locating the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			((peer->vdev->vdev_id == vdev_id) ||
			 (vdev_id == DP_VDEV_ALL))) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the bin is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}
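/*
 * Locking sketch (illustrative): the deletion path described above is
 * expected to look roughly like the following, with peer_ref_mutex held
 * across both the final ref count decrement and the hash removal:
 *
 *	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 *	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 *		dp_peer_find_hash_remove(soc, peer);
 *		... release peer resources ...
 *	}
 *	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 */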
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}

static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}

int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}
	return 0; /* success */
}

void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d):\n"
		"ssn: %d\n"
		"curr_idx : %d\n"
		"pn_31_0 : %08x\n"
		"pn_63_32 : %08x\n"
		"pn_95_64 : %08x\n"
		"pn_127_96 : %08x\n"
		"last_rx_enq_tstamp : %08x\n"
		"last_rx_deq_tstamp : %08x\n"
		"rx_bitmap_31_0 : %08x\n"
		"rx_bitmap_63_32 : %08x\n"
		"rx_bitmap_95_64 : %08x\n"
		"rx_bitmap_127_96 : %08x\n"
		"rx_bitmap_159_128 : %08x\n"
		"rx_bitmap_191_160 : %08x\n"
		"rx_bitmap_223_192 : %08x\n"
		"rx_bitmap_255_224 : %08x\n",
		rx_tid->tid,
		queue_status->ssn, queue_status->curr_idx,
		queue_status->pn_31_0, queue_status->pn_63_32,
		queue_status->pn_95_64, queue_status->pn_127_96,
		queue_status->last_rx_enq_tstamp,
		queue_status->last_rx_deq_tstamp,
		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
		queue_status->rx_bitmap_159_128,
		queue_status->rx_bitmap_191_160,
		queue_status->rx_bitmap_223_192,
		queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		"curr_mpdu_cnt : %d\n"
		"curr_msdu_cnt : %d\n"
		"fwd_timeout_cnt : %d\n"
		"fwd_bar_cnt : %d\n"
		"dup_cnt : %d\n"
		"frms_in_order_cnt : %d\n"
		"bar_rcvd_cnt : %d\n"
		"mpdu_frms_cnt : %d\n"
		"msdu_frms_cnt : %d\n"
		"total_byte_cnt : %d\n"
		"late_recv_mpdu_cnt : %d\n"
		"win_jump_2k : %d\n"
		"hole_cnt : %d\n",
		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		queue_status->msdu_frms_cnt, queue_status->total_cnt,
		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
		queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req : %d\n"
			"Addba Resp : %d\n"
			"Addba Resp success : %d\n"
			"Addba Resp failed : %d\n"
			"Delba Req received : %d\n"
			"Delba Tx success : %d\n"
			"Delba Tx Fail : %d\n"
			"BA window size : %d\n"
			"Pn size : %d\n",
			rx_tid->num_of_addba_req,
			rx_tid->num_of_addba_resp,
			rx_tid->num_addba_rsp_success,
			rx_tid->num_addba_rsp_failed,
			rx_tid->num_of_delba_req,
			rx_tid->delba_tx_success_cnt,
			rx_tid->delba_tx_fail_cnt,
			rx_tid->ba_win_size,
			rx_tid->pn_size);
}
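/*
 * Usage sketch (illustrative): dp_rx_tid_stats_cb is suitable as the
 * callback of a REO stats query, assuming the CMD_GET_QUEUE_STATS command
 * type, e.g.:
 *
 *	struct hal_reo_cmd_params params;
 *
 *	qdf_mem_zero(&params, sizeof(params));
 *	params.std.need_status = 1;
 *	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
 *	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
 *	dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, &params,
 *			dp_rx_tid_stats_cb, rx_tid);
 */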
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
		0 /* is aligned */, vdev_id);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		  peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ref_cnt: %d", __func__,
			  qdf_atomic_read(&peer->ref_cnt));
		soc->peer_id_to_obj_map[peer_id] = peer;

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle: generic soc handle
 * @peer_id: peer_id from firmware
 * @hw_peer_id: ast index for this peer
 * @vdev_id: vdev ID
 * @peer_mac_addr: mac address of the peer
 *
 * Associate the peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */
void
dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, uint8_t *peer_mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
		  "%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
		  hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
		  peer_mac_addr[5], vdev_id);

	peer = soc->peer_id_to_obj_map[peer_id];

	/* hw_peer_id is unsigned, so only the upper bound needs checking */
	if (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "invalid hw_peer_id: %d", hw_peer_id);
		qdf_assert_always(0);
	}

	/*
	 * Check if a peer already exists for this peer_id. If so, this
	 * peer map event is in response to a wds peer add wmi command sent
	 * during wds source port learning. In this case just add the ast
	 * entry to the existing peer ast_list.
	 */
	if (!peer)
		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					hw_peer_id, vdev_id);

	if (peer) {
		qdf_assert_always(peer->vdev);
		/*
		 * For every peer map message, check whether this is the
		 * vdev's bss peer and flag it accordingly.
		 */
		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
				  DP_MAC_ADDR_LEN))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				  "vdev bss_peer!!!!");
			peer->bss_peer = 1;
			peer->vdev->vap_bss_peer = peer;
		}
	}

	dp_peer_map_ast(soc, peer, peer_mac_addr,
			hw_peer_id, vdev_id);
}
void
dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
{
	struct dp_peer *peer;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint8_t i;

	peer = __dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		  soc, peer_id, peer);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Received unmap event for invalid peer_id %u",
			  __func__, peer_id);
		return;
	}

	soc->peer_id_to_obj_map[peer_id] = NULL;
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
				peer_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
}

static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if ((reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc update failed(%d): tid %d",
			  __func__,
			  reo_status->rx_queue_status.header.status,
			  rx_tid->tid);
	}
}

/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 *
 * Return: peer instance pointer
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
		uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	/* Multiple peer ids? How do we know which peer id? */
	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/*
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	}

	dp_set_ssn_valid_flag(&params, 0);

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
			dp_rx_tid_update_cb, rx_tid);

	rx_tid->ba_win_size = ba_window_size;
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			peer->vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
	}
	return 0;
}

/*
 * dp_reo_desc_free() - Callback to free reo descriptor memory after
 * HW cache flush
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;

	if ((reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc flush failed(%d): tid %d",
			  __func__,
			  reo_status->fl_cache_status.header.status,
			  freedesc->rx_tid.tid);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
	qdf_mem_unmap_nbytes_single(soc->osdev,
		rx_tid->hw_qdesc_paddr,
		QDF_DMA_BIDIRECTIONAL,
		rx_tid->hw_qdesc_alloc_size);
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
	qdf_mem_free(freedesc);
}
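/*
 * Address split note (illustrative): REO commands carry the queue descriptor
 * address as separate low/high words, as in dp_rx_tid_update_wifi3() above;
 * e.g. hw_qdesc_paddr = 0x123456780 yields addr_lo = 0x23456780 and
 * addr_hi = 0x1.
 */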
#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif
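/*
 * Examples (illustrative): on the emulation build above, a descriptor mapped
 * at 0x4fffffff is rejected (triggering a re-allocation in the setup routine
 * below), while 0x50000000 passes. Likewise, if hal_get_reo_qdesc_align()
 * returns 128 and an allocation lands at 0x1040, qdf_align(0x1040, 128)
 * rounds the descriptor address up to 0x1080.
 */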
/*
 * dp_rx_tid_setup_wifi3() - Set up receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;

	if (peer->delete_in_progress)
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);
	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;
#ifdef notyet
	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
#else
	/* TODO: HW queue descriptors are currently allocated based on the
	 * max BA window size for all QOS TIDs, so that the same descriptor
	 * can be reused later when an ADDBA request is received. This should
	 * be changed to allocate HW queue descriptors based on the BA window
	 * size being negotiated (0 for non BA cases), reallocate when the BA
	 * window size changes, and also send a WMI message to FW to change
	 * the REO queue descriptor in the Rx peer entry as part of
	 * dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size);
#endif

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid an unnecessary extra allocation for alignment, first try
	 * allocating the exact size and check whether the address is already
	 * aligned.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc alloc failed: tid %d",
			  __func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment.
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: Rx tid HW desc alloc failed: tid %d",
				  __func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Total Size %d Aligned Addr %pK",
			  __func__, rx_tid->hw_qdesc_alloc_size,
			  hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		if (alloc_tries++ < 10)
			goto try_desc_alloc;
		else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				  __func__, tid);
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
	}
	return 0;
}

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
			  __func__,
			  reo_status->rx_queue_status.header.status,
			  freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "%s: rx_tid: %d status: %d", __func__,
		  freedesc->rx_tid.tid,
		  reo_status->rx_queue_status.header.status);

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* Flush and invalidate the REO descriptor from HW cache:
		 * base and extension descriptors should be flushed
		 * separately.
		 */
		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			rx_tid->ba_win_size);
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);

		/* Flush reo extension descriptors */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							NULL,
							NULL)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s: fail to send CMD_CACHE_FLUSH:"
					" tid %d desc %pK", __func__,
					rx_tid->tid,
					(void *)(rx_tid->hw_qdesc_paddr));
			}
		}

		/* Flush base descriptor */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							dp_reo_desc_free,
							(void *)desc)) {
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd returns failure, the related
			 * TID queue desc should be unmapped. The local
			 * reo_desc, together with the TID queue desc, also
			 * needs to be freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to send REO cmd to flush cache: tid %d",
				  __func__, rx_tid->tid);
			qdf_mem_zero(&reo_status, sizeof(reo_status));
			reo_status.fl_cache_status.header.status = 0;
			dp_reo_desc_free(soc, (void *)desc, &reo_status);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
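/*
 * Flow note (illustrative summary of the code above and below): deleting a
 * TID queue is a three step sequence - dp_rx_tid_delete_wifi3() invalidates
 * the queue (vld = 0) via CMD_UPDATE_RX_REO_QUEUE, dp_rx_tid_delete_cb()
 * then flushes the base and extension descriptors from the HW cache via
 * CMD_FLUSH_CACHE, and dp_reo_desc_free() finally unmaps and frees the
 * descriptor memory.
 */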
/*
 * dp_rx_tid_delete_wifi3() - Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: malloc failed for freedesc: tid %d",
			  __func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 0;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}

#ifdef DP_LFR
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "Setting up TID %d for peer %pK peer->local_id %d",
			  tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
#endif

/*
 * dp_peer_rx_init() - Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;

#ifdef notyet /* TODO: See if this is required for exception handling */
		/* invalid sequence number */
		peer->tids_last_seq[tid] = 0xffff;
#endif
	}

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Set up the rx tid queue for TID 0 now; queues for the other TIDs
	 * are set up on receiving the first packet for that TID, which would
	 * otherwise trigger a NULL REO queue error.
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Set up the rest of the TIDs to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
	 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
}

/*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);

			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);

			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	for (tid = 0; tid < DP_MAX_TIDS; tid++)
		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}

/*
 * dp_peer_cleanup() - Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer);
}

/*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
 * response tx completion
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
				      uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d addba rsp tx completion failed!",
			  __func__, tid);
		return QDF_STATUS_SUCCESS;
	}

	rx_tid->num_addba_rsp_success++;
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			  __func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return QDF_STATUS_SUCCESS;
}
/*
 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 * @peer_handle: Datapath peer handle
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(void *peer_handle,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is already setup",
			  __func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, 0)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->ba_win_size = buffersize;
	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_set_addba_response() - Set a user-defined ADDBA response status code
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @statuscode: response status code to be set
 */
void dp_set_addba_response(void *peer_handle, uint8_t tid,
			   uint16_t statuscode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->userstatuscode = statuscode;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}
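/*
 * Usage sketch (illustrative): a control path caller can force subsequent
 * ADDBA requests on a TID to be answered with a non-success status code,
 * assuming a define such as IEEE80211_STATUS_REFUSED is available from the
 * 802.11 headers:
 *
 *	dp_set_addba_response(peer_handle, tid, IEEE80211_STATUS_REFUSED);
 *
 * dp_addba_requestprocess_wifi3() then reports that code back to the
 * requester through dp_addba_responsesetup_wifi3() via rx_tid->statuscode.
 */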
/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(void *peer_handle,
			   int tid, uint16_t reasoncode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace it with a new one without the queue extension descriptor
	 * to save memory
	 */
	rx_tid->num_of_delba_req++;
	dp_rx_tid_update_wifi3(peer, tid, 1, 0);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}

/*
 * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion, retrying
 * the DELBA if the transmission failed
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_tx_completion_wifi3(void *peer_handle,
				 uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev);
		}
		return QDF_STATUS_SUCCESS;
	} else {
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}

void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
	qdf_nbuf_t msdu_list)
{
	while (msdu_list) {
		qdf_nbuf_t msdu = msdu_list;

		msdu_list = qdf_nbuf_next(msdu_list);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "discard rx %pK from partly-deleted peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  msdu, peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
		qdf_nbuf_free(msdu);
	}
}
void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
		   qdf_nbuf_t msdu_list)
{
	while (msdu_list) {
		qdf_nbuf_t msdu = msdu_list;

		msdu_list = qdf_nbuf_next(msdu_list);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "discard rx %pK from partly-deleted peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  msdu, peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
		qdf_nbuf_free(msdu);
	}
}

/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @vdev_handle: Datapath vdev handle
 * @peer_handle: Datapath peer handle
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 * Return: none
 */
void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
		      struct cdp_peer *peer_handle,
		      enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (sec_type != cdp_sec_type_wapi) {
				params.u.upd_queue_params.update_pn_valid = 0;
			} else {
				/*
				 * Setting PN valid bit for WAPI sec_type,
				 * since WAPI PN has to be started with
				 * predefined value
				 */
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
					dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "PN Check not setup for TID %d", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
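/*
 * Illustrative sketch (not part of the driver): a control-path caller could
 * enable CCMP PN checking for a peer right after unicast key installation.
 * The handle names and the zeroed start PN below are placeholders; only the
 * WAPI case actually consumes rx_pn as a predefined starting value.
 *
 *	uint32_t start_pn[4] = { 0, 0, 0, 0 };
 *
 *	dp_set_pn_check_wifi3(vdev_handle, peer_handle,
 *			      cdp_sec_type_aes_ccmp, start_pn);
 */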
/**
 * dp_rx_sec_ind_handler() - handle security indication from target
 * @soc_handle: Datapath soc handle
 * @peer_id: peer id corresponding to the peer
 * @sec_type: security type negotiated for the peer
 * @is_unicast: flag for unicast (vs. multicast) keys
 * @michael_key: TKIP michael key
 * @rx_pn: receive PN starting value
 *
 * Record the negotiated security type for the peer so the rx path can
 * apply the matching PN and MIC checks.
 *
 * Return: none
 */
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
		      enum htt_sec_type sec_type, int is_unicast,
		      u_int32_t *michael_key, u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
		  "%s key of type %d",
		  peer,
		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		     sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *)&peer->tids_last_pn[i],
				(u_int8_t *)rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */
}
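/*
 * Illustrative sketch (not part of the driver): once the indication above
 * has run, per-packet code can recover the negotiated cipher for a peer.
 * The helper below is hypothetical and only demonstrates how the
 * security[] array is indexed:
 *
 *	static int example_peer_sec_type(struct dp_peer *peer, int is_unicast)
 *	{
 *		int idx = is_unicast ? dp_sec_ucast : dp_sec_mcast;
 *
 *		return peer->security[idx].sec_type;
 *	}
 */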
#ifndef CONFIG_WIN
/**
 * dp_register_peer() - Register peer into physical device
 * @pdev_handle: data path device instance
 * @sta_desc: peer description
 *
 * Register peer into physical device and mark it connected
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
			    struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
					sta_desc->sta_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_clear_peer() - remove peer from physical device
 * @pdev_handle: data path device instance
 * @local_id: local peer id
 *
 * Mark the peer as disconnected from the physical device
 *
 * Return: QDF_STATUS_SUCCESS success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle: data path device instance
 * @vdev_handle: virtual interface instance
 * @peer_addr: peer mac address
 * @local_id: local peer id of the found peer (output)
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
				    struct cdp_vdev *vdev_handle,
				    uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		qdf_atomic_dec(&peer->ref_cnt);
		return NULL;
	}

	*local_id = peer->local_id;
	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}
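/*
 * Illustrative sketch (not part of the driver): typical lookup usage. Note
 * that the reference taken by dp_peer_find_hash_find() is dropped before the
 * function returns, so the returned pointer is only safe while the caller
 * otherwise guarantees the peer stays alive. Names below are placeholders.
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_find_peer_by_addr_and_vdev(pdev_handle, vdev_handle,
 *					     peer_mac, &local_id);
 *	if (peer)
 *		use local_id / peer while peer deletion is blocked
 */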
/**
 * dp_local_peer_id() - Get local peer id of a peer instance
 * @peer: peer instance
 *
 * Return: local peer id
 */
uint16_t dp_local_peer_id(void *peer)
{
	return ((struct dp_peer *)peer)->local_id;
}

/**
 * dp_peer_find_by_local_id() - Find peer by local peer id
 * @pdev_handle: data path device instance
 * @local_id: local peer id to look up
 *
 * Find peer by local peer id within physical device
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
				   "Incorrect local id %u", local_id);
		return NULL;
	}
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
	return peer;
}

/**
 * dp_peer_state_update() - update peer local state
 * @pdev_handle: data path device instance
 * @peer_mac: peer mac address
 * @state: new peer local state
 *
 * Update peer local state
 *
 * Return: QDF_STATUS_SUCCESS state updated
 *         QDF_STATUS_E_FAILURE peer not found
 */
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
				enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
	if (NULL == peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdevid() - Get id of the vdev to which the peer is registered
 * @peer_handle: peer instance
 * @vdev_id: virtual interface id to which the peer is registered (output)
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
		 peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdev_by_sta_id() - Get vdev instance by local peer id
 * @pdev_handle: data path device instance
 * @sta_id: local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
				       uint8_t sta_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Invalid sta id passed");
		return NULL;
	}

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_get_vdev_for_peer() - Get vdev instance to which the peer belongs
 * @peer_handle: peer instance
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle: peer instance
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

/**
 * dp_get_peer_state() - Get local peer state
 * @peer_handle: peer instance
 *
 * Return: peer state
 */
int dp_get_peer_state(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
	return peer->state;
}
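/*
 * Illustrative sketch (not part of the driver): the accessors above support
 * a simple connection lifecycle driven by the control path. The sequence and
 * handle names are assumptions for illustration:
 *
 *	dp_register_peer(pdev_handle, &sta_desc);	peer -> CONN
 *	dp_peer_state_update(pdev_handle, peer_mac,
 *			     OL_TXRX_PEER_STATE_AUTH);	peer -> AUTH
 *	dp_get_peer_state(peer)  - gates tx/rx in the data path
 *	dp_clear_peer(pdev_handle, local_id);		peer -> DISC
 */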
/**
 * dp_get_last_mgmt_timestamp() - get timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @subtype: management frame type
 * @timestamp: last timestamp (output)
 *
 * Return: true if timestamp is retrieved for valid peer else false
 */
bool dp_get_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
				u8 subtype, qdf_time_t *timestamp)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;
	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peer with same MAC address,
		 * modified find will take care of finding the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				*timestamp = peer->last_assoc_rcvd;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				*timestamp = peer->last_disassoc_rcvd;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}

/**
 * dp_update_last_mgmt_timestamp() - set timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @timestamp: time to be set
 * @subtype: management frame type
 *
 * Return: true if timestamp is updated for valid peer else false
 */
bool dp_update_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
				   qdf_time_t timestamp, u8 subtype)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;
	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peer with same MAC address,
		 * modified find will take care of finding the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				peer->last_assoc_rcvd = timestamp;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				peer->last_disassoc_rcvd = timestamp;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}
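/*
 * Illustrative sketch (not part of the driver): the pair above can be used
 * to rate-limit handling of repeated management frames. The flow below is an
 * assumed example for assoc requests; names and the threshold policy are
 * placeholders.
 *
 *	qdf_time_t last;
 *
 *	if (dp_get_last_mgmt_timestamp(ppdev, sa,
 *				       IEEE80211_FC0_SUBTYPE_ASSOC_REQ,
 *				       &last) &&
 *	    (now - last) < threshold)
 *		drop the frame as part of a duplicate burst;
 *	else
 *		dp_update_last_mgmt_timestamp(ppdev, sa, now,
 *					      IEEE80211_FC0_SUBTYPE_ASSOC_REQ);
 */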
/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev: data path device instance
 *
 * Initialize the pool of local peer ids for the physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}

/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev: data path device instance
 * @peer: new peer instance
 *
 * Allocate a local peer id and map it to the peer
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
}

/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev: data path device instance
 * @peer: peer instance whose id should be freed
 *
 * Return the peer's local id to the freelist
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
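/*
 * Illustrative sketch (not part of the driver): the three functions above
 * implement an intrusive freelist inside an array. pool[i] holds the index
 * of the next free id, and the end of the list is marked by an id whose
 * pool entry points to itself. Assuming OL_TXRX_NUM_LOCAL_PEER_IDS == 4
 * for brevity:
 *
 *	after init:	freelist=0, pool = {1, 2, 3, 4, 4}
 *	alloc -> id 0:	freelist=1
 *	alloc -> id 1:	freelist=2
 *	free(id 0):	freelist=0, pool[0]=2
 *	alloc -> id 0:	freelist=2 (most recently freed id is reused first)
 */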
#endif

/**
 * dp_get_peer_mac_addr_frm_id() - get mac address of the peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer id of the peer
 * @peer_mac: mac address of the peer (output)
 *
 * Return: vdev_id of the vap the peer is associated with,
 *         CDP_INVALID_VDEV_ID if the peer is not found
 */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "soc %pK peer_id %d", soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		return CDP_INVALID_VDEV_ID;
	}

	qdf_mem_copy(peer_mac, peer->mac_addr.raw, DP_MAC_ADDR_LEN);
	return peer->vdev->vdev_id;
}

/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: none
 */
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
			 void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;

	if (!dp_stats_cmd_cb)
		return;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						cb_ctxt);
			} else {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						rx_tid);
			}

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}
}

/**
 * dp_set_michael_key() - set michael key
 * @peer_handle: Datapath peer handle
 * @is_unicast: true for unicast key, false for multicast key
 * @key: michael key to install
 *
 * Return: none
 */
void dp_set_michael_key(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	uint8_t sec_index = is_unicast ? 1 : 0;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		return;
	}

	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);
}
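/*
 * Illustrative sketch (not part of the driver): a hypothetical callback that
 * could be passed to dp_peer_rxtid_stats(). The (soc, cb_ctxt, reo_status)
 * shape matches the callbacks handed to dp_reo_send_cmd() elsewhere in the
 * datapath; the status check below assumes the hal_reo_queue_status layout.
 *
 *	static void example_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *					    union hal_reo_status *reo_status)
 *	{
 *		struct hal_reo_queue_status *st = &reo_status->queue_status;
 *
 *		if (st->header.status != HAL_REO_CMD_SUCCESS)
 *			return;
 *		accumulate or print the per-TID queue stats from *st
 *	}
 *
 *	dp_peer_rxtid_stats(peer, example_rx_tid_stats_cb, NULL);
 */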