/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include <hal_api.h>
#include <hal_reo.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#include <cds_api.h>
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

#ifdef DP_LFR
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Setting SSN valid bit to %d",
		  __func__, valid);
}
#else
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid) {};
#endif

static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&.
		 * Because the operands are binary rather than generic boolean,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		& (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
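
/*
 * Illustrative sketch (not part of the driver build): the branchless
 * compare above returns 0 on a match and 1 on a mismatch, like memcmp
 * reduced to two word compares. Assuming a zero-padded
 * union dp_align_mac_addr, a hypothetical wrapper looks like:
 */
#if 0 /* example only */
static bool dp_example_mac_equal(union dp_align_mac_addr *a,
				 union dp_align_mac_addr *b)
{
	/* 0 means "equal", mirroring memcmp()-style semantics */
	return dp_peer_find_mac_addr_cmp(a, b) == 0;
}
#endif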

static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max peer id %d ====>", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: peer map memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}

static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}
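
/*
 * Illustrative self-check (not compiled into the driver): dp_log2_ceil()
 * returns the smallest n such that (1 << n) >= value, which is what the
 * hash sizing below depends on.
 */
#if 0 /* example only */
static void dp_example_log2_ceil_check(void)
{
	qdf_assert(dp_log2_ceil(1) == 0);
	qdf_assert(dp_log2_ceil(8) == 3);	/* exact power of two */
	qdf_assert(dp_log2_ceil(9) == 4);	/* rounded up to 16 */
}
#endif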

static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}

static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
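
/*
 * Worked example (illustrative only; exact values depend on the byte
 * layout of union dp_align_mac_addr): for MAC 00:11:22:33:44:55 read as
 * little-endian 16-bit words 0x1100, 0x3322 and 0x5544, the raw fold is
 * 0x1100 ^ 0x3322 ^ 0x5544 = 0x7766. With idx_bits = 8 the high byte is
 * XOR-folded down (0x7766 ^ 0x77 = 0x7711) and mask 0xff keeps the low
 * bits, giving bin 0x11. The extra fold spreads entropy from the high
 * bits into the final index.
 */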

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	/* allocate an array of TAILQ AST entry lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->ast_hash.bins);
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 *
 * This function removes the AST entry from the soc AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the bin list is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 *
 * It assumes the caller has taken the ast lock to protect access to
 * the AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	bool ast_entry_found = FALSE;

	if (!peer) {
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		mac_addr[1], mac_addr[2], mac_addr[3],
		mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);
	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
				DP_MAC_ADDR_LEN))) {
			ast_entry->ast_idx = hw_peer_id;
			soc->ast_table[hw_peer_id] = ast_entry;
			ast_entry->is_active = TRUE;
			peer_type = ast_entry->type;
			ast_entry_found = TRUE;
		}
	}

	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
				soc->ctrl_psoc, peer->peer_ids[0],
				hw_peer_id, vdev_id,
				mac_addr, peer_type);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}
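
/*
 * Usage sketch (illustrative only): lookups must run under ast_lock,
 * since dp_peer_ast_hash_find() itself takes no lock. A hypothetical
 * caller checking whether a destination is a known AST node:
 */
#if 0 /* example only */
static bool dp_example_is_known_ast(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_ast_entry *ase;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find(soc, mac);
	qdf_spin_unlock_bh(&soc->ast_lock);
	/* Note: the entry may be freed once the lock is dropped */
	return ase != NULL;
}
#endif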

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: type of the AST entry
 * @flags: AST configuration flags passed to the target
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: 0 if new entry is allocated,
 *        -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			uint32_t flags)
{
	struct dp_ast_entry *ast_entry;
	struct dp_vdev *vdev = peer->vdev;
	uint8_t next_node_mac[6];
	int ret = -1;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Peers vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
		mac_addr[3], mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* If AST entry already exists, just return from here */
	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);

	if (ast_entry) {
		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) {
			ast_entry->is_active = TRUE;
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}

		/*
		 * WAR for HK 1.x AST issue
		 * If an AST entry with the same mac address already exists
		 * and is mapped to a different radio, and if the current
		 * radio is the primary radio, delete the existing AST entry
		 * and return.
		 *
		 * A new AST entry will be created again on the next
		 * SA_invalid frame.
		 */
		if ((ast_entry->pdev_id != vdev->pdev->pdev_id) &&
		    vdev->pdev->is_primary) {
			qdf_print("Deleting ast_pdev=%d pdev=%d addr=%pM\n",
				  ast_entry->pdev_id,
				  vdev->pdev->pdev_id, mac_addr);
			dp_peer_del_ast(soc, ast_entry);
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
		return 0;
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->peer = peer;
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.added, 1);
	dp_peer_ast_hash_add(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
		if (QDF_STATUS_SUCCESS ==
			soc->cdp_soc.ol_ops->peer_add_wds_entry(
				peer->vdev->osif_vdev,
				mac_addr,
				next_node_mac,
				flags))
			return 0;
	}

	return ret;
}

/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;

	if (ast_entry->next_hop)
		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						ast_entry->mac_addr.raw);

	soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	qdf_mem_free(ast_entry);
}
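
/*
 * Usage sketch (illustrative only, hypothetical caller): WDS source-port
 * learning would add a next-hop entry for a station heard behind a
 * repeater roughly as below, with sa being the learned source address.
 */
#if 0 /* example only */
static void dp_example_wds_learn(struct dp_soc *soc, struct dp_peer *ta_peer,
				 uint8_t *sa)
{
	/* returns 0 when the entry was added or already existed */
	if (dp_peer_add_ast(soc, ta_peer, sa, CDP_TXRX_AST_TYPE_WDS, 0))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WDS AST add failed");
}
#endif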

/*
 * dp_peer_update_ast() - Update AST entry for a roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to point at the roamed peer in the
 * peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: 0 if ast entry is updated successfully
 *        -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF))
		return 0;

	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
			peer->vdev->osif_vdev,
			ast_entry->mac_addr.raw,
			peer->mac_addr.raw,
			flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}

#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	return;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
		struct dp_ast_entry *ast_entry,
		enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
#endif

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of locating the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}
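
/*
 * Usage sketch (illustrative only): every successful
 * dp_peer_find_hash_find() call returns with the peer reference count
 * already incremented, so the caller owns one reference and must drop it
 * when done:
 */
#if 0 /* example only */
static bool dp_example_peer_exists(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(soc, mac, 0 /* not aligned */,
				      DP_VDEV_ALL);
	if (!peer)
		return false;

	dp_peer_unref_delete(peer);	/* drop the reference from find */
	return true;
}
#endif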

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the bin list is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}

void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}

static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}

int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}
	return 0; /* success */
}

void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
		"ssn: %d\n"
		"curr_idx : %d\n"
		"pn_31_0 : %08x\n"
		"pn_63_32 : %08x\n"
		"pn_95_64 : %08x\n"
		"pn_127_96 : %08x\n"
		"last_rx_enq_tstamp : %08x\n"
		"last_rx_deq_tstamp : %08x\n"
		"rx_bitmap_31_0 : %08x\n"
		"rx_bitmap_63_32 : %08x\n"
		"rx_bitmap_95_64 : %08x\n"
		"rx_bitmap_127_96 : %08x\n"
		"rx_bitmap_159_128 : %08x\n"
		"rx_bitmap_191_160 : %08x\n"
		"rx_bitmap_223_192 : %08x\n"
		"rx_bitmap_255_224 : %08x\n",
		rx_tid->tid,
		queue_status->ssn, queue_status->curr_idx,
		queue_status->pn_31_0, queue_status->pn_63_32,
		queue_status->pn_95_64, queue_status->pn_127_96,
		queue_status->last_rx_enq_tstamp,
		queue_status->last_rx_deq_tstamp,
		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
		queue_status->rx_bitmap_159_128,
		queue_status->rx_bitmap_191_160,
		queue_status->rx_bitmap_223_192,
		queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		"curr_mpdu_cnt : %d\n"
		"curr_msdu_cnt : %d\n"
		"fwd_timeout_cnt : %d\n"
		"fwd_bar_cnt : %d\n"
		"dup_cnt : %d\n"
		"frms_in_order_cnt : %d\n"
		"bar_rcvd_cnt : %d\n"
		"mpdu_frms_cnt : %d\n"
		"msdu_frms_cnt : %d\n"
		"total_byte_cnt : %d\n"
		"late_recv_mpdu_cnt : %d\n"
		"win_jump_2k : %d\n"
		"hole_cnt : %d\n",
		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		queue_status->msdu_frms_cnt, queue_status->total_cnt,
		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
		queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req : %d\n"
			"Addba Resp : %d\n"
			"Addba Resp success : %d\n"
			"Addba Resp failed : %d\n"
			"Delba Req received : %d\n"
			"Delba Tx success : %d\n"
			"Delba Tx Fail : %d\n"
			"BA window size : %d\n"
			"Pn size : %d\n",
			rx_tid->num_of_addba_req,
			rx_tid->num_of_addba_resp,
			rx_tid->num_addba_rsp_success,
			rx_tid->num_addba_rsp_failed,
			rx_tid->num_of_delba_req,
			rx_tid->delba_tx_success_cnt,
			rx_tid->delba_tx_fail_cnt,
			rx_tid->ba_win_size,
			rx_tid->pn_size);
}

static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
		0 /* is aligned */, vdev_id);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ref_cnt: %d", __func__,
			qdf_atomic_read(&peer->ref_cnt));
		soc->peer_id_to_obj_map[peer_id] = peer;

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @hw_peer_id - ast index for this peer
 * @vdev_id - vdev ID
 * @peer_mac_addr - mac address of the peer
 *
 * associate the peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */

void
dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, uint8_t *peer_mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
		peer_mac_addr[5], vdev_id);

	peer = soc->peer_id_to_obj_map[peer_id];

	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"invalid hw_peer_id: %d", hw_peer_id);
		qdf_assert_always(0);
	}

	/*
	 * Check if a peer already exists for this peer_id. If so, this
	 * peer map event is in response to a wds peer add wmi command sent
	 * during wds source port learning. In this case just add the ast
	 * entry to the existing peer ast_list.
	 */
	if (!peer)
		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					hw_peer_id, vdev_id);

	if (peer) {
		qdf_assert_always(peer->vdev);
		/*
		 * For every peer map message, check whether this is the
		 * vdev's bss peer and flag it.
		 */
		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
				DP_MAC_ADDR_LEN))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				"vdev bss_peer!!!!");
			peer->bss_peer = 1;
			peer->vdev->vap_bss_peer = peer;
		}
	}

	dp_peer_map_ast(soc, peer, peer_mac_addr,
			hw_peer_id, vdev_id);
}

void
dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
{
	struct dp_peer *peer;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint8_t i;

	peer = __dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Received unmap event for invalid peer_id"
			" %u", __func__, peer_id);
		return;
	}

	soc->peer_id_to_obj_map[peer_id] = NULL;
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
				peer_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}

void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
}

static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if ((reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc update failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			rx_tid->tid);
	}
}

/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 *
 * Return: peer instance pointer
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
		uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	/* Multiple peer ids? How can we know the peer id? */
	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/*
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
	ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	}

	dp_set_ssn_valid_flag(&params, 0);

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
			dp_rx_tid_update_cb, rx_tid);

	rx_tid->ba_win_size = ba_window_size;
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			peer->vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);

	}
	return 0;
}

/*
 * dp_reo_desc_free() - Callback to free reo descriptor memory after
 * HW cache flush
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;

	if ((reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc flush failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
	qdf_mem_unmap_nbytes_single(soc->osdev,
		rx_tid->hw_qdesc_paddr,
		QDF_DMA_BIDIRECTIONAL,
		rx_tid->hw_qdesc_alloc_size);
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
	qdf_mem_free(freedesc);
}

#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif
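
/*
 * Worked example (illustrative only): REO commands carry the 64-bit
 * queue descriptor physical address as two 32-bit words. For
 * hw_qdesc_paddr = 0x123456780 the split used throughout this file is:
 *
 *   addr_lo = 0x123456780 & 0xffffffff      = 0x23456780
 *   addr_hi = (uint64_t)0x123456780 >> 32   = 0x1
 */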

/*
 * dp_rx_tid_setup_wifi3() - Set up receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;

	if (peer->delete_in_progress)
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);
	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;
#ifdef notyet
	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
#else
	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size);
#endif

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have an aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed: tid %d",
			__func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed: tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Total Size %d Aligned Addr %pK",
			__func__, rx_tid->hw_qdesc_alloc_size,
			hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		if (alloc_tries++ < 10)
			goto try_desc_alloc;
		else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);

	}
	return 0;
}
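
/*
 * Worked example (illustrative only): the alignment retry above
 * over-allocates by (align - 1) bytes and rounds up. Assuming
 * hw_qdesc_align = 128 and an unaligned malloc result ending in 0x1050,
 * qdf_align(0x1050, 128) yields 0x1080: the smallest 128-byte-aligned
 * address inside the padded buffer, i.e. ((addr + 127) & ~127UL).
 */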

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc deletion failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: rx_tid: %d status: %d", __func__,
		freedesc->rx_tid.tid,
		reo_status->rx_queue_status.header.status);

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* Flush and invalidate REO descriptor from HW cache: Base and
		 * extension descriptors should be flushed separately
		 */
		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			rx_tid->ba_win_size);
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);

		/* Flush reo extension descriptors */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							NULL,
							NULL)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s: fail to send CMD_FLUSH_CACHE:"
					"tid %d desc %pK", __func__,
					rx_tid->tid,
					(void *)(rx_tid->hw_qdesc_paddr));
			}
		}

		/* Flush base descriptor */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							dp_reo_desc_free,
							(void *)desc)) {
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd returns failure, the related TID
			 * queue desc should be unmapped. The local reo_desc,
			 * together with the TID queue desc, also needs to be
			 * freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: fail to send REO cmd to flush cache: tid %d",
				__func__, rx_tid->tid);
			qdf_mem_zero(&reo_status, sizeof(reo_status));
			reo_status.fl_cache_status.header.status = 0;
			dp_reo_desc_free(soc, (void *)desc, &reo_status);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
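
/*
 * Design note with a small sketch (illustrative only): freed REO queue
 * descriptors are parked on reo_desc_freelist and only flushed from the
 * HW cache once the list grows past REO_DESC_FREELIST_SIZE entries or an
 * entry has aged past REO_DESC_FREE_DEFER_MS. The eviction test above is
 * equivalent to:
 */
#if 0 /* example only */
static bool dp_example_should_evict(uint32_t list_size,
				    unsigned long curr_ts,
				    unsigned long free_ts)
{
	return (list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - free_ts) > REO_DESC_FREE_DEFER_MS);
}
#endif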

/*
 * dp_rx_tid_delete_wifi3() - Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d",
			__func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 0;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}

#ifdef DP_LFR
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d",
			tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
#endif

/*
 * dp_peer_rx_init() - Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;

#ifdef notyet /* TODO: See if this is required for exception handling */
		/* invalid sequence number */
		peer->tids_last_seq[tid] = 0xffff;
#endif
	}

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Setup rx tid queue for TID 0.
	 * Other queues will be set up on receiving the first packet, which
	 * would otherwise cause a NULL REO queue error.
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Set up the rest of the TIDs to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
	 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
}

/*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);

			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);

			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	for (tid = 0; tid < DP_MAX_TIDS; tid++)
		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}

/*
 * dp_peer_cleanup() - Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer);
}

/*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d addba rsp tx completion failed!",
			__func__, tid);
		return QDF_STATUS_SUCCESS;
	}

	rx_tid->num_addba_rsp_success++;
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output statuscode
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
void
dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Peer is NULL!\n", __func__);
		return;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @peer: Datapath peer handle
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(void *peer_handle,
	uint8_t dialogtoken,
	uint16_t tid, uint16_t batimeout,
	uint16_t buffersize,
	uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d hw qdesc is already setup",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, 0)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->ba_win_size = buffersize;
	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
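
/*
 * Flow sketch (illustrative only): a typical ADDBA exchange drives the
 * three ADDBA handlers above in this order. wlan_recv_addba_req and
 * wlan_send_addba_resp are hypothetical callers, not driver APIs:
 *
 *   1. wlan_recv_addba_req() -> dp_addba_requestprocess_wifi3()
 *        TID state: DP_RX_BA_INACTIVE -> DP_RX_BA_IN_PROGRESS
 *   2. wlan_send_addba_resp() -> dp_addba_responsesetup_wifi3()
 *        fills dialogtoken/statuscode/buffersize for the response frame
 *   3. tx completion of the response -> dp_addba_resp_tx_completion_wifi3()
 *        TID state: DP_RX_BA_IN_PROGRESS -> DP_RX_BA_ACTIVE on success
 */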

/*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @statuscode: response status code to be set
 */
void dp_set_addba_response(void *peer_handle, uint8_t tid,
	uint16_t statuscode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->userstatuscode = statuscode;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @peer: Datapath peer handle
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extension descriptor to save
	 * memory
	 */
	rx_tid->num_of_delba_req++;
	dp_rx_tid_update_wifi3(peer, tid, 1, 0);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}

/*
 * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion,
 * retrying the DELBA on failure
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */

int dp_delba_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Peer is NULL!", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev);
		}
		return QDF_STATUS_SUCCESS;
	} else {
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @vdev_handle: Datapath vdev handle
 * @peer_handle: Datapath peer handle
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 */
void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
		      struct cdp_peer *peer_handle,
		      enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (sec_type != cdp_sec_type_wapi) {
				params.u.upd_queue_params.update_pn_valid = 0;
			} else {
				/*
				 * Setting PN valid bit for WAPI sec_type,
				 * since WAPI PN has to be started with
				 * predefined value
				 */
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
					dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "PN Check not setup for TID :%d ", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
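
/*
 * Illustrative sketch (kept under #if 0 so it is never built): how a
 * receive packet number maps onto the 32-bit words that
 * dp_set_pn_check_wifi3() above writes into the REO queue descriptor
 * (pn_31_0 ... pn_127_96). WAPI carries a 128-bit PN (all four words);
 * CCMP/GCMP carry a 48-bit PN, which fits in the low two words. The
 * helper names here are hypothetical.
 */
#if 0
#include <stdint.h>

/* Split a 48-bit CCMP/GCMP PN into the two low REO words. */
static void pn48_to_words(uint64_t pn48, uint32_t words[4])
{
	words[0] = (uint32_t)(pn48 & 0xffffffff);	/* pn_31_0 */
	words[1] = (uint32_t)((pn48 >> 32) & 0xffff);	/* pn_63_32; upper
							 * 16 bits unused */
	words[2] = 0;					/* pn_95_64 */
	words[3] = 0;					/* pn_127_96 */
}

/* Reassemble the 48-bit PN from the words, e.g. for trace output. */
static uint64_t words_to_pn48(const uint32_t words[4])
{
	return ((uint64_t)(words[1] & 0xffff) << 32) | words[0];
}
#endif
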
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
		  "%s key of type %d",
		  peer,
		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		     sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *)&peer->tids_last_pn[i],
				(u_int8_t *)rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */
}
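
/*
 * Illustrative sketch (kept under #if 0 so it is never built): the
 * per-peer security state written above is a two-slot array indexed by
 * traffic class - one slot for unicast keys, one for multicast/broadcast
 * keys - so a security indication only needs the is_unicast flag to pick
 * the slot. Slot ordering and all names below are hypothetical.
 */
#if 0
#include <stdint.h>

enum sec_slot { SLOT_MCAST = 0, SLOT_UCAST = 1, SLOT_MAX };

struct peer_sec_model {
	int sec_type[SLOT_MAX];			/* cipher per traffic class */
	uint32_t michael_key[SLOT_MAX][2];	/* TKIP MIC key, if any */
};

static void sec_ind(struct peer_sec_model *p, int is_unicast, int sec_type)
{
	enum sec_slot slot = is_unicast ? SLOT_UCAST : SLOT_MCAST;

	p->sec_type[slot] = sec_type;	/* later rx checks read this slot */
}
#endif
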
#ifndef CONFIG_WIN
/**
 * dp_register_peer() - Register peer into physical device
 * @pdev_handle - data path device instance
 * @sta_desc - peer description
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
		struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
			sta_desc->sta_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_clear_peer() - remove peer from physical device
 * @pdev_handle - data path device instance
 * @local_id - local peer id
 *
 * remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle - data path device instance
 * @vdev_handle - virtual interface instance
 * @peer_addr - peer mac address
 * @local_id - local peer id with target mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
				    struct cdp_vdev *vdev_handle,
				    uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		qdf_atomic_dec(&peer->ref_cnt);
		return NULL;
	}

	*local_id = peer->local_id;
	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/**
 * dp_local_peer_id() - Find local peer id within peer instance
 * @peer - peer instance
 *
 * Find local peer id within peer instance
 *
 * Return: local peer id
 */
uint16_t dp_local_peer_id(void *peer)
{
	return ((struct dp_peer *)peer)->local_id;
}

/**
 * dp_peer_find_by_local_id() - Find peer by local peer id
 * @pdev_handle - data path device instance
 * @local_id - local peer id want to find
 *
 * Find peer by local peer id within physical device
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
				   "Incorrect local id %u", local_id);
		return NULL;
	}
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
	return peer;
}
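
/*
 * Illustrative sketch (kept under #if 0 so it is never built): the lookup
 * discipline used by callers of dp_peer_find_hash_find() in this file.
 * The find returns with the peer's ref_cnt already incremented, so every
 * caller must pair the lookup with a decrement once it is done with the
 * pointer. A simplified model of that contract, with hypothetical names:
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct peer_model {
	atomic_int ref_cnt;
	int state;
};

/* find_referenced() stands in for dp_peer_find_hash_find(): the returned
 * pointer is protected by the extra reference the lookup took. */
static struct peer_model *find_referenced(struct peer_model *p)
{
	if (p)
		atomic_fetch_add(&p->ref_cnt, 1);
	return p;
}

static int update_state(struct peer_model *p, int state)
{
	struct peer_model *peer = find_referenced(p);

	if (!peer)
		return -1;
	peer->state = state;
	/* the lookup took a reference; drop it before returning */
	atomic_fetch_sub(&peer->ref_cnt, 1);
	return 0;
}
#endif
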
/**
 * dp_peer_state_update() - update peer local state
 * @pdev_handle - data path device instance
 * @peer_mac - peer mac address
 * @state - new peer local state
 *
 * update peer local state
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
		enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
	if (NULL == peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @peer_handle - peer instance
 * @vdev_id - virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
		 peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
				       uint8_t sta_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Invalid sta id passed");
		return NULL;
	}

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
 * @peer_handle - peer instance
 *
 * Get virtual interface instance which peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle - peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

/**
 * dp_get_peer_state() - Get local peer state
 * @peer_handle - peer instance
 *
 * Get local peer state
 *
 * Return: peer state
 */
int dp_get_peer_state(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
	return peer->state;
}
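
/*
 * Illustrative sketch (kept under #if 0 so it is never built): the
 * mgmt-timestamp helpers below walk one peer_hash bin with the BSD
 * TAILQ macros. For readers unfamiliar with <sys/queue.h>, this is a
 * minimal, self-contained version of that head-to-tail bucket walk;
 * struct and field names here are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

struct node {
	uint8_t mac[6];
	TAILQ_ENTRY(node) hash_list_elem;	/* linkage inside one bin */
};

TAILQ_HEAD(bin_head, node);

/* Walk one hash bin head-to-tail and return the first MAC match,
 * mirroring the TAILQ_FOREACH() loops in the functions below. */
static struct node *bin_find(struct bin_head *bin, const uint8_t mac[6])
{
	struct node *n;

	TAILQ_FOREACH(n, bin, hash_list_elem) {
		if (!memcmp(n->mac, mac, 6))
			return n;
	}
	return NULL;
}
#endif
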
/**
 * dp_get_last_mgmt_timestamp() - get timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @subtype: management frame type
 * @timestamp: last timestamp
 *
 * Return: true if timestamp is retrieved for valid peer else false
 */
bool dp_get_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
				u8 subtype, qdf_time_t *timestamp)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;

	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(
		&local_mac_addr_aligned.raw[0],
		peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of locating the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				*timestamp = peer->last_assoc_rcvd;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				*timestamp = peer->last_disassoc_rcvd;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}
/**
 * dp_update_last_mgmt_timestamp() - set timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @timestamp: time to be set
 * @subtype: management frame type
 *
 * Return: true if timestamp is updated for valid peer else false
 */
bool dp_update_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
				   qdf_time_t timestamp, u8 subtype)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;

	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of locating the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				peer->last_assoc_rcvd = timestamp;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				peer->last_disassoc_rcvd = timestamp;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}

/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev - data path device instance
 *
 * local peer id pool alloc for physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}

/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev - data path device instance
 * @peer - new peer instance
 *
 * allocate local peer id
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
}

/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev - data path device instance
 * @peer - peer instance should be removed
 *
 * remove local peer id
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
#endif
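
/*
 * Illustrative sketch (kept under #if 0 so it is never built): the local
 * peer id pool above is a singly linked freelist threaded through an
 * array, with a self-linked sentinel entry at index
 * OL_TXRX_NUM_LOCAL_PEER_IDS marking the end. A self-contained model of
 * init/alloc/free (hypothetical names, locking omitted):
 */
#if 0
#include <stdint.h>

#define NUM_IDS 4		/* stand-in for OL_TXRX_NUM_LOCAL_PEER_IDS */
#define INVALID_ID 0xff		/* stand-in for the invalid-id marker */

static uint8_t pool[NUM_IDS + 1];	/* pool[i] = next free ID after i */
static uint8_t freelist;

static void pool_init(void)
{
	int i;

	freelist = 0;
	for (i = 0; i < NUM_IDS; i++)
		pool[i] = i + 1;
	pool[NUM_IDS] = NUM_IDS;	/* self-link marks the list end */
}

static uint8_t pool_alloc(void)
{
	uint8_t i = freelist;

	if (pool[i] == i)		/* only the end marker is left */
		return INVALID_ID;
	freelist = pool[i];		/* advance head to the next free ID */
	return i;
}

static void pool_free(uint8_t i)
{
	pool[i] = freelist;		/* push the ID back on the head */
	freelist = i;
}
#endif
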
/**
 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer_id of the peer
 * @peer_mac: buffer the mac address is copied into
 *
 * Return: vdev_id of the vap
 */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "soc %pK peer_id %d", soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		return CDP_INVALID_VDEV_ID;
	}

	qdf_mem_copy(peer_mac, peer->mac_addr.raw, DP_MAC_ADDR_LEN);
	return peer->vdev->vdev_id;
}

/**
 * dp_peer_rxtid_stats(): Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: none
 */
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
			 void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;

	if (!dp_stats_cmd_cb)
		return;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						cb_ctxt);
			} else {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						rx_tid);
			}

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}
}

void dp_set_michael_key(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	uint8_t sec_index = is_unicast ? 1 : 0;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		return;
	}

	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);
}
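
/*
 * Illustrative sketch (kept under #if 0 so it is never built): every REO
 * command issued in this file addresses the hardware queue descriptor by
 * splitting its 64-bit physical address into the 32-bit addr_lo/addr_hi
 * command fields, as dp_peer_rxtid_stats() and dp_set_pn_check_wifi3()
 * do above. Names below are hypothetical.
 */
#if 0
#include <stdint.h>

struct reo_addr_model {
	uint32_t addr_lo;
	uint32_t addr_hi;
};

static struct reo_addr_model split_qdesc_paddr(uint64_t paddr)
{
	struct reo_addr_model a;

	a.addr_lo = (uint32_t)(paddr & 0xffffffff);
	a.addr_hi = (uint32_t)(paddr >> 32);
	return a;
}

/* Reassembly, e.g. to sanity-check what was programmed. */
static uint64_t join_qdesc_paddr(struct reo_addr_model a)
{
	return ((uint64_t)a.addr_hi << 32) | a.addr_lo;
}
#endif
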