1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <qdf_types.h> 20 #include <qdf_lock.h> 21 #include <hal_hw_headers.h> 22 #include "dp_htt.h" 23 #include "dp_types.h" 24 #include "dp_internal.h" 25 #include "dp_peer.h" 26 #include "dp_rx_defrag.h" 27 #include <hal_api.h> 28 #include <hal_reo.h> 29 #ifdef CONFIG_MCL 30 #include <cds_ieee80211_common.h> 31 #include <cds_api.h> 32 #endif 33 #include <cdp_txrx_handle.h> 34 #include <wlan_cfg.h> 35 36 #ifdef DP_LFR 37 static inline void 38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, 39 uint8_t valid) 40 { 41 params->u.upd_queue_params.update_svld = 1; 42 params->u.upd_queue_params.svld = valid; 43 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 44 "%s: Setting SSN valid bit to %d", 45 __func__, valid); 46 } 47 #else 48 static inline void 49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, 50 uint8_t valid) {}; 51 #endif 52 53 static inline int dp_peer_find_mac_addr_cmp( 54 union dp_align_mac_addr *mac_addr1, 55 union dp_align_mac_addr *mac_addr2) 56 { 57 return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd) 58 /* 59 * Intentionally use & rather than &&. 60 * because the operands are binary rather than generic boolean, 61 * the functionality is equivalent. 62 * Using && has the advantage of short-circuited evaluation, 63 * but using & has the advantage of no conditional branching, 64 * which is a more significant benefit. 65 */ 66 & 67 (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef)); 68 } 69 70 static int dp_peer_find_map_attach(struct dp_soc *soc) 71 { 72 uint32_t max_peers, peer_map_size; 73 74 max_peers = soc->max_peers; 75 /* allocate the peer ID -> peer object map */ 76 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 77 "\n<=== cfg max peer id %d ====>", max_peers); 78 peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]); 79 soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size); 80 if (!soc->peer_id_to_obj_map) { 81 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 82 "%s: peer map memory allocation failed", __func__); 83 return QDF_STATUS_E_NOMEM; 84 } 85 86 /* 87 * The peer_id_to_obj_map doesn't really need to be initialized, 88 * since elements are only used after they have been individually 89 * initialized. 90 * However, it is convenient for debugging to have all elements 91 * that are not in use set to 0. 92 */ 93 qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size); 94 return 0; /* success */ 95 } 96 97 static int dp_log2_ceil(unsigned value) 98 { 99 unsigned tmp = value; 100 int log2 = -1; 101 102 while (tmp) { 103 log2++; 104 tmp >>= 1; 105 } 106 if (1 << log2 != value) 107 log2++; 108 return log2; 109 } 110 111 static int dp_peer_find_add_id_to_obj( 112 struct dp_peer *peer, 113 uint16_t peer_id) 114 { 115 int i; 116 117 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { 118 if (peer->peer_ids[i] == HTT_INVALID_PEER) { 119 peer->peer_ids[i] = peer_id; 120 return 0; /* success */ 121 } 122 } 123 return QDF_STATUS_E_FAILURE; /* failure */ 124 } 125 126 #define DP_PEER_HASH_LOAD_MULT 2 127 #define DP_PEER_HASH_LOAD_SHIFT 0 128 129 #define DP_AST_HASH_LOAD_MULT 2 130 #define DP_AST_HASH_LOAD_SHIFT 0 131 132 static int dp_peer_find_hash_attach(struct dp_soc *soc) 133 { 134 int i, hash_elems, log2; 135 136 /* allocate the peer MAC address -> peer object hash table */ 137 hash_elems = soc->max_peers; 138 hash_elems *= DP_PEER_HASH_LOAD_MULT; 139 hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; 140 log2 = dp_log2_ceil(hash_elems); 141 hash_elems = 1 << log2; 142 143 soc->peer_hash.mask = hash_elems - 1; 144 soc->peer_hash.idx_bits = log2; 145 /* allocate an array of TAILQ peer object lists */ 146 soc->peer_hash.bins = qdf_mem_malloc( 147 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); 148 if (!soc->peer_hash.bins) 149 return QDF_STATUS_E_NOMEM; 150 151 for (i = 0; i < hash_elems; i++) 152 TAILQ_INIT(&soc->peer_hash.bins[i]); 153 154 return 0; 155 } 156 157 static void dp_peer_find_hash_detach(struct dp_soc *soc) 158 { 159 qdf_mem_free(soc->peer_hash.bins); 160 } 161 162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc, 163 union dp_align_mac_addr *mac_addr) 164 { 165 unsigned index; 166 167 index = 168 mac_addr->align2.bytes_ab ^ 169 mac_addr->align2.bytes_cd ^ 170 mac_addr->align2.bytes_ef; 171 index ^= index >> soc->peer_hash.idx_bits; 172 index &= soc->peer_hash.mask; 173 return index; 174 } 175 176 177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) 178 { 179 unsigned index; 180 181 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 182 qdf_spin_lock_bh(&soc->peer_ref_mutex); 183 /* 184 * It is important to add the new peer at the tail of the peer list 185 * with the bin index. Together with having the hash_find function 186 * search from head to tail, this ensures that if two entries with 187 * the same MAC address are stored, the one added first will be 188 * found first. 189 */ 190 TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem); 191 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 192 } 193 194 #ifdef FEATURE_AST 195 /* 196 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table 197 * @soc: SoC handle 198 * 199 * Return: None 200 */ 201 static int dp_peer_ast_hash_attach(struct dp_soc *soc) 202 { 203 int i, hash_elems, log2; 204 205 hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >> 206 DP_AST_HASH_LOAD_SHIFT); 207 208 log2 = dp_log2_ceil(hash_elems); 209 hash_elems = 1 << log2; 210 211 soc->ast_hash.mask = hash_elems - 1; 212 soc->ast_hash.idx_bits = log2; 213 214 /* allocate an array of TAILQ peer object lists */ 215 soc->ast_hash.bins = qdf_mem_malloc( 216 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, 217 dp_ast_entry))); 218 219 if (!soc->ast_hash.bins) 220 return QDF_STATUS_E_NOMEM; 221 222 for (i = 0; i < hash_elems; i++) 223 TAILQ_INIT(&soc->ast_hash.bins[i]); 224 225 return 0; 226 } 227 228 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND) 229 static inline void dp_peer_ast_cleanup(struct dp_soc *soc, 230 struct dp_ast_entry *ast) 231 { 232 struct cdp_soc_t *cdp_soc = &soc->cdp_soc; 233 234 if (ast->cp_ctx && cdp_soc->ol_ops->peer_del_wds_cp_ctx) 235 cdp_soc->ol_ops->peer_del_wds_cp_ctx(ast->cp_ctx); 236 } 237 #else 238 static inline void dp_peer_ast_cleanup(struct dp_soc *soc, 239 struct dp_ast_entry *ast) 240 { 241 } 242 #endif 243 /* 244 * dp_peer_ast_hash_detach() - Free AST Hash table 245 * @soc: SoC handle 246 * 247 * Return: None 248 */ 249 static void dp_peer_ast_hash_detach(struct dp_soc *soc) 250 { 251 unsigned int index; 252 struct dp_ast_entry *ast, *ast_next; 253 254 if (!soc->ast_hash.mask) 255 return; 256 257 for (index = 0; index <= soc->ast_hash.mask; index++) { 258 if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) { 259 TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index], 260 hash_list_elem, ast_next) { 261 TAILQ_REMOVE(&soc->ast_hash.bins[index], ast, 262 hash_list_elem); 263 dp_peer_ast_cleanup(soc, ast); 264 qdf_mem_free(ast); 265 } 266 } 267 } 268 269 qdf_mem_free(soc->ast_hash.bins); 270 } 271 272 /* 273 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address 274 * @soc: SoC handle 275 * 276 * Return: AST hash 277 */ 278 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc, 279 union dp_align_mac_addr *mac_addr) 280 { 281 uint32_t index; 282 283 index = 284 mac_addr->align2.bytes_ab ^ 285 mac_addr->align2.bytes_cd ^ 286 mac_addr->align2.bytes_ef; 287 index ^= index >> soc->ast_hash.idx_bits; 288 index &= soc->ast_hash.mask; 289 return index; 290 } 291 292 /* 293 * dp_peer_ast_hash_add() - Add AST entry into hash table 294 * @soc: SoC handle 295 * 296 * This function adds the AST entry into SoC AST hash table 297 * It assumes caller has taken the ast lock to protect the access to this table 298 * 299 * Return: None 300 */ 301 static inline void dp_peer_ast_hash_add(struct dp_soc *soc, 302 struct dp_ast_entry *ase) 303 { 304 uint32_t index; 305 306 index = dp_peer_ast_hash_index(soc, &ase->mac_addr); 307 TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem); 308 } 309 310 /* 311 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table 312 * @soc: SoC handle 313 * 314 * This function removes the AST entry from soc AST hash table 315 * It assumes caller has taken the ast lock to protect the access to this table 316 * 317 * Return: None 318 */ 319 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc, 320 struct dp_ast_entry *ase) 321 { 322 unsigned index; 323 struct dp_ast_entry *tmpase; 324 int found = 0; 325 326 index = dp_peer_ast_hash_index(soc, &ase->mac_addr); 327 /* Check if tail is not empty before delete*/ 328 QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index])); 329 330 TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) { 331 if (tmpase == ase) { 332 found = 1; 333 break; 334 } 335 } 336 337 QDF_ASSERT(found); 338 TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem); 339 } 340 341 /* 342 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address 343 * and pdev id 344 * @soc: SoC handle 345 * @ast_mac_addr: mac address 346 * @pdev_id: pdev_id 347 * 348 * It assumes caller has taken the ast lock to protect the access to 349 * AST hash table 350 * 351 * Return: AST entry 352 */ 353 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, 354 uint8_t *ast_mac_addr, 355 uint8_t pdev_id) 356 { 357 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 358 uint32_t index; 359 struct dp_ast_entry *ase; 360 361 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 362 ast_mac_addr, DP_MAC_ADDR_LEN); 363 mac_addr = &local_mac_addr_aligned; 364 365 index = dp_peer_ast_hash_index(soc, mac_addr); 366 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { 367 if ((pdev_id == ase->pdev_id) && 368 !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) { 369 return ase; 370 } 371 } 372 373 return NULL; 374 } 375 376 /* 377 * dp_peer_ast_hash_find() - Find AST entry by MAC address 378 * @soc: SoC handle 379 * 380 * It assumes caller has taken the ast lock to protect the access to 381 * AST hash table 382 * 383 * Return: AST entry 384 */ 385 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc, 386 uint8_t *ast_mac_addr) 387 { 388 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 389 unsigned index; 390 struct dp_ast_entry *ase; 391 392 qdf_mem_copy(&local_mac_addr_aligned.raw[0], 393 ast_mac_addr, DP_MAC_ADDR_LEN); 394 mac_addr = &local_mac_addr_aligned; 395 396 index = dp_peer_ast_hash_index(soc, mac_addr); 397 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { 398 if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) { 399 return ase; 400 } 401 } 402 403 return NULL; 404 } 405 406 /* 407 * dp_peer_map_ast() - Map the ast entry with HW AST Index 408 * @soc: SoC handle 409 * @peer: peer to which ast node belongs 410 * @mac_addr: MAC address of ast node 411 * @hw_peer_id: HW AST Index returned by target in peer map event 412 * @vdev_id: vdev id for VAP to which the peer belongs to 413 * @ast_hash: ast hash value in HW 414 * 415 * Return: None 416 */ 417 static inline void dp_peer_map_ast(struct dp_soc *soc, 418 struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, 419 uint8_t vdev_id, uint16_t ast_hash) 420 { 421 struct dp_ast_entry *ast_entry; 422 enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC; 423 bool ast_entry_found = FALSE; 424 425 if (!peer) { 426 return; 427 } 428 429 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 430 "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x", 431 __func__, peer, hw_peer_id, vdev_id, mac_addr[0], 432 mac_addr[1], mac_addr[2], mac_addr[3], 433 mac_addr[4], mac_addr[5]); 434 435 qdf_spin_lock_bh(&soc->ast_lock); 436 TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) { 437 if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw, 438 DP_MAC_ADDR_LEN))) { 439 ast_entry->ast_idx = hw_peer_id; 440 soc->ast_table[hw_peer_id] = ast_entry; 441 ast_entry->is_active = TRUE; 442 peer_type = ast_entry->type; 443 ast_entry_found = TRUE; 444 ast_entry->ast_hash_value = ast_hash; 445 } 446 } 447 448 if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) { 449 if (soc->cdp_soc.ol_ops->peer_map_event) { 450 soc->cdp_soc.ol_ops->peer_map_event( 451 soc->ctrl_psoc, peer->peer_ids[0], 452 hw_peer_id, vdev_id, 453 mac_addr, peer_type, ast_hash); 454 } 455 } else { 456 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 457 "AST entry not found"); 458 } 459 460 qdf_spin_unlock_bh(&soc->ast_lock); 461 return; 462 } 463 464 /* 465 * dp_peer_add_ast() - Allocate and add AST entry into peer list 466 * @soc: SoC handle 467 * @peer: peer to which ast node belongs 468 * @mac_addr: MAC address of ast node 469 * @is_self: Is this base AST entry with peer mac address 470 * 471 * This API is used by WDS source port learning function to 472 * add a new AST entry into peer AST list 473 * 474 * Return: 0 if new entry is allocated, 475 * -1 if entry add failed 476 */ 477 int dp_peer_add_ast(struct dp_soc *soc, 478 struct dp_peer *peer, 479 uint8_t *mac_addr, 480 enum cdp_txrx_ast_entry_type type, 481 uint32_t flags) 482 { 483 struct dp_ast_entry *ast_entry; 484 struct dp_vdev *vdev = peer->vdev; 485 struct dp_pdev *pdev = NULL; 486 uint8_t next_node_mac[6]; 487 int ret = -1; 488 489 if (!vdev) { 490 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 491 FL("Peers vdev is NULL")); 492 QDF_ASSERT(0); 493 return ret; 494 } 495 496 pdev = vdev->pdev; 497 498 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 499 "%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x", 500 __func__, peer, mac_addr[0], mac_addr[1], mac_addr[2], 501 mac_addr[3], mac_addr[4], mac_addr[5]); 502 503 qdf_spin_lock_bh(&soc->ast_lock); 504 505 /* If AST entry already exists , just return from here 506 * ast entry with same mac address can exist on different radios 507 * if ast_override support is enabled use search by pdev in this 508 * case 509 */ 510 if (soc->ast_override_support) { 511 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, 512 pdev->pdev_id); 513 if (ast_entry) { 514 qdf_spin_unlock_bh(&soc->ast_lock); 515 return 0; 516 } 517 } else { 518 ast_entry = dp_peer_ast_hash_find(soc, mac_addr); 519 520 if (ast_entry) { 521 if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) { 522 ast_entry->is_active = TRUE; 523 qdf_spin_unlock_bh(&soc->ast_lock); 524 return 0; 525 } 526 527 /* 528 * WAR for HK 1.x AST issue 529 * If an AST entry with same mac address already 530 * exists and is mapped to a different radio, and 531 * if the current radio is primary radio , delete 532 * the existing AST entry and return. 533 * 534 * New AST entry will be created again on next 535 * SA_invalid frame 536 */ 537 if ((ast_entry->pdev_id != vdev->pdev->pdev_id) && 538 vdev->pdev->is_primary) { 539 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 540 "Deleting ast_pdev=%d pdev=%d addr=%pM\n", 541 ast_entry->pdev_id, 542 vdev->pdev->pdev_id, mac_addr); 543 dp_peer_del_ast(soc, ast_entry); 544 } 545 546 qdf_spin_unlock_bh(&soc->ast_lock); 547 return 0; 548 } 549 } 550 551 ast_entry = (struct dp_ast_entry *) 552 qdf_mem_malloc(sizeof(struct dp_ast_entry)); 553 554 if (!ast_entry) { 555 qdf_spin_unlock_bh(&soc->ast_lock); 556 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 557 FL("fail to allocate ast_entry")); 558 QDF_ASSERT(0); 559 return ret; 560 } 561 562 qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN); 563 ast_entry->peer = peer; 564 ast_entry->pdev_id = vdev->pdev->pdev_id; 565 ast_entry->vdev_id = vdev->vdev_id; 566 567 switch (type) { 568 case CDP_TXRX_AST_TYPE_STATIC: 569 peer->self_ast_entry = ast_entry; 570 ast_entry->type = CDP_TXRX_AST_TYPE_STATIC; 571 if (peer->vdev->opmode == wlan_op_mode_sta) 572 ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS; 573 break; 574 case CDP_TXRX_AST_TYPE_SELF: 575 peer->self_ast_entry = ast_entry; 576 ast_entry->type = CDP_TXRX_AST_TYPE_SELF; 577 break; 578 case CDP_TXRX_AST_TYPE_WDS: 579 ast_entry->next_hop = 1; 580 ast_entry->type = CDP_TXRX_AST_TYPE_WDS; 581 break; 582 case CDP_TXRX_AST_TYPE_WDS_HM: 583 ast_entry->next_hop = 1; 584 ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM; 585 break; 586 case CDP_TXRX_AST_TYPE_MEC: 587 ast_entry->next_hop = 1; 588 ast_entry->type = CDP_TXRX_AST_TYPE_MEC; 589 break; 590 default: 591 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 592 FL("Incorrect AST entry type")); 593 } 594 595 ast_entry->is_active = TRUE; 596 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); 597 DP_STATS_INC(soc, ast.added, 1); 598 dp_peer_ast_hash_add(soc, ast_entry); 599 qdf_spin_unlock_bh(&soc->ast_lock); 600 601 if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) 602 qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6); 603 else 604 qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6); 605 606 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) && 607 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) && 608 (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) { 609 if (QDF_STATUS_SUCCESS == 610 soc->cdp_soc.ol_ops->peer_add_wds_entry( 611 peer->vdev->osif_vdev, 612 mac_addr, 613 next_node_mac, 614 flags)) 615 return 0; 616 } 617 618 return ret; 619 } 620 621 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND) 622 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) 623 { 624 struct dp_peer *peer = ast_entry->peer; 625 626 if (ast_entry->next_hop) { 627 dp_peer_ast_send_wds_del(soc, ast_entry); 628 } else { 629 soc->ast_table[ast_entry->ast_idx] = NULL; 630 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); 631 DP_STATS_INC(soc, ast.deleted, 1); 632 dp_peer_ast_hash_remove(soc, ast_entry); 633 qdf_mem_free(ast_entry); 634 } 635 } 636 #else 637 /* 638 * dp_peer_del_ast() - Delete and free AST entry 639 * @soc: SoC handle 640 * @ast_entry: AST entry of the node 641 * 642 * This function removes the AST entry from peer and soc tables 643 * It assumes caller has taken the ast lock to protect the access to these 644 * tables 645 * 646 * Return: None 647 */ 648 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) 649 { 650 struct dp_peer *peer = ast_entry->peer; 651 652 if (ast_entry->next_hop) 653 soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev, 654 ast_entry->mac_addr.raw); 655 656 soc->ast_table[ast_entry->ast_idx] = NULL; 657 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); 658 659 if (ast_entry == peer->self_ast_entry) 660 peer->self_ast_entry = NULL; 661 662 DP_STATS_INC(soc, ast.deleted, 1); 663 dp_peer_ast_hash_remove(soc, ast_entry); 664 qdf_mem_free(ast_entry); 665 } 666 #endif 667 668 /* 669 * dp_peer_update_ast() - Delete and free AST entry 670 * @soc: SoC handle 671 * @peer: peer to which ast node belongs 672 * @ast_entry: AST entry of the node 673 * @flags: wds or hmwds 674 * 675 * This function update the AST entry to the roamed peer and soc tables 676 * It assumes caller has taken the ast lock to protect the access to these 677 * tables 678 * 679 * Return: 0 if ast entry is updated successfully 680 * -1 failure 681 */ 682 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, 683 struct dp_ast_entry *ast_entry, uint32_t flags) 684 { 685 int ret = -1; 686 struct dp_peer *old_peer; 687 688 if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) || 689 (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) || 690 (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS)) 691 return 0; 692 693 old_peer = ast_entry->peer; 694 TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem); 695 696 ast_entry->peer = peer; 697 ast_entry->type = CDP_TXRX_AST_TYPE_WDS; 698 ast_entry->pdev_id = peer->vdev->pdev->pdev_id; 699 ast_entry->vdev_id = peer->vdev->vdev_id; 700 ast_entry->is_active = TRUE; 701 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); 702 703 ret = soc->cdp_soc.ol_ops->peer_update_wds_entry( 704 peer->vdev->osif_vdev, 705 ast_entry->mac_addr.raw, 706 peer->mac_addr.raw, 707 flags); 708 709 return ret; 710 } 711 712 /* 713 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry 714 * @soc: SoC handle 715 * @ast_entry: AST entry of the node 716 * 717 * This function gets the pdev_id from the ast entry. 718 * 719 * Return: (uint8_t) pdev_id 720 */ 721 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, 722 struct dp_ast_entry *ast_entry) 723 { 724 return ast_entry->pdev_id; 725 } 726 727 /* 728 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry 729 * @soc: SoC handle 730 * @ast_entry: AST entry of the node 731 * 732 * This function gets the next hop from the ast entry. 733 * 734 * Return: (uint8_t) next_hop 735 */ 736 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 737 struct dp_ast_entry *ast_entry) 738 { 739 return ast_entry->next_hop; 740 } 741 742 /* 743 * dp_peer_ast_set_type() - set type from the ast entry 744 * @soc: SoC handle 745 * @ast_entry: AST entry of the node 746 * 747 * This function sets the type in the ast entry. 748 * 749 * Return: 750 */ 751 void dp_peer_ast_set_type(struct dp_soc *soc, 752 struct dp_ast_entry *ast_entry, 753 enum cdp_txrx_ast_entry_type type) 754 { 755 ast_entry->type = type; 756 } 757 758 #else 759 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, 760 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, 761 uint32_t flags) 762 { 763 return 1; 764 } 765 766 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) 767 { 768 } 769 770 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, 771 struct dp_ast_entry *ast_entry, uint32_t flags) 772 { 773 return 1; 774 } 775 776 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc, 777 uint8_t *ast_mac_addr) 778 { 779 return NULL; 780 } 781 782 static int dp_peer_ast_hash_attach(struct dp_soc *soc) 783 { 784 return 0; 785 } 786 787 static inline void dp_peer_map_ast(struct dp_soc *soc, 788 struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, 789 uint8_t vdev_id, uint16_t ast_hash) 790 { 791 return; 792 } 793 794 static void dp_peer_ast_hash_detach(struct dp_soc *soc) 795 { 796 } 797 798 void dp_peer_ast_set_type(struct dp_soc *soc, 799 struct dp_ast_entry *ast_entry, 800 enum cdp_txrx_ast_entry_type type) 801 { 802 } 803 804 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, 805 struct dp_ast_entry *ast_entry) 806 { 807 return 0xff; 808 } 809 810 811 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, 812 struct dp_ast_entry *ast_entry) 813 { 814 return 0xff; 815 } 816 #endif 817 818 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND) 819 void dp_peer_ast_set_cp_ctx(struct dp_soc *soc, 820 struct dp_ast_entry *ast_entry, 821 void *cp_ctx) 822 { 823 ast_entry->cp_ctx = cp_ctx; 824 } 825 826 void *dp_peer_ast_get_cp_ctx(struct dp_soc *soc, 827 struct dp_ast_entry *ast_entry) 828 { 829 void *cp_ctx = NULL; 830 831 cp_ctx = ast_entry->cp_ctx; 832 ast_entry->cp_ctx = NULL; 833 834 return cp_ctx; 835 } 836 837 void dp_peer_ast_send_wds_del(struct dp_soc *soc, 838 struct dp_ast_entry *ast_entry) 839 { 840 struct dp_peer *peer = ast_entry->peer; 841 struct cdp_soc_t *cdp_soc = &soc->cdp_soc; 842 843 if (!ast_entry->wmi_sent) { 844 cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev, 845 ast_entry->mac_addr.raw); 846 ast_entry->wmi_sent = true; 847 } 848 } 849 850 bool dp_peer_ast_get_wmi_sent(struct dp_soc *soc, 851 struct dp_ast_entry *ast_entry) 852 { 853 return ast_entry->wmi_sent; 854 } 855 856 void dp_peer_ast_free_entry(struct dp_soc *soc, 857 struct dp_ast_entry *ast_entry) 858 { 859 struct dp_peer *peer = ast_entry->peer; 860 861 soc->ast_table[ast_entry->ast_idx] = NULL; 862 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); 863 DP_STATS_INC(soc, ast.deleted, 1); 864 dp_peer_ast_hash_remove(soc, ast_entry); 865 qdf_mem_free(ast_entry); 866 } 867 #endif 868 869 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, 870 uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id) 871 { 872 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; 873 unsigned index; 874 struct dp_peer *peer; 875 876 if (mac_addr_is_aligned) { 877 mac_addr = (union dp_align_mac_addr *) peer_mac_addr; 878 } else { 879 qdf_mem_copy( 880 &local_mac_addr_aligned.raw[0], 881 peer_mac_addr, DP_MAC_ADDR_LEN); 882 mac_addr = &local_mac_addr_aligned; 883 } 884 index = dp_peer_find_hash_index(soc, mac_addr); 885 qdf_spin_lock_bh(&soc->peer_ref_mutex); 886 TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) { 887 #if ATH_SUPPORT_WRAP 888 /* ProxySTA may have multiple BSS peer with same MAC address, 889 * modified find will take care of finding the correct BSS peer. 890 */ 891 if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 && 892 ((peer->vdev->vdev_id == vdev_id) || 893 (vdev_id == DP_VDEV_ALL))) { 894 #else 895 if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) { 896 #endif 897 /* found it - increment the ref count before releasing 898 * the lock 899 */ 900 qdf_atomic_inc(&peer->ref_cnt); 901 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 902 return peer; 903 } 904 } 905 qdf_spin_unlock_bh(&soc->peer_ref_mutex); 906 return NULL; /* failure */ 907 } 908 909 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) 910 { 911 unsigned index; 912 struct dp_peer *tmppeer = NULL; 913 int found = 0; 914 915 index = dp_peer_find_hash_index(soc, &peer->mac_addr); 916 /* Check if tail is not empty before delete*/ 917 QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); 918 /* 919 * DO NOT take the peer_ref_mutex lock here - it needs to be taken 920 * by the caller. 921 * The caller needs to hold the lock from the time the peer object's 922 * reference count is decremented and tested up through the time the 923 * reference to the peer object is removed from the hash table, by 924 * this function. 925 * Holding the lock only while removing the peer object reference 926 * from the hash table keeps the hash table consistent, but does not 927 * protect against a new HL tx context starting to use the peer object 928 * if it looks up the peer object from its MAC address just after the 929 * peer ref count is decremented to zero, but just before the peer 930 * object reference is removed from the hash table. 931 */ 932 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) { 933 if (tmppeer == peer) { 934 found = 1; 935 break; 936 } 937 } 938 QDF_ASSERT(found); 939 TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem); 940 } 941 942 void dp_peer_find_hash_erase(struct dp_soc *soc) 943 { 944 int i; 945 946 /* 947 * Not really necessary to take peer_ref_mutex lock - by this point, 948 * it's known that the soc is no longer in use. 949 */ 950 for (i = 0; i <= soc->peer_hash.mask; i++) { 951 if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) { 952 struct dp_peer *peer, *peer_next; 953 954 /* 955 * TAILQ_FOREACH_SAFE must be used here to avoid any 956 * memory access violation after peer is freed 957 */ 958 TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i], 959 hash_list_elem, peer_next) { 960 /* 961 * Don't remove the peer from the hash table - 962 * that would modify the list we are currently 963 * traversing, and it's not necessary anyway. 964 */ 965 /* 966 * Artificially adjust the peer's ref count to 967 * 1, so it will get deleted by 968 * dp_peer_unref_delete. 969 */ 970 /* set to zero */ 971 qdf_atomic_init(&peer->ref_cnt); 972 /* incr to one */ 973 qdf_atomic_inc(&peer->ref_cnt); 974 dp_peer_unref_delete(peer); 975 } 976 } 977 } 978 } 979 980 static void dp_peer_find_map_detach(struct dp_soc *soc) 981 { 982 qdf_mem_free(soc->peer_id_to_obj_map); 983 } 984 985 int dp_peer_find_attach(struct dp_soc *soc) 986 { 987 if (dp_peer_find_map_attach(soc)) 988 return 1; 989 990 if (dp_peer_find_hash_attach(soc)) { 991 dp_peer_find_map_detach(soc); 992 return 1; 993 } 994 995 if (dp_peer_ast_hash_attach(soc)) { 996 dp_peer_find_hash_detach(soc); 997 dp_peer_find_map_detach(soc); 998 return 1; 999 } 1000 return 0; /* success */ 1001 } 1002 1003 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, 1004 union hal_reo_status *reo_status) 1005 { 1006 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; 1007 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); 1008 1009 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { 1010 DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n", 1011 queue_status->header.status, rx_tid->tid); 1012 return; 1013 } 1014 1015 DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n" 1016 "ssn: %d\n" 1017 "curr_idx : %d\n" 1018 "pn_31_0 : %08x\n" 1019 "pn_63_32 : %08x\n" 1020 "pn_95_64 : %08x\n" 1021 "pn_127_96 : %08x\n" 1022 "last_rx_enq_tstamp : %08x\n" 1023 "last_rx_deq_tstamp : %08x\n" 1024 "rx_bitmap_31_0 : %08x\n" 1025 "rx_bitmap_63_32 : %08x\n" 1026 "rx_bitmap_95_64 : %08x\n" 1027 "rx_bitmap_127_96 : %08x\n" 1028 "rx_bitmap_159_128 : %08x\n" 1029 "rx_bitmap_191_160 : %08x\n" 1030 "rx_bitmap_223_192 : %08x\n" 1031 "rx_bitmap_255_224 : %08x\n", 1032 rx_tid->tid, 1033 queue_status->ssn, queue_status->curr_idx, 1034 queue_status->pn_31_0, queue_status->pn_63_32, 1035 queue_status->pn_95_64, queue_status->pn_127_96, 1036 queue_status->last_rx_enq_tstamp, 1037 queue_status->last_rx_deq_tstamp, 1038 queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32, 1039 queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96, 1040 queue_status->rx_bitmap_159_128, 1041 queue_status->rx_bitmap_191_160, 1042 queue_status->rx_bitmap_223_192, 1043 queue_status->rx_bitmap_255_224); 1044 1045 DP_TRACE_STATS(FATAL, 1046 "curr_mpdu_cnt : %d\n" 1047 "curr_msdu_cnt : %d\n" 1048 "fwd_timeout_cnt : %d\n" 1049 "fwd_bar_cnt : %d\n" 1050 "dup_cnt : %d\n" 1051 "frms_in_order_cnt : %d\n" 1052 "bar_rcvd_cnt : %d\n" 1053 "mpdu_frms_cnt : %d\n" 1054 "msdu_frms_cnt : %d\n" 1055 "total_byte_cnt : %d\n" 1056 "late_recv_mpdu_cnt : %d\n" 1057 "win_jump_2k : %d\n" 1058 "hole_cnt : %d\n", 1059 queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt, 1060 queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt, 1061 queue_status->dup_cnt, queue_status->frms_in_order_cnt, 1062 queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt, 1063 queue_status->msdu_frms_cnt, queue_status->total_cnt, 1064 queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k, 1065 queue_status->hole_cnt); 1066 1067 DP_PRINT_STATS("Addba Req : %d\n" 1068 "Addba Resp : %d\n" 1069 "Addba Resp success : %d\n" 1070 "Addba Resp failed : %d\n" 1071 "Delba Req received : %d\n" 1072 "Delba Tx success : %d\n" 1073 "Delba Tx Fail : %d\n" 1074 "BA window size : %d\n" 1075 "Pn size : %d\n", 1076 rx_tid->num_of_addba_req, 1077 rx_tid->num_of_addba_resp, 1078 rx_tid->num_addba_rsp_success, 1079 rx_tid->num_addba_rsp_failed, 1080 rx_tid->num_of_delba_req, 1081 rx_tid->delba_tx_success_cnt, 1082 rx_tid->delba_tx_fail_cnt, 1083 rx_tid->ba_win_size, 1084 rx_tid->pn_size); 1085 } 1086 1087 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc, 1088 uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id, 1089 uint8_t vdev_id) 1090 { 1091 struct dp_peer *peer; 1092 1093 QDF_ASSERT(peer_id <= soc->max_peers); 1094 /* check if there's already a peer object with this MAC address */ 1095 peer = dp_peer_find_hash_find(soc, peer_mac_addr, 1096 0 /* is aligned */, vdev_id); 1097 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1098 "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x", 1099 __func__, peer, peer_id, vdev_id, peer_mac_addr[0], 1100 peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3], 1101 peer_mac_addr[4], peer_mac_addr[5]); 1102 1103 if (peer) { 1104 /* peer's ref count was already incremented by 1105 * peer_find_hash_find 1106 */ 1107 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1108 "%s: ref_cnt: %d", __func__, 1109 qdf_atomic_read(&peer->ref_cnt)); 1110 if (!soc->peer_id_to_obj_map[peer_id]) 1111 soc->peer_id_to_obj_map[peer_id] = peer; 1112 else { 1113 /* Peer map event came for peer_id which 1114 * is already mapped, this is not expected 1115 */ 1116 QDF_ASSERT(0); 1117 } 1118 1119 if (dp_peer_find_add_id_to_obj(peer, peer_id)) { 1120 /* TBDXXX: assert for now */ 1121 QDF_ASSERT(0); 1122 } 1123 1124 return peer; 1125 } 1126 1127 return NULL; 1128 } 1129 1130 /** 1131 * dp_rx_peer_map_handler() - handle peer map event from firmware 1132 * @soc_handle - genereic soc handle 1133 * @peeri_id - peer_id from firmware 1134 * @hw_peer_id - ast index for this peer 1135 * @vdev_id - vdev ID 1136 * @peer_mac_addr - mac address of the peer 1137 * @ast_hash - ast hash value 1138 * @is_wds - flag to indicate peer map event for WDS ast entry 1139 * 1140 * associate the peer_id that firmware provided with peer entry 1141 * and update the ast table in the host with the hw_peer_id. 1142 * 1143 * Return: none 1144 */ 1145 1146 void 1147 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, 1148 uint16_t hw_peer_id, uint8_t vdev_id, 1149 uint8_t *peer_mac_addr, uint16_t ast_hash, 1150 uint8_t is_wds) 1151 { 1152 struct dp_soc *soc = (struct dp_soc *)soc_handle; 1153 struct dp_peer *peer = NULL; 1154 1155 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1156 "peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac " 1157 "%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id, 1158 hw_peer_id, peer_mac_addr[0], peer_mac_addr[1], 1159 peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4], 1160 peer_mac_addr[5], vdev_id); 1161 1162 if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { 1163 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1164 "invalid hw_peer_id: %d", hw_peer_id); 1165 qdf_assert_always(0); 1166 } 1167 1168 /* Peer map event for WDS ast entry get the peer from 1169 * obj map 1170 */ 1171 if (is_wds) { 1172 peer = soc->peer_id_to_obj_map[peer_id]; 1173 } else { 1174 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id, 1175 hw_peer_id, vdev_id); 1176 1177 if (peer) { 1178 /* 1179 * For every peer MAp message search and set if bss_peer 1180 */ 1181 if (!(qdf_mem_cmp(peer->mac_addr.raw, 1182 peer->vdev->mac_addr.raw, 1183 DP_MAC_ADDR_LEN))) { 1184 QDF_TRACE(QDF_MODULE_ID_DP, 1185 QDF_TRACE_LEVEL_INFO_HIGH, 1186 "vdev bss_peer!!!!"); 1187 peer->bss_peer = 1; 1188 peer->vdev->vap_bss_peer = peer; 1189 } 1190 1191 if (peer->vdev->opmode == wlan_op_mode_sta) 1192 peer->vdev->bss_ast_hash = ast_hash; 1193 } 1194 } 1195 1196 dp_peer_map_ast(soc, peer, peer_mac_addr, 1197 hw_peer_id, vdev_id, ast_hash); 1198 } 1199 1200 /** 1201 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware 1202 * @soc_handle - genereic soc handle 1203 * @peeri_id - peer_id from firmware 1204 * @vdev_id - vdev ID 1205 * @peer_mac_addr - mac address of the peer 1206 * @is_wds - flag to indicate peer map event for WDS ast entry 1207 * 1208 * Return: none 1209 */ 1210 void 1211 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id, 1212 uint8_t vdev_id, uint8_t *peer_mac_addr, 1213 uint8_t is_wds) 1214 { 1215 struct dp_peer *peer; 1216 struct dp_soc *soc = (struct dp_soc *)soc_handle; 1217 uint8_t i; 1218 1219 if (is_wds) 1220 return; 1221 1222 peer = __dp_peer_find_by_id(soc, peer_id); 1223 1224 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1225 "peer_unmap_event (soc:%pK) peer_id %d peer %pK", 1226 soc, peer_id, peer); 1227 1228 /* 1229 * Currently peer IDs are assigned for vdevs as well as peers. 1230 * If the peer ID is for a vdev, then the peer pointer stored 1231 * in peer_id_to_obj_map will be NULL. 1232 */ 1233 if (!peer) { 1234 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1235 "%s: Received unmap event for invalid peer_id" 1236 " %u", __func__, peer_id); 1237 return; 1238 } 1239 1240 soc->peer_id_to_obj_map[peer_id] = NULL; 1241 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { 1242 if (peer->peer_ids[i] == peer_id) { 1243 peer->peer_ids[i] = HTT_INVALID_PEER; 1244 break; 1245 } 1246 } 1247 1248 if (soc->cdp_soc.ol_ops->peer_unmap_event) { 1249 soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc, 1250 peer_id); 1251 } 1252 1253 /* 1254 * Remove a reference to the peer. 1255 * If there are no more references, delete the peer object. 1256 */ 1257 dp_peer_unref_delete(peer); 1258 } 1259 1260 void 1261 dp_peer_find_detach(struct dp_soc *soc) 1262 { 1263 dp_peer_find_map_detach(soc); 1264 dp_peer_find_hash_detach(soc); 1265 dp_peer_ast_hash_detach(soc); 1266 } 1267 1268 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt, 1269 union hal_reo_status *reo_status) 1270 { 1271 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; 1272 1273 if ((reo_status->rx_queue_status.header.status != 1274 HAL_REO_CMD_SUCCESS) && 1275 (reo_status->rx_queue_status.header.status != 1276 HAL_REO_CMD_DRAIN)) { 1277 /* Should not happen normally. Just print error for now */ 1278 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1279 "%s: Rx tid HW desc update failed(%d): tid %d", 1280 __func__, 1281 reo_status->rx_queue_status.header.status, 1282 rx_tid->tid); 1283 } 1284 } 1285 1286 /* 1287 * dp_find_peer_by_addr - find peer instance by mac address 1288 * @dev: physical device instance 1289 * @peer_mac_addr: peer mac address 1290 * @local_id: local id for the peer 1291 * 1292 * Return: peer instance pointer 1293 */ 1294 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr, 1295 uint8_t *local_id) 1296 { 1297 struct dp_pdev *pdev = (struct dp_pdev *)dev; 1298 struct dp_peer *peer; 1299 1300 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL); 1301 1302 if (!peer) 1303 return NULL; 1304 1305 /* Multiple peer ids? How can know peer id? */ 1306 *local_id = peer->local_id; 1307 DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id); 1308 1309 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 1310 * Decrement it here. 1311 */ 1312 qdf_atomic_dec(&peer->ref_cnt); 1313 1314 return peer; 1315 } 1316 1317 /* 1318 * dp_rx_tid_update_wifi3() – Update receive TID state 1319 * @peer: Datapath peer handle 1320 * @tid: TID 1321 * @ba_window_size: BlockAck window size 1322 * @start_seq: Starting sequence number 1323 * 1324 * Return: 0 on success, error code on failure 1325 */ 1326 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t 1327 ba_window_size, uint32_t start_seq) 1328 { 1329 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1330 struct dp_soc *soc = peer->vdev->pdev->soc; 1331 struct hal_reo_cmd_params params; 1332 1333 qdf_mem_zero(¶ms, sizeof(params)); 1334 1335 params.std.need_status = 1; 1336 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; 1337 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 1338 params.u.upd_queue_params.update_ba_window_size = 1; 1339 params.u.upd_queue_params.ba_window_size = ba_window_size; 1340 1341 if (start_seq < IEEE80211_SEQ_MAX) { 1342 params.u.upd_queue_params.update_ssn = 1; 1343 params.u.upd_queue_params.ssn = start_seq; 1344 } 1345 1346 dp_set_ssn_valid_flag(¶ms, 0); 1347 1348 dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, dp_rx_tid_update_cb, rx_tid); 1349 1350 rx_tid->ba_win_size = ba_window_size; 1351 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { 1352 soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( 1353 peer->vdev->pdev->ctrl_pdev, 1354 peer->vdev->vdev_id, peer->mac_addr.raw, 1355 rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size); 1356 1357 } 1358 return 0; 1359 } 1360 1361 /* 1362 * dp_reo_desc_free() - Callback free reo descriptor memory after 1363 * HW cache flush 1364 * 1365 * @soc: DP SOC handle 1366 * @cb_ctxt: Callback context 1367 * @reo_status: REO command status 1368 */ 1369 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt, 1370 union hal_reo_status *reo_status) 1371 { 1372 struct reo_desc_list_node *freedesc = 1373 (struct reo_desc_list_node *)cb_ctxt; 1374 struct dp_rx_tid *rx_tid = &freedesc->rx_tid; 1375 1376 if ((reo_status->fl_cache_status.header.status != 1377 HAL_REO_CMD_SUCCESS) && 1378 (reo_status->fl_cache_status.header.status != 1379 HAL_REO_CMD_DRAIN)) { 1380 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1381 "%s: Rx tid HW desc flush failed(%d): tid %d", 1382 __func__, 1383 reo_status->rx_queue_status.header.status, 1384 freedesc->rx_tid.tid); 1385 } 1386 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1387 "%s: hw_qdesc_paddr: %pK, tid:%d", __func__, 1388 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid); 1389 qdf_mem_unmap_nbytes_single(soc->osdev, 1390 rx_tid->hw_qdesc_paddr, 1391 QDF_DMA_BIDIRECTIONAL, 1392 rx_tid->hw_qdesc_alloc_size); 1393 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); 1394 qdf_mem_free(freedesc); 1395 } 1396 1397 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86) 1398 /* Hawkeye emulation requires bus address to be >= 0x50000000 */ 1399 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) 1400 { 1401 if (dma_addr < 0x50000000) 1402 return QDF_STATUS_E_FAILURE; 1403 else 1404 return QDF_STATUS_SUCCESS; 1405 } 1406 #else 1407 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) 1408 { 1409 return QDF_STATUS_SUCCESS; 1410 } 1411 #endif 1412 1413 1414 /* 1415 * dp_rx_tid_setup_wifi3() – Setup receive TID state 1416 * @peer: Datapath peer handle 1417 * @tid: TID 1418 * @ba_window_size: BlockAck window size 1419 * @start_seq: Starting sequence number 1420 * 1421 * Return: 0 on success, error code on failure 1422 */ 1423 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, 1424 uint32_t ba_window_size, uint32_t start_seq) 1425 { 1426 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1427 struct dp_vdev *vdev = peer->vdev; 1428 struct dp_soc *soc = vdev->pdev->soc; 1429 uint32_t hw_qdesc_size; 1430 uint32_t hw_qdesc_align; 1431 int hal_pn_type; 1432 void *hw_qdesc_vaddr; 1433 uint32_t alloc_tries = 0; 1434 1435 if (peer->delete_in_progress) 1436 return QDF_STATUS_E_FAILURE; 1437 1438 rx_tid->ba_win_size = ba_window_size; 1439 if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) 1440 return dp_rx_tid_update_wifi3(peer, tid, ba_window_size, 1441 start_seq); 1442 rx_tid->delba_tx_status = 0; 1443 rx_tid->ppdu_id_2k = 0; 1444 rx_tid->num_of_addba_req = 0; 1445 rx_tid->num_of_delba_req = 0; 1446 rx_tid->num_of_addba_resp = 0; 1447 rx_tid->num_addba_rsp_failed = 0; 1448 rx_tid->num_addba_rsp_success = 0; 1449 rx_tid->delba_tx_success_cnt = 0; 1450 rx_tid->delba_tx_fail_cnt = 0; 1451 rx_tid->statuscode = 0; 1452 #ifdef notyet 1453 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size); 1454 #else 1455 /* TODO: Allocating HW queue descriptors based on max BA window size 1456 * for all QOS TIDs so that same descriptor can be used later when 1457 * ADDBA request is recevied. This should be changed to allocate HW 1458 * queue descriptors based on BA window size being negotiated (0 for 1459 * non BA cases), and reallocate when BA window size changes and also 1460 * send WMI message to FW to change the REO queue descriptor in Rx 1461 * peer entry as part of dp_rx_tid_update. 1462 */ 1463 if (tid != DP_NON_QOS_TID) 1464 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, 1465 HAL_RX_MAX_BA_WINDOW); 1466 else 1467 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, 1468 ba_window_size); 1469 #endif 1470 1471 hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc); 1472 /* To avoid unnecessary extra allocation for alignment, try allocating 1473 * exact size and see if we already have aligned address. 1474 */ 1475 rx_tid->hw_qdesc_alloc_size = hw_qdesc_size; 1476 1477 try_desc_alloc: 1478 rx_tid->hw_qdesc_vaddr_unaligned = 1479 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size); 1480 1481 if (!rx_tid->hw_qdesc_vaddr_unaligned) { 1482 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1483 "%s: Rx tid HW desc alloc failed: tid %d", 1484 __func__, tid); 1485 return QDF_STATUS_E_NOMEM; 1486 } 1487 1488 if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) % 1489 hw_qdesc_align) { 1490 /* Address allocated above is not alinged. Allocate extra 1491 * memory for alignment 1492 */ 1493 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); 1494 rx_tid->hw_qdesc_vaddr_unaligned = 1495 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size + 1496 hw_qdesc_align - 1); 1497 1498 if (!rx_tid->hw_qdesc_vaddr_unaligned) { 1499 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1500 "%s: Rx tid HW desc alloc failed: tid %d", 1501 __func__, tid); 1502 return QDF_STATUS_E_NOMEM; 1503 } 1504 1505 hw_qdesc_vaddr = (void *)qdf_align((unsigned long) 1506 rx_tid->hw_qdesc_vaddr_unaligned, 1507 hw_qdesc_align); 1508 1509 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1510 "%s: Total Size %d Aligned Addr %pK", 1511 __func__, rx_tid->hw_qdesc_alloc_size, 1512 hw_qdesc_vaddr); 1513 1514 } else { 1515 hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned; 1516 } 1517 1518 /* TODO: Ensure that sec_type is set before ADDBA is received. 1519 * Currently this is set based on htt indication 1520 * HTT_T2H_MSG_TYPE_SEC_IND from target 1521 */ 1522 switch (peer->security[dp_sec_ucast].sec_type) { 1523 case cdp_sec_type_tkip_nomic: 1524 case cdp_sec_type_aes_ccmp: 1525 case cdp_sec_type_aes_ccmp_256: 1526 case cdp_sec_type_aes_gcmp: 1527 case cdp_sec_type_aes_gcmp_256: 1528 hal_pn_type = HAL_PN_WPA; 1529 break; 1530 case cdp_sec_type_wapi: 1531 if (vdev->opmode == wlan_op_mode_ap) 1532 hal_pn_type = HAL_PN_WAPI_EVEN; 1533 else 1534 hal_pn_type = HAL_PN_WAPI_UNEVEN; 1535 break; 1536 default: 1537 hal_pn_type = HAL_PN_NONE; 1538 break; 1539 } 1540 1541 hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq, 1542 hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type); 1543 1544 qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr, 1545 QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size, 1546 &(rx_tid->hw_qdesc_paddr)); 1547 1548 if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) != 1549 QDF_STATUS_SUCCESS) { 1550 if (alloc_tries++ < 10) 1551 goto try_desc_alloc; 1552 else { 1553 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1554 "%s: Rx tid HW desc alloc failed (lowmem): tid %d", 1555 __func__, tid); 1556 return QDF_STATUS_E_NOMEM; 1557 } 1558 } 1559 1560 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { 1561 soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( 1562 vdev->pdev->ctrl_pdev, 1563 peer->vdev->vdev_id, peer->mac_addr.raw, 1564 rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size); 1565 1566 } 1567 return 0; 1568 } 1569 1570 /* 1571 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache 1572 * after deleting the entries (ie., setting valid=0) 1573 * 1574 * @soc: DP SOC handle 1575 * @cb_ctxt: Callback context 1576 * @reo_status: REO command status 1577 */ 1578 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt, 1579 union hal_reo_status *reo_status) 1580 { 1581 struct reo_desc_list_node *freedesc = 1582 (struct reo_desc_list_node *)cb_ctxt; 1583 uint32_t list_size; 1584 struct reo_desc_list_node *desc; 1585 unsigned long curr_ts = qdf_get_system_timestamp(); 1586 uint32_t desc_size, tot_desc_size; 1587 struct hal_reo_cmd_params params; 1588 1589 if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) { 1590 qdf_mem_zero(reo_status, sizeof(*reo_status)); 1591 reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN; 1592 dp_reo_desc_free(soc, (void *)freedesc, reo_status); 1593 return; 1594 } else if (reo_status->rx_queue_status.header.status != 1595 HAL_REO_CMD_SUCCESS) { 1596 /* Should not happen normally. Just print error for now */ 1597 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1598 "%s: Rx tid HW desc deletion failed(%d): tid %d", 1599 __func__, 1600 reo_status->rx_queue_status.header.status, 1601 freedesc->rx_tid.tid); 1602 } 1603 1604 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, 1605 "%s: rx_tid: %d status: %d", __func__, 1606 freedesc->rx_tid.tid, 1607 reo_status->rx_queue_status.header.status); 1608 1609 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); 1610 freedesc->free_ts = curr_ts; 1611 qdf_list_insert_back_size(&soc->reo_desc_freelist, 1612 (qdf_list_node_t *)freedesc, &list_size); 1613 1614 while ((qdf_list_peek_front(&soc->reo_desc_freelist, 1615 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) && 1616 ((list_size >= REO_DESC_FREELIST_SIZE) || 1617 ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) { 1618 struct dp_rx_tid *rx_tid; 1619 1620 qdf_list_remove_front(&soc->reo_desc_freelist, 1621 (qdf_list_node_t **)&desc); 1622 list_size--; 1623 rx_tid = &desc->rx_tid; 1624 1625 /* Flush and invalidate REO descriptor from HW cache: Base and 1626 * extension descriptors should be flushed separately */ 1627 tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 1628 rx_tid->ba_win_size); 1629 desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0); 1630 1631 /* Flush reo extension descriptors */ 1632 while ((tot_desc_size -= desc_size) > 0) { 1633 qdf_mem_zero(¶ms, sizeof(params)); 1634 params.std.addr_lo = 1635 ((uint64_t)(rx_tid->hw_qdesc_paddr) + 1636 tot_desc_size) & 0xffffffff; 1637 params.std.addr_hi = 1638 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 1639 1640 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, 1641 CMD_FLUSH_CACHE, 1642 ¶ms, 1643 NULL, 1644 NULL)) { 1645 QDF_TRACE(QDF_MODULE_ID_DP, 1646 QDF_TRACE_LEVEL_ERROR, 1647 "%s: fail to send CMD_CACHE_FLUSH:" 1648 "tid %d desc %pK", __func__, 1649 rx_tid->tid, 1650 (void *)(rx_tid->hw_qdesc_paddr)); 1651 } 1652 } 1653 1654 /* Flush base descriptor */ 1655 qdf_mem_zero(¶ms, sizeof(params)); 1656 params.std.need_status = 1; 1657 params.std.addr_lo = 1658 (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff; 1659 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 1660 1661 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, 1662 CMD_FLUSH_CACHE, 1663 ¶ms, 1664 dp_reo_desc_free, 1665 (void *)desc)) { 1666 union hal_reo_status reo_status; 1667 /* 1668 * If dp_reo_send_cmd return failure, related TID queue desc 1669 * should be unmapped. Also locally reo_desc, together with 1670 * TID queue desc also need to be freed accordingly. 1671 * 1672 * Here invoke desc_free function directly to do clean up. 1673 */ 1674 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1675 "%s: fail to send REO cmd to flush cache: tid %d", 1676 __func__, rx_tid->tid); 1677 qdf_mem_zero(&reo_status, sizeof(reo_status)); 1678 reo_status.fl_cache_status.header.status = 0; 1679 dp_reo_desc_free(soc, (void *)desc, &reo_status); 1680 } 1681 } 1682 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); 1683 } 1684 1685 /* 1686 * dp_rx_tid_delete_wifi3() – Delete receive TID queue 1687 * @peer: Datapath peer handle 1688 * @tid: TID 1689 * 1690 * Return: 0 on success, error code on failure 1691 */ 1692 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid) 1693 { 1694 struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]); 1695 struct dp_soc *soc = peer->vdev->pdev->soc; 1696 struct hal_reo_cmd_params params; 1697 struct reo_desc_list_node *freedesc = 1698 qdf_mem_malloc(sizeof(*freedesc)); 1699 1700 if (!freedesc) { 1701 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1702 "%s: malloc failed for freedesc: tid %d", 1703 __func__, tid); 1704 return -ENOMEM; 1705 } 1706 1707 freedesc->rx_tid = *rx_tid; 1708 1709 qdf_mem_zero(¶ms, sizeof(params)); 1710 1711 params.std.need_status = 1; 1712 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; 1713 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 1714 params.u.upd_queue_params.update_vld = 1; 1715 params.u.upd_queue_params.vld = 0; 1716 1717 dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, 1718 dp_rx_tid_delete_cb, (void *)freedesc); 1719 1720 rx_tid->hw_qdesc_vaddr_unaligned = NULL; 1721 rx_tid->hw_qdesc_alloc_size = 0; 1722 rx_tid->hw_qdesc_paddr = 0; 1723 1724 return 0; 1725 } 1726 1727 #ifdef DP_LFR 1728 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) 1729 { 1730 int tid; 1731 1732 for (tid = 1; tid < DP_MAX_TIDS-1; tid++) { 1733 dp_rx_tid_setup_wifi3(peer, tid, 1, 0); 1734 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1735 "Setting up TID %d for peer %pK peer->local_id %d", 1736 tid, peer, peer->local_id); 1737 } 1738 } 1739 #else 1740 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}; 1741 #endif 1742 /* 1743 * dp_peer_rx_init() – Initialize receive TID state 1744 * @pdev: Datapath pdev 1745 * @peer: Datapath peer 1746 * 1747 */ 1748 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer) 1749 { 1750 int tid; 1751 struct dp_rx_tid *rx_tid; 1752 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 1753 rx_tid = &peer->rx_tid[tid]; 1754 rx_tid->array = &rx_tid->base; 1755 rx_tid->base.head = rx_tid->base.tail = NULL; 1756 rx_tid->tid = tid; 1757 rx_tid->defrag_timeout_ms = 0; 1758 rx_tid->ba_win_size = 0; 1759 rx_tid->ba_status = DP_RX_BA_INACTIVE; 1760 1761 rx_tid->defrag_waitlist_elem.tqe_next = NULL; 1762 rx_tid->defrag_waitlist_elem.tqe_prev = NULL; 1763 1764 #ifdef notyet /* TODO: See if this is required for exception handling */ 1765 /* invalid sequence number */ 1766 peer->tids_last_seq[tid] = 0xffff; 1767 #endif 1768 } 1769 1770 peer->active_ba_session_cnt = 0; 1771 peer->hw_buffer_size = 0; 1772 peer->kill_256_sessions = 0; 1773 1774 /* Setup default (non-qos) rx tid queue */ 1775 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0); 1776 1777 /* Setup rx tid queue for TID 0. 1778 * Other queues will be setup on receiving first packet, which will cause 1779 * NULL REO queue error 1780 */ 1781 dp_rx_tid_setup_wifi3(peer, 0, 1, 0); 1782 1783 /* 1784 * Setup the rest of TID's to handle LFR 1785 */ 1786 dp_peer_setup_remaining_tids(peer); 1787 1788 /* 1789 * Set security defaults: no PN check, no security. The target may 1790 * send a HTT SEC_IND message to overwrite these defaults. 1791 */ 1792 peer->security[dp_sec_ucast].sec_type = 1793 peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none; 1794 } 1795 1796 /* 1797 * dp_peer_rx_cleanup() – Cleanup receive TID state 1798 * @vdev: Datapath vdev 1799 * @peer: Datapath peer 1800 * 1801 */ 1802 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) 1803 { 1804 int tid; 1805 uint32_t tid_delete_mask = 0; 1806 1807 DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer); 1808 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 1809 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1810 1811 qdf_spin_lock_bh(&rx_tid->tid_lock); 1812 if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) { 1813 dp_rx_tid_delete_wifi3(peer, tid); 1814 1815 /* Cleanup defrag related resource */ 1816 dp_rx_defrag_waitlist_remove(peer, tid); 1817 dp_rx_reorder_flush_frag(peer, tid); 1818 1819 tid_delete_mask |= (1 << tid); 1820 } 1821 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1822 } 1823 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */ 1824 if (soc->ol_ops->peer_rx_reorder_queue_remove) { 1825 soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev, 1826 peer->vdev->vdev_id, peer->mac_addr.raw, 1827 tid_delete_mask); 1828 } 1829 #endif 1830 for (tid = 0; tid < DP_MAX_TIDS; tid++) 1831 qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock); 1832 } 1833 1834 /* 1835 * dp_peer_cleanup() – Cleanup peer information 1836 * @vdev: Datapath vdev 1837 * @peer: Datapath peer 1838 * 1839 */ 1840 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) 1841 { 1842 peer->last_assoc_rcvd = 0; 1843 peer->last_disassoc_rcvd = 0; 1844 peer->last_deauth_rcvd = 0; 1845 1846 /* cleanup the Rx reorder queues for this peer */ 1847 dp_peer_rx_cleanup(vdev, peer); 1848 } 1849 1850 /* dp_teardown_256_ba_session() - Teardown sessions using 256 1851 * window size when a request with 1852 * 64 window size is received. 1853 * This is done as a WAR since HW can 1854 * have only one setting per peer (64 or 256). 1855 * @peer: Datapath peer 1856 * 1857 * Return: void 1858 */ 1859 static void dp_teardown_256_ba_sessions(struct dp_peer *peer) 1860 { 1861 uint8_t delba_rcode = 0; 1862 int tid; 1863 struct dp_rx_tid *rx_tid = NULL; 1864 1865 for (tid = 0; tid < DP_MAX_TIDS; tid++) { 1866 rx_tid = &peer->rx_tid[tid]; 1867 qdf_spin_lock_bh(&rx_tid->tid_lock); 1868 1869 if (rx_tid->ba_win_size <= 64) { 1870 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1871 continue; 1872 } else { 1873 if (rx_tid->ba_status == DP_RX_BA_ACTIVE || 1874 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 1875 /* send delba */ 1876 if (!rx_tid->delba_tx_status) { 1877 rx_tid->delba_tx_retry++; 1878 rx_tid->delba_tx_status = 1; 1879 rx_tid->delba_rcode = 1880 IEEE80211_REASON_QOS_SETUP_REQUIRED; 1881 delba_rcode = rx_tid->delba_rcode; 1882 1883 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1884 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( 1885 peer->vdev->pdev->ctrl_pdev, 1886 peer->ctrl_peer, 1887 peer->mac_addr.raw, 1888 tid, peer->vdev->ctrl_vdev, 1889 delba_rcode); 1890 } else { 1891 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1892 } 1893 } else { 1894 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1895 } 1896 } 1897 } 1898 } 1899 1900 /* 1901 * dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State 1902 * 1903 * @peer: Datapath peer handle 1904 * @tid: TID number 1905 * @status: tx completion status 1906 * Return: 0 on success, error code on failure 1907 */ 1908 int dp_addba_resp_tx_completion_wifi3(void *peer_handle, 1909 uint8_t tid, int status) 1910 { 1911 struct dp_peer *peer = (struct dp_peer *)peer_handle; 1912 struct dp_rx_tid *rx_tid = NULL; 1913 1914 if (!peer || peer->delete_in_progress) { 1915 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1916 "%s: Peer is NULL!\n", __func__); 1917 return QDF_STATUS_E_FAILURE; 1918 } 1919 rx_tid = &peer->rx_tid[tid]; 1920 qdf_spin_lock_bh(&rx_tid->tid_lock); 1921 if (status) { 1922 rx_tid->num_addba_rsp_failed++; 1923 dp_rx_tid_update_wifi3(peer, tid, 1, 0); 1924 rx_tid->ba_status = DP_RX_BA_INACTIVE; 1925 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1926 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1927 "%s: Rx Tid- %d addba rsp tx completion failed!", 1928 __func__, tid); 1929 return QDF_STATUS_SUCCESS; 1930 } 1931 1932 rx_tid->num_addba_rsp_success++; 1933 if (rx_tid->ba_status == DP_RX_BA_INACTIVE) { 1934 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1935 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1936 "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS", 1937 __func__, tid); 1938 return QDF_STATUS_E_FAILURE; 1939 } 1940 1941 /* First Session */ 1942 if (peer->active_ba_session_cnt == 0) { 1943 if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256) 1944 peer->hw_buffer_size = 256; 1945 else 1946 peer->hw_buffer_size = 64; 1947 } 1948 1949 rx_tid->ba_status = DP_RX_BA_ACTIVE; 1950 1951 peer->active_ba_session_cnt++; 1952 1953 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1954 1955 /* Kill any session having 256 buffer size 1956 * when 64 buffer size request is received. 1957 * Also, latch on to 64 as new buffer size. 1958 */ 1959 if (peer->kill_256_sessions) { 1960 dp_teardown_256_ba_sessions(peer); 1961 peer->kill_256_sessions = 0; 1962 } 1963 return QDF_STATUS_SUCCESS; 1964 } 1965 1966 /* 1967 * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer 1968 * 1969 * @peer: Datapath peer handle 1970 * @tid: TID number 1971 * @dialogtoken: output dialogtoken 1972 * @statuscode: output dialogtoken 1973 * @buffersize: Output BA window size 1974 * @batimeout: Output BA timeout 1975 */ 1976 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid, 1977 uint8_t *dialogtoken, uint16_t *statuscode, 1978 uint16_t *buffersize, uint16_t *batimeout) 1979 { 1980 struct dp_peer *peer = (struct dp_peer *)peer_handle; 1981 struct dp_rx_tid *rx_tid = NULL; 1982 1983 if (!peer || peer->delete_in_progress) { 1984 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1985 "%s: Peer is NULL!\n", __func__); 1986 return; 1987 } 1988 rx_tid = &peer->rx_tid[tid]; 1989 qdf_spin_lock_bh(&rx_tid->tid_lock); 1990 rx_tid->num_of_addba_resp++; 1991 /* setup ADDBA response parameters */ 1992 *dialogtoken = rx_tid->dialogtoken; 1993 *statuscode = rx_tid->statuscode; 1994 *buffersize = rx_tid->ba_win_size; 1995 *batimeout = 0; 1996 qdf_spin_unlock_bh(&rx_tid->tid_lock); 1997 } 1998 1999 /* dp_check_ba_buffersize() - Check buffer size in request 2000 * and latch onto this size based on 2001 * size used in first active session. 2002 * @peer: Datapath peer 2003 * @tid: Tid 2004 * @buffersize: Block ack window size 2005 * 2006 * Return: void 2007 */ 2008 static void dp_check_ba_buffersize(struct dp_peer *peer, 2009 uint16_t tid, 2010 uint16_t buffersize) 2011 { 2012 struct dp_rx_tid *rx_tid = NULL; 2013 2014 rx_tid = &peer->rx_tid[tid]; 2015 2016 if (peer->active_ba_session_cnt == 0) { 2017 rx_tid->ba_win_size = buffersize; 2018 } else { 2019 if (peer->hw_buffer_size == 64) { 2020 if (buffersize <= 64) 2021 rx_tid->ba_win_size = buffersize; 2022 else 2023 rx_tid->ba_win_size = peer->hw_buffer_size; 2024 } else if (peer->hw_buffer_size == 256) { 2025 if (buffersize > 64) { 2026 rx_tid->ba_win_size = buffersize; 2027 } else { 2028 rx_tid->ba_win_size = buffersize; 2029 peer->hw_buffer_size = 64; 2030 peer->kill_256_sessions = 1; 2031 } 2032 } 2033 } 2034 } 2035 2036 /* 2037 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer 2038 * 2039 * @peer: Datapath peer handle 2040 * @dialogtoken: dialogtoken from ADDBA frame 2041 * @tid: TID number 2042 * @batimeout: BA timeout 2043 * @buffersize: BA window size 2044 * @startseqnum: Start seq. number received in BA sequence control 2045 * 2046 * Return: 0 on success, error code on failure 2047 */ 2048 int dp_addba_requestprocess_wifi3(void *peer_handle, 2049 uint8_t dialogtoken, 2050 uint16_t tid, uint16_t batimeout, 2051 uint16_t buffersize, 2052 uint16_t startseqnum) 2053 { 2054 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2055 struct dp_rx_tid *rx_tid = NULL; 2056 2057 if (!peer || peer->delete_in_progress) { 2058 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 2059 "%s: Peer is NULL!\n", __func__); 2060 return QDF_STATUS_E_FAILURE; 2061 } 2062 rx_tid = &peer->rx_tid[tid]; 2063 qdf_spin_lock_bh(&rx_tid->tid_lock); 2064 rx_tid->num_of_addba_req++; 2065 if ((rx_tid->ba_status == DP_RX_BA_ACTIVE && 2066 rx_tid->hw_qdesc_vaddr_unaligned != NULL) || 2067 (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) { 2068 dp_rx_tid_update_wifi3(peer, tid, 1, 0); 2069 rx_tid->ba_status = DP_RX_BA_INACTIVE; 2070 peer->active_ba_session_cnt--; 2071 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2072 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2073 "%s: Rx Tid- %d hw qdesc is already setup", 2074 __func__, tid); 2075 return QDF_STATUS_E_FAILURE; 2076 } 2077 2078 if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 2079 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2080 return QDF_STATUS_E_FAILURE; 2081 } 2082 2083 dp_check_ba_buffersize(peer, tid, buffersize); 2084 2085 if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, startseqnum)) { 2086 rx_tid->ba_status = DP_RX_BA_INACTIVE; 2087 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2088 return QDF_STATUS_E_FAILURE; 2089 } 2090 rx_tid->ba_status = DP_RX_BA_IN_PROGRESS; 2091 2092 rx_tid->ba_win_size = buffersize; 2093 rx_tid->dialogtoken = dialogtoken; 2094 rx_tid->startseqnum = startseqnum; 2095 2096 if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS) 2097 rx_tid->statuscode = rx_tid->userstatuscode; 2098 else 2099 rx_tid->statuscode = IEEE80211_STATUS_SUCCESS; 2100 2101 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2102 2103 return QDF_STATUS_SUCCESS; 2104 } 2105 2106 /* 2107 * dp_set_addba_response() – Set a user defined ADDBA response status code 2108 * 2109 * @peer: Datapath peer handle 2110 * @tid: TID number 2111 * @statuscode: response status code to be set 2112 */ 2113 void dp_set_addba_response(void *peer_handle, uint8_t tid, 2114 uint16_t statuscode) 2115 { 2116 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2117 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 2118 2119 qdf_spin_lock_bh(&rx_tid->tid_lock); 2120 rx_tid->userstatuscode = statuscode; 2121 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2122 } 2123 2124 /* 2125 * dp_rx_delba_process_wifi3() – Process DELBA from peer 2126 * @peer: Datapath peer handle 2127 * @tid: TID number 2128 * @reasoncode: Reason code received in DELBA frame 2129 * 2130 * Return: 0 on success, error code on failure 2131 */ 2132 int dp_delba_process_wifi3(void *peer_handle, 2133 int tid, uint16_t reasoncode) 2134 { 2135 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2136 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 2137 2138 qdf_spin_lock_bh(&rx_tid->tid_lock); 2139 if (rx_tid->ba_status == DP_RX_BA_INACTIVE || 2140 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 2141 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2142 return QDF_STATUS_E_FAILURE; 2143 } 2144 /* TODO: See if we can delete the existing REO queue descriptor and 2145 * replace with a new one without queue extenstion descript to save 2146 * memory 2147 */ 2148 rx_tid->delba_rcode = reasoncode; 2149 rx_tid->num_of_delba_req++; 2150 dp_rx_tid_update_wifi3(peer, tid, 1, 0); 2151 2152 rx_tid->ba_status = DP_RX_BA_INACTIVE; 2153 peer->active_ba_session_cnt--; 2154 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2155 return 0; 2156 } 2157 2158 /* 2159 * dp_rx_delba_tx_completion_wifi3() – Send Delba Request 2160 * 2161 * @peer: Datapath peer handle 2162 * @tid: TID number 2163 * @status: tx completion status 2164 * Return: 0 on success, error code on failure 2165 */ 2166 2167 int dp_delba_tx_completion_wifi3(void *peer_handle, 2168 uint8_t tid, int status) 2169 { 2170 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2171 struct dp_rx_tid *rx_tid = NULL; 2172 2173 if (!peer || peer->delete_in_progress) { 2174 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 2175 "%s: Peer is NULL!", __func__); 2176 return QDF_STATUS_E_FAILURE; 2177 } 2178 rx_tid = &peer->rx_tid[tid]; 2179 qdf_spin_lock_bh(&rx_tid->tid_lock); 2180 if (status) { 2181 rx_tid->delba_tx_fail_cnt++; 2182 if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) { 2183 rx_tid->delba_tx_retry = 0; 2184 rx_tid->delba_tx_status = 0; 2185 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2186 } else { 2187 rx_tid->delba_tx_retry++; 2188 rx_tid->delba_tx_status = 1; 2189 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2190 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( 2191 peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer, 2192 peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev, 2193 rx_tid->delba_rcode); 2194 } 2195 return QDF_STATUS_SUCCESS; 2196 } else { 2197 rx_tid->delba_tx_success_cnt++; 2198 rx_tid->delba_tx_retry = 0; 2199 rx_tid->delba_tx_status = 0; 2200 } 2201 if (rx_tid->ba_status == DP_RX_BA_ACTIVE) { 2202 dp_rx_tid_update_wifi3(peer, tid, 1, 0); 2203 rx_tid->ba_status = DP_RX_BA_INACTIVE; 2204 peer->active_ba_session_cnt--; 2205 } 2206 if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { 2207 dp_rx_tid_update_wifi3(peer, tid, 1, 0); 2208 rx_tid->ba_status = DP_RX_BA_INACTIVE; 2209 } 2210 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2211 2212 return QDF_STATUS_SUCCESS; 2213 } 2214 2215 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid, 2216 qdf_nbuf_t msdu_list) 2217 { 2218 while (msdu_list) { 2219 qdf_nbuf_t msdu = msdu_list; 2220 2221 msdu_list = qdf_nbuf_next(msdu_list); 2222 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2223 "discard rx %pK from partly-deleted peer %pK " 2224 "(%02x:%02x:%02x:%02x:%02x:%02x)", 2225 msdu, peer, 2226 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 2227 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 2228 peer->mac_addr.raw[4], peer->mac_addr.raw[5]); 2229 qdf_nbuf_free(msdu); 2230 } 2231 } 2232 2233 2234 /** 2235 * dp_set_pn_check_wifi3() - enable PN check in REO for security 2236 * @peer: Datapath peer handle 2237 * @vdev: Datapath vdev 2238 * @pdev - data path device instance 2239 * @sec_type - security type 2240 * @rx_pn - Receive pn starting number 2241 * 2242 */ 2243 2244 void 2245 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) 2246 { 2247 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2248 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; 2249 struct dp_pdev *pdev; 2250 struct dp_soc *soc; 2251 int i; 2252 uint8_t pn_size; 2253 struct hal_reo_cmd_params params; 2254 2255 /* preconditions */ 2256 qdf_assert(vdev); 2257 2258 pdev = vdev->pdev; 2259 soc = pdev->soc; 2260 2261 2262 qdf_mem_zero(¶ms, sizeof(params)); 2263 2264 params.std.need_status = 1; 2265 params.u.upd_queue_params.update_pn_valid = 1; 2266 params.u.upd_queue_params.update_pn_size = 1; 2267 params.u.upd_queue_params.update_pn = 1; 2268 params.u.upd_queue_params.update_pn_check_needed = 1; 2269 params.u.upd_queue_params.update_svld = 1; 2270 params.u.upd_queue_params.svld = 0; 2271 2272 peer->security[dp_sec_ucast].sec_type = sec_type; 2273 2274 switch (sec_type) { 2275 case cdp_sec_type_tkip_nomic: 2276 case cdp_sec_type_aes_ccmp: 2277 case cdp_sec_type_aes_ccmp_256: 2278 case cdp_sec_type_aes_gcmp: 2279 case cdp_sec_type_aes_gcmp_256: 2280 params.u.upd_queue_params.pn_check_needed = 1; 2281 params.u.upd_queue_params.pn_size = 48; 2282 pn_size = 48; 2283 break; 2284 case cdp_sec_type_wapi: 2285 params.u.upd_queue_params.pn_check_needed = 1; 2286 params.u.upd_queue_params.pn_size = 128; 2287 pn_size = 128; 2288 if (vdev->opmode == wlan_op_mode_ap) { 2289 params.u.upd_queue_params.pn_even = 1; 2290 params.u.upd_queue_params.update_pn_even = 1; 2291 } else { 2292 params.u.upd_queue_params.pn_uneven = 1; 2293 params.u.upd_queue_params.update_pn_uneven = 1; 2294 } 2295 break; 2296 default: 2297 params.u.upd_queue_params.pn_check_needed = 0; 2298 pn_size = 0; 2299 break; 2300 } 2301 2302 2303 for (i = 0; i < DP_MAX_TIDS; i++) { 2304 struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; 2305 qdf_spin_lock_bh(&rx_tid->tid_lock); 2306 if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) { 2307 params.std.addr_lo = 2308 rx_tid->hw_qdesc_paddr & 0xffffffff; 2309 params.std.addr_hi = 2310 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 2311 2312 if (sec_type != cdp_sec_type_wapi) { 2313 params.u.upd_queue_params.update_pn_valid = 0; 2314 } else { 2315 /* 2316 * Setting PN valid bit for WAPI sec_type, 2317 * since WAPI PN has to be started with 2318 * predefined value 2319 */ 2320 params.u.upd_queue_params.update_pn_valid = 1; 2321 params.u.upd_queue_params.pn_31_0 = rx_pn[0]; 2322 params.u.upd_queue_params.pn_63_32 = rx_pn[1]; 2323 params.u.upd_queue_params.pn_95_64 = rx_pn[2]; 2324 params.u.upd_queue_params.pn_127_96 = rx_pn[3]; 2325 } 2326 rx_tid->pn_size = pn_size; 2327 dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, 2328 dp_rx_tid_update_cb, rx_tid); 2329 } else { 2330 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2331 "PN Check not setup for TID :%d ", i); 2332 } 2333 qdf_spin_unlock_bh(&rx_tid->tid_lock); 2334 } 2335 } 2336 2337 2338 void 2339 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id, 2340 enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key, 2341 u_int32_t *rx_pn) 2342 { 2343 struct dp_soc *soc = (struct dp_soc *)soc_handle; 2344 struct dp_peer *peer; 2345 int sec_index; 2346 2347 peer = dp_peer_find_by_id(soc, peer_id); 2348 if (!peer) { 2349 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2350 "Couldn't find peer from ID %d - skipping security inits", 2351 peer_id); 2352 return; 2353 } 2354 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2355 "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): " 2356 "%s key of type %d", 2357 peer, 2358 peer->mac_addr.raw[0], peer->mac_addr.raw[1], 2359 peer->mac_addr.raw[2], peer->mac_addr.raw[3], 2360 peer->mac_addr.raw[4], peer->mac_addr.raw[5], 2361 is_unicast ? "ucast" : "mcast", 2362 sec_type); 2363 sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast; 2364 peer->security[sec_index].sec_type = sec_type; 2365 #ifdef notyet /* TODO: See if this is required for defrag support */ 2366 /* michael key only valid for TKIP, but for simplicity, 2367 * copy it anyway 2368 */ 2369 qdf_mem_copy( 2370 &peer->security[sec_index].michael_key[0], 2371 michael_key, 2372 sizeof(peer->security[sec_index].michael_key)); 2373 #ifdef BIG_ENDIAN_HOST 2374 OL_IF_SWAPBO(peer->security[sec_index].michael_key[0], 2375 sizeof(peer->security[sec_index].michael_key)); 2376 #endif /* BIG_ENDIAN_HOST */ 2377 #endif 2378 2379 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */ 2380 if (sec_type != htt_sec_type_wapi) { 2381 qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00); 2382 } else { 2383 for (i = 0; i < DP_MAX_TIDS; i++) { 2384 /* 2385 * Setting PN valid bit for WAPI sec_type, 2386 * since WAPI PN has to be started with predefined value 2387 */ 2388 peer->tids_last_pn_valid[i] = 1; 2389 qdf_mem_copy( 2390 (u_int8_t *) &peer->tids_last_pn[i], 2391 (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t)); 2392 peer->tids_last_pn[i].pn128[1] = 2393 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]); 2394 peer->tids_last_pn[i].pn128[0] = 2395 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]); 2396 } 2397 } 2398 #endif 2399 /* TODO: Update HW TID queue with PN check parameters (pn type for 2400 * all security types and last pn for WAPI) once REO command API 2401 * is available 2402 */ 2403 2404 dp_peer_unref_del_find_by_id(peer); 2405 } 2406 2407 #ifndef CONFIG_WIN 2408 /** 2409 * dp_register_peer() - Register peer into physical device 2410 * @pdev - data path device instance 2411 * @sta_desc - peer description 2412 * 2413 * Register peer into physical device 2414 * 2415 * Return: QDF_STATUS_SUCCESS registration success 2416 * QDF_STATUS_E_FAULT peer not found 2417 */ 2418 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle, 2419 struct ol_txrx_desc_type *sta_desc) 2420 { 2421 struct dp_peer *peer; 2422 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2423 2424 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, 2425 sta_desc->sta_id); 2426 if (!peer) 2427 return QDF_STATUS_E_FAULT; 2428 2429 qdf_spin_lock_bh(&peer->peer_info_lock); 2430 peer->state = OL_TXRX_PEER_STATE_CONN; 2431 qdf_spin_unlock_bh(&peer->peer_info_lock); 2432 2433 return QDF_STATUS_SUCCESS; 2434 } 2435 2436 /** 2437 * dp_clear_peer() - remove peer from physical device 2438 * @pdev - data path device instance 2439 * @sta_id - local peer id 2440 * 2441 * remove peer from physical device 2442 * 2443 * Return: QDF_STATUS_SUCCESS registration success 2444 * QDF_STATUS_E_FAULT peer not found 2445 */ 2446 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id) 2447 { 2448 struct dp_peer *peer; 2449 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2450 2451 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id); 2452 if (!peer) 2453 return QDF_STATUS_E_FAULT; 2454 2455 qdf_spin_lock_bh(&peer->peer_info_lock); 2456 peer->state = OL_TXRX_PEER_STATE_DISC; 2457 qdf_spin_unlock_bh(&peer->peer_info_lock); 2458 2459 return QDF_STATUS_SUCCESS; 2460 } 2461 2462 /** 2463 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev 2464 * @pdev - data path device instance 2465 * @vdev - virtual interface instance 2466 * @peer_addr - peer mac address 2467 * @peer_id - local peer id with target mac address 2468 * 2469 * Find peer by peer mac address within vdev 2470 * 2471 * Return: peer instance void pointer 2472 * NULL cannot find target peer 2473 */ 2474 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, 2475 struct cdp_vdev *vdev_handle, 2476 uint8_t *peer_addr, uint8_t *local_id) 2477 { 2478 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2479 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; 2480 struct dp_peer *peer; 2481 2482 DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr); 2483 peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0); 2484 DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev); 2485 2486 if (!peer) 2487 return NULL; 2488 2489 if (peer->vdev != vdev) { 2490 qdf_atomic_dec(&peer->ref_cnt); 2491 return NULL; 2492 } 2493 2494 *local_id = peer->local_id; 2495 DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id); 2496 2497 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 2498 * Decrement it here. 2499 */ 2500 qdf_atomic_dec(&peer->ref_cnt); 2501 2502 return peer; 2503 } 2504 2505 /** 2506 * dp_local_peer_id() - Find local peer id within peer instance 2507 * @peer - peer instance 2508 * 2509 * Find local peer id within peer instance 2510 * 2511 * Return: local peer id 2512 */ 2513 uint16_t dp_local_peer_id(void *peer) 2514 { 2515 return ((struct dp_peer *)peer)->local_id; 2516 } 2517 2518 /** 2519 * dp_peer_find_by_local_id() - Find peer by local peer id 2520 * @pdev - data path device instance 2521 * @local_peer_id - local peer id want to find 2522 * 2523 * Find peer by local peer id within physical device 2524 * 2525 * Return: peer instance void pointer 2526 * NULL cannot find target peer 2527 */ 2528 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id) 2529 { 2530 struct dp_peer *peer; 2531 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2532 2533 if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) { 2534 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, 2535 "Incorrect local id %u", local_id); 2536 return NULL; 2537 } 2538 qdf_spin_lock_bh(&pdev->local_peer_ids.lock); 2539 peer = pdev->local_peer_ids.map[local_id]; 2540 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); 2541 DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id); 2542 return peer; 2543 } 2544 2545 /** 2546 * dp_peer_state_update() - update peer local state 2547 * @pdev - data path device instance 2548 * @peer_addr - peer mac address 2549 * @state - new peer local state 2550 * 2551 * update peer local state 2552 * 2553 * Return: QDF_STATUS_SUCCESS registration success 2554 */ 2555 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac, 2556 enum ol_txrx_peer_state state) 2557 { 2558 struct dp_peer *peer; 2559 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2560 2561 peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL); 2562 if (NULL == peer) { 2563 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2564 "Failed to find peer for: [%pM]", peer_mac); 2565 return QDF_STATUS_E_FAILURE; 2566 } 2567 peer->state = state; 2568 2569 DP_TRACE(INFO, "peer %pK state %d", peer, peer->state); 2570 /* ref_cnt is incremented inside dp_peer_find_hash_find(). 2571 * Decrement it here. 2572 */ 2573 qdf_atomic_dec(&peer->ref_cnt); 2574 2575 return QDF_STATUS_SUCCESS; 2576 } 2577 2578 /** 2579 * dp_get_vdevid() - Get virtual interface id which peer registered 2580 * @peer - peer instance 2581 * @vdev_id - virtual interface id which peer registered 2582 * 2583 * Get virtual interface id which peer registered 2584 * 2585 * Return: QDF_STATUS_SUCCESS registration success 2586 */ 2587 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id) 2588 { 2589 struct dp_peer *peer = peer_handle; 2590 2591 DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d", 2592 peer, peer->vdev, peer->vdev->vdev_id); 2593 *vdev_id = peer->vdev->vdev_id; 2594 return QDF_STATUS_SUCCESS; 2595 } 2596 2597 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle, 2598 uint8_t sta_id) 2599 { 2600 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; 2601 struct dp_peer *peer = NULL; 2602 2603 if (sta_id >= WLAN_MAX_STA_COUNT) { 2604 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2605 "Invalid sta id passed"); 2606 return NULL; 2607 } 2608 2609 if (!pdev) { 2610 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2611 "PDEV not found for sta_id [%d]", sta_id); 2612 return NULL; 2613 } 2614 2615 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id); 2616 if (!peer) { 2617 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 2618 "PEER [%d] not found", sta_id); 2619 return NULL; 2620 } 2621 2622 return (struct cdp_vdev *)peer->vdev; 2623 } 2624 2625 /** 2626 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs 2627 * @peer - peer instance 2628 * 2629 * Get virtual interface instance which peer belongs 2630 * 2631 * Return: virtual interface instance pointer 2632 * NULL in case cannot find 2633 */ 2634 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle) 2635 { 2636 struct dp_peer *peer = peer_handle; 2637 2638 DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev); 2639 return (struct cdp_vdev *)peer->vdev; 2640 } 2641 2642 /** 2643 * dp_peer_get_peer_mac_addr() - Get peer mac address 2644 * @peer - peer instance 2645 * 2646 * Get peer mac address 2647 * 2648 * Return: peer mac address pointer 2649 * NULL in case cannot find 2650 */ 2651 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle) 2652 { 2653 struct dp_peer *peer = peer_handle; 2654 uint8_t *mac; 2655 2656 mac = peer->mac_addr.raw; 2657 DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", 2658 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 2659 return peer->mac_addr.raw; 2660 } 2661 2662 /** 2663 * dp_get_peer_state() - Get local peer state 2664 * @peer - peer instance 2665 * 2666 * Get local peer state 2667 * 2668 * Return: peer status 2669 */ 2670 int dp_get_peer_state(void *peer_handle) 2671 { 2672 struct dp_peer *peer = peer_handle; 2673 2674 DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state); 2675 return peer->state; 2676 } 2677 2678 /** 2679 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device 2680 * @pdev - data path device instance 2681 * 2682 * local peer id pool alloc for physical device 2683 * 2684 * Return: none 2685 */ 2686 void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 2687 { 2688 int i; 2689 2690 /* point the freelist to the first ID */ 2691 pdev->local_peer_ids.freelist = 0; 2692 2693 /* link each ID to the next one */ 2694 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) { 2695 pdev->local_peer_ids.pool[i] = i + 1; 2696 pdev->local_peer_ids.map[i] = NULL; 2697 } 2698 2699 /* link the last ID to itself, to mark the end of the list */ 2700 i = OL_TXRX_NUM_LOCAL_PEER_IDS; 2701 pdev->local_peer_ids.pool[i] = i; 2702 2703 qdf_spinlock_create(&pdev->local_peer_ids.lock); 2704 DP_TRACE(INFO, "Peer pool init"); 2705 } 2706 2707 /** 2708 * dp_local_peer_id_alloc() - allocate local peer id 2709 * @pdev - data path device instance 2710 * @peer - new peer instance 2711 * 2712 * allocate local peer id 2713 * 2714 * Return: none 2715 */ 2716 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 2717 { 2718 int i; 2719 2720 qdf_spin_lock_bh(&pdev->local_peer_ids.lock); 2721 i = pdev->local_peer_ids.freelist; 2722 if (pdev->local_peer_ids.pool[i] == i) { 2723 /* the list is empty, except for the list-end marker */ 2724 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID; 2725 } else { 2726 /* take the head ID and advance the freelist */ 2727 peer->local_id = i; 2728 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i]; 2729 pdev->local_peer_ids.map[i] = peer; 2730 } 2731 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); 2732 DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id); 2733 } 2734 2735 /** 2736 * dp_local_peer_id_free() - remove local peer id 2737 * @pdev - data path device instance 2738 * @peer - peer instance should be removed 2739 * 2740 * remove local peer id 2741 * 2742 * Return: none 2743 */ 2744 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 2745 { 2746 int i = peer->local_id; 2747 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) || 2748 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) { 2749 return; 2750 } 2751 2752 /* put this ID on the head of the freelist */ 2753 qdf_spin_lock_bh(&pdev->local_peer_ids.lock); 2754 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist; 2755 pdev->local_peer_ids.freelist = i; 2756 pdev->local_peer_ids.map[i] = NULL; 2757 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); 2758 } 2759 #endif 2760 2761 /** 2762 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer 2763 * @soc_handle: DP SOC handle 2764 * @peer_id:peer_id of the peer 2765 * 2766 * return: vdev_id of the vap 2767 */ 2768 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, 2769 uint16_t peer_id, uint8_t *peer_mac) 2770 { 2771 struct dp_soc *soc = (struct dp_soc *)soc_handle; 2772 struct dp_peer *peer; 2773 uint8_t vdev_id; 2774 2775 peer = dp_peer_find_by_id(soc, peer_id); 2776 2777 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 2778 "soc %pK peer_id %d", soc, peer_id); 2779 2780 if (!peer) { 2781 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2782 "peer not found "); 2783 return CDP_INVALID_VDEV_ID; 2784 } 2785 2786 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6); 2787 vdev_id = peer->vdev->vdev_id; 2788 2789 dp_peer_unref_del_find_by_id(peer); 2790 2791 return vdev_id; 2792 } 2793 2794 /** 2795 * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW 2796 * @peer: DP peer handle 2797 * @dp_stats_cmd_cb: REO command callback function 2798 * @cb_ctxt: Callback context 2799 * 2800 * Return: none 2801 */ 2802 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb), 2803 void *cb_ctxt) 2804 { 2805 struct dp_soc *soc = peer->vdev->pdev->soc; 2806 struct hal_reo_cmd_params params; 2807 int i; 2808 2809 if (!dp_stats_cmd_cb) 2810 return; 2811 2812 qdf_mem_zero(¶ms, sizeof(params)); 2813 for (i = 0; i < DP_MAX_TIDS; i++) { 2814 struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; 2815 if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) { 2816 params.std.need_status = 1; 2817 params.std.addr_lo = 2818 rx_tid->hw_qdesc_paddr & 0xffffffff; 2819 params.std.addr_hi = 2820 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 2821 2822 if (cb_ctxt) { 2823 dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, 2824 ¶ms, dp_stats_cmd_cb, cb_ctxt); 2825 } else { 2826 dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, 2827 ¶ms, dp_stats_cmd_cb, rx_tid); 2828 } 2829 2830 /* Flush REO descriptor from HW cache to update stats 2831 * in descriptor memory. This is to help debugging */ 2832 qdf_mem_zero(¶ms, sizeof(params)); 2833 params.std.need_status = 0; 2834 params.std.addr_lo = 2835 rx_tid->hw_qdesc_paddr & 0xffffffff; 2836 params.std.addr_hi = 2837 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; 2838 params.u.fl_cache_params.flush_no_inval = 1; 2839 dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, 2840 NULL); 2841 } 2842 } 2843 } 2844 2845 void dp_set_michael_key(struct cdp_peer *peer_handle, 2846 bool is_unicast, uint32_t *key) 2847 { 2848 struct dp_peer *peer = (struct dp_peer *)peer_handle; 2849 uint8_t sec_index = is_unicast ? 1 : 0; 2850 2851 if (!peer) { 2852 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2853 "peer not found "); 2854 return; 2855 } 2856 2857 qdf_mem_copy(&peer->security[sec_index].michael_key[0], 2858 key, IEEE80211_WEP_MICLEN); 2859 } 2860 2861 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id) 2862 { 2863 struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id); 2864 2865 if (peer) { 2866 /* 2867 * Decrement the peer ref which is taken as part of 2868 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled 2869 */ 2870 dp_peer_unref_del_find_by_id(peer); 2871 2872 return true; 2873 } 2874 2875 return false; 2876 } 2877