1 /* 2 * Copyright (c) 2021, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 /** 19 * DOC: wlan_mgmt_txrx_rx_reo.c 20 * This file contains mgmt rx re-ordering related function definitions 21 */ 22 23 #include "wlan_mgmt_txrx_rx_reo_i.h" 24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h> 25 #include "wlan_mgmt_txrx_main_i.h" 26 #include <qdf_util.h> 27 #include <wlan_mlo_mgr_cmn.h> 28 29 static struct mgmt_rx_reo_context *g_rx_reo_ctx; 30 31 #define mgmt_rx_reo_get_context() (g_rx_reo_ctx) 32 #define mgmt_rx_reo_set_context(c) (g_rx_reo_ctx = c) 33 34 #define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000) 35 #define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1) 36 37 /** 38 * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters 39 * @ctr1: Management packet counter1 40 * @ctr2: Management packet counter2 41 * 42 * We can't directly use the comparison operator here because the counters can 43 * overflow. But these counters have a property that the difference between 44 * them can never be greater than half the range of the data type. 45 * We can make use of this condition to detect which one is actually greater. 
46 * 47 * Return: true if @ctr1 is greater than or equal to @ctr2, else false 48 */ 49 static inline bool 50 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2) 51 { 52 uint16_t delta = ctr1 - ctr2; 53 54 return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE; 55 } 56 57 /** 58 * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters 59 * @ctr1: Management packet counter1 60 * @ctr2: Management packet counter2 61 * 62 * We can't directly use the subtract operator here because the counters can 63 * overflow. But these counters have a property that the difference between 64 * them can never be greater than half the range of the data type. 65 * We can make use of this condition to detect whichone is actually greater and 66 * return the difference accordingly. 67 * 68 * Return: Difference between @ctr1 and @crt2 69 */ 70 static inline int 71 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2) 72 { 73 uint16_t delta = ctr1 - ctr2; 74 75 /** 76 * if delta is greater than half the range (i.e, ctr1 is actually 77 * smaller than ctr2), then the result should be a negative number. 78 * subtracting the entire range should give the correct value. 79 */ 80 if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE) 81 return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE; 82 83 return delta; 84 } 85 86 #define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000) 87 /** 88 * mgmt_rx_reo_compare_global_timestamps_gte()-Compare given global timestamps 89 * @ts1: Global timestamp1 90 * @ts2: Global timestamp2 91 * 92 * We can't directly use the comparison operator here because the timestamps can 93 * overflow. But these timestamps have a property that the difference between 94 * them can never be greater than half the range of the data type. 95 * We can make use of this condition to detect which one is actually greater. 
96 * 97 * Return: true if @ts1 is greater than or equal to @ts2, else false 98 */ 99 static inline bool 100 mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2) 101 { 102 uint32_t delta = ts1 - ts2; 103 104 return delta <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE; 105 } 106 107 /** 108 * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame 109 * is stale 110 * @ts_last_released_frame: pointer to global time stamp of the last frame 111 * removed from the reorder list 112 * @frame_desc: pointer to frame descriptor 113 * 114 * This API checks whether the current management frame under processing is 115 * stale. Any frame older than the last frame delivered to upper layer is a 116 * stale frame. This could happen when we have to deliver frames out of order 117 * due to time out or list size limit. The frames which arrive late at host and 118 * with time stamp lesser than the last delivered frame are stale frames and 119 * they need to be handled differently. 120 * 121 * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of 122 * @frame_desc will be filled with proper values. 
123 */ 124 static QDF_STATUS 125 mgmt_rx_reo_is_stale_frame( 126 struct mgmt_rx_reo_global_ts_info *ts_last_released_frame, 127 struct mgmt_rx_reo_frame_descriptor *frame_desc) 128 { 129 uint32_t cur_frame_start_ts; 130 uint32_t cur_frame_end_ts; 131 132 if (!ts_last_released_frame) { 133 mgmt_rx_reo_err("Last released frame time stamp info is null"); 134 return QDF_STATUS_E_NULL_VALUE; 135 } 136 137 if (!frame_desc) { 138 mgmt_rx_reo_err("Frame descriptor is null"); 139 return QDF_STATUS_E_NULL_VALUE; 140 } 141 142 frame_desc->is_stale = false; 143 frame_desc->is_parallel_rx = false; 144 145 if (!ts_last_released_frame->valid) 146 return QDF_STATUS_SUCCESS; 147 148 cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params); 149 cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params); 150 151 frame_desc->is_stale = 152 !mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts, 153 ts_last_released_frame->start_ts); 154 155 if (mgmt_rx_reo_compare_global_timestamps_gte 156 (ts_last_released_frame->start_ts, cur_frame_start_ts) && 157 mgmt_rx_reo_compare_global_timestamps_gte 158 (cur_frame_end_ts, ts_last_released_frame->end_ts)) { 159 frame_desc->is_parallel_rx = true; 160 frame_desc->is_stale = false; 161 } 162 163 return QDF_STATUS_SUCCESS; 164 } 165 166 QDF_STATUS 167 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc) 168 { 169 uint16_t valid_link_bitmap_shmem; 170 uint16_t valid_link_bitmap; 171 int8_t num_active_links_shmem; 172 int8_t num_active_links; 173 QDF_STATUS status; 174 175 if (!psoc) { 176 mgmt_rx_reo_err("psoc is null"); 177 return QDF_STATUS_E_NULL_VALUE; 178 } 179 180 if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc)) 181 return QDF_STATUS_SUCCESS; 182 183 status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc, 184 &num_active_links_shmem); 185 if (QDF_IS_STATUS_ERROR(status)) { 186 mgmt_rx_reo_err("Failed to get number of active MLO HW links"); 187 return QDF_STATUS_E_FAILURE; 188 } 189 
qdf_assert_always(num_active_links_shmem > 0); 190 191 num_active_links = wlan_mlo_get_num_active_links(); 192 qdf_assert_always(num_active_links > 0); 193 194 qdf_assert_always(num_active_links_shmem == num_active_links); 195 196 status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc, 197 &valid_link_bitmap_shmem); 198 if (QDF_IS_STATUS_ERROR(status)) { 199 mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap"); 200 return QDF_STATUS_E_INVAL; 201 } 202 qdf_assert_always(valid_link_bitmap_shmem != 0); 203 204 valid_link_bitmap = wlan_mlo_get_valid_link_bitmap(); 205 qdf_assert_always(valid_link_bitmap_shmem != 0); 206 207 qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap); 208 209 return QDF_STATUS_SUCCESS; 210 } 211 212 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT 213 /** 214 * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid 215 * 216 * Return: true if @link_id is a valid link else false 217 */ 218 static bool 219 mgmt_rx_reo_is_valid_link(uint8_t link_id) 220 { 221 uint16_t valid_hw_link_bitmap; 222 223 if (link_id >= MAX_MLO_LINKS) { 224 mgmt_rx_reo_err("Invalid link id %u", link_id); 225 return false; 226 } 227 228 valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap(); 229 qdf_assert_always(valid_hw_link_bitmap); 230 231 return (valid_hw_link_bitmap & (1 << link_id)); 232 } 233 234 /** 235 * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the 236 * system 237 * @reo_context: Pointer to reo context object 238 * 239 * Return: On success returns number of active MLO HW links. On failure 240 * returns WLAN_MLO_INVALID_NUM_LINKS. 
241 */ 242 static int8_t 243 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) { 244 if (!reo_context) { 245 mgmt_rx_reo_err("Mgmt reo context is null"); 246 return WLAN_MLO_INVALID_NUM_LINKS; 247 } 248 249 return wlan_mlo_get_num_active_links(); 250 } 251 252 static QDF_STATUS 253 mgmt_rx_reo_handle_potential_premature_delivery( 254 struct mgmt_rx_reo_context *reo_context, 255 uint32_t global_timestamp) 256 { 257 return QDF_STATUS_SUCCESS; 258 } 259 260 static QDF_STATUS 261 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list, 262 struct mgmt_rx_reo_frame_descriptor *desc) 263 { 264 return QDF_STATUS_SUCCESS; 265 } 266 #else 267 /** 268 * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid 269 * 270 * Return: true if @link_id is a valid link, else false 271 */ 272 static bool 273 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context, 274 uint8_t link_id) 275 { 276 bool is_valid_link = false; 277 278 if (!sim_context) { 279 mgmt_rx_reo_err("Mgmt reo sim context is null"); 280 return false; 281 } 282 283 if (link_id >= MAX_MLO_LINKS) { 284 mgmt_rx_reo_err("Invalid link id %u", link_id); 285 return false; 286 } 287 288 qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock); 289 290 if (sim_context->link_id_to_pdev_map.map[link_id]) 291 is_valid_link = true; 292 293 qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock); 294 295 return is_valid_link; 296 } 297 298 /** 299 * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid 300 * 301 * Return: true if @link_id is a valid link else false 302 */ 303 static bool 304 mgmt_rx_reo_is_valid_link(uint8_t link_id) 305 { 306 struct mgmt_rx_reo_context *reo_context; 307 308 reo_context = mgmt_rx_reo_get_context(); 309 310 if (!reo_context) { 311 mgmt_rx_reo_err("Mgmt reo context is null"); 312 return false; 313 } 314 315 return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context, 316 link_id); 317 } 318 319 /** 320 * 
mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo 321 * simulation context object 322 * @sim_context: Pointer to reo simulation context object 323 * 324 * Number of MLO links will be equal to number of pdevs in the 325 * system. In case of simulation all the pdevs are assumed 326 * to have MLO capability. 327 * 328 * Return: On success returns number of MLO HW links. On failure 329 * returns WLAN_MLO_INVALID_NUM_LINKS. 330 */ 331 static int8_t 332 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context) 333 { 334 uint8_t num_mlo_links; 335 336 if (!sim_context) { 337 mgmt_rx_reo_err("Mgmt reo simulation context is null"); 338 return WLAN_MLO_INVALID_NUM_LINKS; 339 } 340 341 qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock); 342 343 num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links; 344 345 qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock); 346 347 return num_mlo_links; 348 } 349 350 /** 351 * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo 352 * context object 353 * @reo_context: Pointer to reo context object 354 * 355 * Return: On success returns number of MLO HW links. On failure 356 * returns WLAN_MLO_INVALID_NUM_LINKS. 357 */ 358 static int8_t 359 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) { 360 if (!reo_context) { 361 mgmt_rx_reo_err("Mgmt reo context is null"); 362 return WLAN_MLO_INVALID_NUM_LINKS; 363 } 364 365 return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context); 366 } 367 368 /** 369 * mgmt_rx_reo_sim_get_context() - Helper API to get the management 370 * rx reorder simulation context 371 * 372 * Return: On success returns the pointer to management rx reorder 373 * simulation context. On failure returns NULL. 
374 */ 375 static struct mgmt_rx_reo_sim_context * 376 mgmt_rx_reo_sim_get_context(void) 377 { 378 struct mgmt_rx_reo_context *reo_context; 379 380 reo_context = mgmt_rx_reo_get_context(); 381 if (!reo_context) { 382 mgmt_rx_reo_err("Mgmt reo context is null"); 383 return NULL; 384 } 385 386 return &reo_context->sim_context; 387 } 388 389 int8_t 390 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev) 391 { 392 struct mgmt_rx_reo_sim_context *sim_context; 393 int8_t link_id; 394 395 sim_context = mgmt_rx_reo_sim_get_context(); 396 if (!sim_context) { 397 mgmt_rx_reo_err("Mgmt reo simulation context is null"); 398 return MGMT_RX_REO_INVALID_LINK_ID; 399 } 400 401 qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock); 402 403 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) 404 if (sim_context->link_id_to_pdev_map.map[link_id] == pdev) 405 break; 406 407 /* pdev is not found in map */ 408 if (link_id == MAX_MLO_LINKS) 409 link_id = MGMT_RX_REO_INVALID_LINK_ID; 410 411 qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock); 412 413 return link_id; 414 } 415 416 struct wlan_objmgr_pdev * 417 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id, 418 wlan_objmgr_ref_dbgid refdbgid) 419 { 420 struct mgmt_rx_reo_sim_context *sim_context; 421 struct wlan_objmgr_pdev *pdev; 422 QDF_STATUS status; 423 424 sim_context = mgmt_rx_reo_sim_get_context(); 425 if (!sim_context) { 426 mgmt_rx_reo_err("Mgmt reo simulation context is null"); 427 return NULL; 428 } 429 430 if (mlo_link_id >= MAX_MLO_LINKS) { 431 mgmt_rx_reo_err("Invalid link id %u", mlo_link_id); 432 return NULL; 433 } 434 435 qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock); 436 437 pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id]; 438 status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid); 439 if (QDF_IS_STATUS_ERROR(status)) { 440 mgmt_rx_reo_err("Failed to get pdev reference"); 441 return NULL; 442 } 443 444 qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock); 445 
446 return pdev; 447 } 448 449 /** 450 * mgmt_rx_reo_handle_potential_premature_delivery - Helper API to handle 451 * premature delivery. 452 * @reo_context: Pointer to reorder list 453 * @global_timestamp: Global time stamp of the current management frame 454 * 455 * Sometimes we have to deliver a management frame to the upper layers even 456 * before its wait count reaching zero. This is called premature delivery. 457 * Premature delivery could happen due to time out or reorder list overflow. 458 * 459 * Return: QDF_STATUS 460 */ 461 static QDF_STATUS 462 mgmt_rx_reo_handle_potential_premature_delivery( 463 struct mgmt_rx_reo_context *reo_context, 464 uint32_t global_timestamp) 465 { 466 qdf_list_t stale_frame_list_temp; 467 QDF_STATUS status; 468 struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL; 469 struct mgmt_rx_reo_pending_frame_list_entry *cur_entry; 470 struct mgmt_rx_reo_sim_context *sim_context; 471 struct mgmt_rx_reo_master_frame_list *master_frame_list; 472 473 if (!reo_context) 474 return QDF_STATUS_E_NULL_VALUE; 475 476 sim_context = &reo_context->sim_context; 477 master_frame_list = &sim_context->master_frame_list; 478 479 qdf_spin_lock(&master_frame_list->lock); 480 481 qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) { 482 if (cur_entry->params.global_timestamp == global_timestamp) 483 break; 484 485 latest_stale_frame = cur_entry; 486 } 487 488 if (latest_stale_frame) { 489 qdf_list_create(&stale_frame_list_temp, 490 MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE); 491 492 status = qdf_list_split(&stale_frame_list_temp, 493 &master_frame_list->pending_list, 494 &latest_stale_frame->node); 495 if (QDF_IS_STATUS_ERROR(status)) 496 goto exit_unlock_master_frame_list; 497 498 status = qdf_list_join(&master_frame_list->stale_list, 499 &stale_frame_list_temp); 500 if (QDF_IS_STATUS_ERROR(status)) 501 goto exit_unlock_master_frame_list; 502 } 503 504 status = QDF_STATUS_SUCCESS; 505 506 
exit_unlock_master_frame_list: 507 qdf_spin_unlock(&master_frame_list->lock); 508 509 return status; 510 } 511 512 /** 513 * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the 514 * stale management frame list 515 * @master_frame_list: pointer to master management frame list 516 * @reo_params: pointer to reo params 517 * 518 * This API removes frames from the stale management frame list. 519 * 520 * Return: QDF_STATUS of operation 521 */ 522 static QDF_STATUS 523 mgmt_rx_reo_sim_remove_frame_from_stale_list( 524 struct mgmt_rx_reo_master_frame_list *master_frame_list, 525 const struct mgmt_rx_reo_params *reo_params) 526 { 527 struct mgmt_rx_reo_stale_frame_list_entry *cur_entry; 528 struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL; 529 QDF_STATUS status; 530 531 if (!master_frame_list || !reo_params) 532 return QDF_STATUS_E_NULL_VALUE; 533 534 qdf_spin_lock(&master_frame_list->lock); 535 536 /** 537 * Stale frames can come in any order at host. Do a linear search and 538 * remove the matching entry. 
539 */ 540 qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) { 541 if (cur_entry->params.link_id == reo_params->link_id && 542 cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr && 543 cur_entry->params.global_timestamp == 544 reo_params->global_timestamp) { 545 matching_entry = cur_entry; 546 break; 547 } 548 } 549 550 if (!matching_entry) { 551 qdf_spin_unlock(&master_frame_list->lock); 552 mgmt_rx_reo_err("reo sim failure: absent in stale frame list"); 553 qdf_assert_always(0); 554 } 555 556 status = qdf_list_remove_node(&master_frame_list->stale_list, 557 &matching_entry->node); 558 559 if (QDF_IS_STATUS_ERROR(status)) { 560 qdf_spin_unlock(&master_frame_list->lock); 561 return status; 562 } 563 564 qdf_mem_free(matching_entry); 565 566 qdf_spin_unlock(&master_frame_list->lock); 567 568 return QDF_STATUS_SUCCESS; 569 } 570 571 /** 572 * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames. 573 * @reo_list: Pointer to reorder list 574 * @desc: Pointer to frame descriptor 575 * 576 * Return: QDF_STATUS of operation 577 */ 578 static QDF_STATUS 579 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list, 580 struct mgmt_rx_reo_frame_descriptor *desc) 581 { 582 QDF_STATUS status; 583 struct mgmt_rx_reo_context *reo_context; 584 struct mgmt_rx_reo_sim_context *sim_context; 585 struct mgmt_rx_reo_params *reo_params; 586 587 if (!reo_list || !desc) 588 return QDF_STATUS_E_NULL_VALUE; 589 590 /* FW consumed/Error frames are already removed */ 591 if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) 592 return QDF_STATUS_SUCCESS; 593 594 reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list); 595 if (!reo_context) 596 return QDF_STATUS_E_NULL_VALUE; 597 598 sim_context = &reo_context->sim_context; 599 600 reo_params = desc->rx_params->reo_params; 601 if (!reo_params) 602 return QDF_STATUS_E_NULL_VALUE; 603 604 status = mgmt_rx_reo_sim_remove_frame_from_stale_list( 605 &sim_context->master_frame_list, 
			reo_params);

	return status;
}
#endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */

/**
 * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
 * whether the current frame getting delivered to upper layer is a premature
 * delivery
 * @release_reason: release reason
 *
 * Return: true for a premature delivery
 */
static bool
mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
{
	/* Any release not driven by a zero wait count is premature */
	return !(release_reason &
			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
}

/**
 * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
 * MGMT Rx REO module
 * @pdev: pointer to pdev object
 *
 * Return: Pointer to pdev private object of MGMT Rx REO module on success,
 * else NULL
 */
static struct mgmt_rx_reo_pdev_info *
wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
{
	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;

	if (!pdev) {
		mgmt_rx_reo_err("pdev is null");
		return NULL;
	}

	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
		wlan_objmgr_pdev_get_comp_private_obj(pdev,
						      WLAN_UMAC_COMP_MGMT_TXRX);

	if (!mgmt_txrx_pdev_ctx) {
		mgmt_rx_reo_err("mgmt txrx context is NULL");
		return NULL;
	}

	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
}

/**
 * mgmt_rx_reo_print_snapshots() - Print all snapshots related
 * to management Rx reorder module
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_print_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 * @link: link ID
 *
 * A snapshot is stale when it lags behind the MAC HW snapshot in either
 * global timestamp or packet counter. Such snapshots are marked invalid so
 * that later stages ignore them.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_invalidate_stale_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss,
			 uint8_t link)
{
	/* MAC HW snapshot is the reference; nothing to do if it is invalid */
	if (!mac_hw_ss->valid)
		return QDF_STATUS_SUCCESS;

	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}
	}

	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}
	}

	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
					  link);
			host_ss->valid = false;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Verifies the expected monotonic ordering between the four snapshots:
 * MAC HW >= FW forwarded/consumed >= host, in both global timestamp and
 * packet counter. Any violation is reported and all snapshots are dumped.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_snapshots_check_sanity
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	QDF_STATUS status;

	/* If MAC HW never saw a frame, no other stage may have either */
	if (!mac_hw_ss->valid) {
		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
		    host_ss->valid) {
			mgmt_rx_reo_err("MAC HW SS is invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* Host can't have seen a frame FW never forwarded or consumed */
	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
		if (host_ss->valid) {
			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		/* Host must lag whichever FW snapshot is the only valid one */
		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/*
		 * When both FW snapshots are valid, at least one of them
		 * must be ahead of (or equal to) the host snapshot.
		 */
		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp) &&
			    !mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr) &&
			    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return status;
}

/**
 * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
 * frames an incoming frame should wait for before it gets delivered.
 * @in_frame_pdev: pdev on which this frame is received
 * @desc: frame descriptor of the incoming frame
 *
 * Each frame carries a MGMT pkt number which is local to that link, and a
 * timestamp which is global across all the links. MAC HW and FW also capture
 * the same details of the last frame that they have seen. Host also maintains
 * the details of the last frame it has seen. In total, there are 4 snapshots.
 * 1. MAC HW snapshot - latest frame seen at MAC HW
 * 2. FW forwarded snapshot - latest frame forwarded to the Host
 * 3. FW consumed snapshot - latest frame consumed by the FW
 * 4. Host/FW consumed snapshot - latest frame seen by the Host
 * By using all these snapshots, this function tries to compute the wait count
 * for a given incoming frame on all links.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
wlan_mgmt_rx_reo_algo_calculate_wait_count(
		struct wlan_objmgr_pdev *in_frame_pdev,
		struct mgmt_rx_reo_frame_descriptor *desc)
{
	QDF_STATUS status;
	uint8_t link;
	int8_t in_frame_link;
	int frames_pending, delta_fwd_host;
	uint8_t snapshot_id;
	struct wlan_objmgr_pdev *pdev;
	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
	struct mgmt_rx_reo_snapshot_info *snapshot_info;
	struct mgmt_rx_reo_snapshot_params snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
					   *fw_consumed_ss, *host_ss;
	struct mgmt_rx_reo_params *in_frame_params;
	struct mgmt_rx_reo_wait_count *wait_count;

	if (!in_frame_pdev) {
		mgmt_rx_reo_err("pdev is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!desc) {
		mgmt_rx_reo_err("Frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	in_frame_params = desc->rx_params->reo_params;
	if (!in_frame_params) {
		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	wait_count = &desc->wait_count;

	/* Get the MLO link ID of incoming frame */
	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
	qdf_assert_always(in_frame_link >= 0);
	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link));

	in_frame_rx_reo_pdev_ctx =
			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
	if (!in_frame_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
		return QDF_STATUS_E_FAILURE;
	}
	/* Scratch area reused for every link's raw snapshot reads below */
	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));

	/* Iterate over all the valid MLO links */
	for (link = 0; link < MAX_MLO_LINKS; link++) {
		/* No need to wait for any frames on an invalid link */
		if (!mgmt_rx_reo_is_valid_link(link)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		pdev = wlan_get_pdev_from_mlo_link_id(link,
						      WLAN_MGMT_RX_REO_ID);

		/* No need to wait for any frames if the pdev is not found */
		if (!pdev) {
			mgmt_rx_reo_debug("pdev is null for link %d", link);
			frames_pending = 0;
			goto update_pending_frames;
		}

		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
		if (!rx_reo_pdev_ctx) {
			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
					pdev);
			/* Drop the ref taken by wlan_get_pdev_from_mlo_link_id */
			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
			return QDF_STATUS_E_FAILURE;
		}

		if (!rx_reo_pdev_ctx->init_complete) {
			mgmt_rx_reo_debug("REO init in progress for link %d",
					  link);
			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
			frames_pending = 0;
			goto update_pending_frames;
		}

		host_ss = &rx_reo_pdev_ctx->host_snapshot;
		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;

		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);

		snapshot_id = 0;
		/* Read all the shared snapshots */
		while (snapshot_id <
			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
			snapshot_info = &rx_reo_pdev_ctx->
				host_target_shared_snapshot_info[snapshot_id];

			qdf_mem_zero(&snapshot_params[snapshot_id],
				     sizeof(snapshot_params[snapshot_id]));

			status = tgt_mgmt_rx_reo_read_snapshot(
					pdev, snapshot_info, snapshot_id,
					&snapshot_params[snapshot_id],
					in_frame_rx_reo_pdev_ctx->raw_snapshots
					[link][snapshot_id]);

			/* Read operation shouldn't fail */
			if (QDF_IS_STATUS_ERROR(status)) {
				/*
				 * NOTE(review): adjacent string literals below
				 * concatenate without a space, so the log reads
				 * "... read failed onlink (%d)".
				 */
				mgmt_rx_reo_err("snapshot(%d) read failed on"
						"link (%d)", snapshot_id, link);
				wlan_objmgr_pdev_release_ref(
						pdev, WLAN_MGMT_RX_REO_ID);
				return status;
			}

			/* If snapshot is valid, save it in the pdev context */
			if (snapshot_params[snapshot_id].valid) {
				rx_reo_pdev_ctx->
				   last_valid_shared_snapshot[snapshot_id] =
				   snapshot_params[snapshot_id];
			}
			desc->shared_snapshots[link][snapshot_id] =
				snapshot_params[snapshot_id];

			snapshot_id++;
		}

		/* pdev no longer needed; context data was copied above */
		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);

		mac_hw_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
		fw_forwarded_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
		fw_consumed_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];

		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
								fw_forwarded_ss,
								fw_consumed_ss,
								host_ss, link);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
					link);
			return status;
		}

		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
								*mac_hw_ss;
		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
								*fw_forwarded_ss;
		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
								*fw_consumed_ss;
		desc->host_snapshot[link] = *host_ss;

		status = mgmt_rx_reo_snapshots_check_sanity
			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
					   link);
			return status;
		}

		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
				 link, mac_hw_ss->valid,
				 mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
				 link, fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
				 link, fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);

		/* No need to wait for any frames on the same link */
		if (link == in_frame_link) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/*
		 * If MAC HW snapshot is invalid, the link has not started
		 * receiving management frames. Set wait count to zero.
		 */
		if (!mac_hw_ss->valid) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/*
		 * If host snapshot is invalid, wait for MAX number of frames.
		 * When any frame in this link arrives at host, actual wait
		 * counts will be updated.
		 */
		if (!host_ss->valid) {
			wait_count->per_link_count[link] = UINT_MAX;
			wait_count->total_count += UINT_MAX;
			goto print_wait_count;
		}

		/*
		 * If MAC HW snapshot sequence number and host snapshot
		 * sequence number are same, all the frames received by
		 * this link are processed by host. No need to wait for
		 * any frames from this link.
		 */
		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
						   host_ss->mgmt_pkt_ctr)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/*
		 * Ideally, the incoming frame has to wait for only those frames
		 * (on other links) which meet all the below criterion.
		 * 1. Frame's timestamp is less than incoming frame's
		 * 2. Frame is supposed to be consumed by the Host
		 * 3. Frame is not yet seen by the Host.
		 * We may not be able to compute the exact optimal wait count
		 * because HW/FW provides a limited assist.
		 * This algorithm tries to get the best estimate of wait count
		 * by not waiting for those frames where we have a conclusive
		 * evidence that we don't have to wait for those frames.
		 */

		/*
		 * If this link has already seen a frame whose timestamp is
		 * greater than or equal to incoming frame's timestamp,
		 * then no need to wait for any frames on this link.
		 * If the total wait count becomes zero, then the policy on
		 * whether to deliver such a frame to upper layers is handled
		 * separately.
		 */
		if (mgmt_rx_reo_compare_global_timestamps_gte(
				host_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/*
		 * For starters, we only have to wait for the frames that are
		 * seen by MAC HW but not yet seen by Host. The frames which
		 * reach MAC HW later are guaranteed to have a timestamp
		 * greater than incoming frame's timestamp.
		 */
		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
					mac_hw_ss->mgmt_pkt_ctr,
					host_ss->mgmt_pkt_ctr);
		qdf_assert_always(frames_pending >= 0);

		if (frames_pending &&
		    mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 in_frame_params->global_timestamp)) {
			/*
			 * Last frame seen at MAC HW has timestamp greater than
			 * or equal to incoming frame's timestamp. So no need to
			 * wait for that last frame, but we can't conclusively
			 * say anything about timestamp of frames before the
			 * last frame, so try to wait for all of those frames.
			 */
			frames_pending--;
			qdf_assert_always(frames_pending >= 0);

			if (fw_consumed_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_consumed_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				/*
				 * Last frame consumed by the FW has timestamp
				 * greater than or equal to incoming frame's.
				 * That means all the frames from
				 * fw_consumed_ss->mgmt_pkt_ctr to
				 * mac_hw->mgmt_pkt_ctr will have timestamp
				 * greater than or equal to incoming frame's and
				 * hence, no need to wait for those frames.
				 * We just need to wait for frames from
				 * host_ss->mgmt_pkt_ctr to
				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				frames_pending =
					mgmt_rx_reo_subtract_pkt_ctrs(
						fw_consumed_ss->mgmt_pkt_ctr,
						host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(frames_pending >= 0);

				/*
				 * Last frame forwarded to Host has timestamp
				 * less than incoming frame's. That means all
				 * the frames starting from
				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
				 * the FW and hence, no need to wait for those
				 * frames. We just need to wait for frames
				 * from host_ss->mgmt_pkt_ctr to
				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				if (fw_forwarded_ss->valid &&
				    !mgmt_rx_reo_compare_global_timestamps_gte(
					fw_forwarded_ss->global_timestamp,
					in_frame_params->global_timestamp)) {
					frames_pending =
						mgmt_rx_reo_subtract_pkt_ctrs(
						    fw_forwarded_ss->mgmt_pkt_ctr,
						    host_ss->mgmt_pkt_ctr);

					/*
					 * frames_pending can be negative in
					 * cases when there are no frames
					 * getting forwarded to the Host. No
					 * need to wait for any frames in that
					 * case.
					 */
					if (frames_pending < 0)
						frames_pending = 0;
				}
			}

			/*
			 * Last frame forwarded to Host has timestamp greater
			 * than or equal to incoming frame's. That means all the
			 * frames from fw_forwarded->mgmt_pkt_ctr to
			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
			 * or equal to incoming frame's and hence, no need to
			 * wait for those frames. We may have to just wait for
			 * frames from host_ss->mgmt_pkt_ctr to
			 * fw_forwarded_ss->mgmt_pkt_ctr-1
			 */
			if (fw_forwarded_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_forwarded_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				delta_fwd_host =
					mgmt_rx_reo_subtract_pkt_ctrs(
						fw_forwarded_ss->mgmt_pkt_ctr,
						host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(delta_fwd_host >= 0);

				/*
				 * This will be a better estimate over the one
				 * we computed using mac_hw_ss but this may or
				 * may not be a better estimate over the
				 * one we computed using fw_consumed_ss.
				 * When timestamps of both fw_consumed_ss and
				 * fw_forwarded_ss are greater than incoming
				 * frame's but timestamp of fw_consumed_ss is
				 * smaller than fw_forwarded_ss, then
				 * frames_pending will be smaller than
				 * delta_fwd_host, the reverse will be true in
				 * other cases. Instead of checking for all
				 * those cases, just waiting for the minimum
				 * among these two should be sufficient.
				 */
				frames_pending = qdf_min(frames_pending,
							 delta_fwd_host);
				qdf_assert_always(frames_pending >= 0);
			}
		}

update_pending_frames:
		qdf_assert_always(frames_pending >= 0);

		wait_count->per_link_count[link] = frames_pending;
		wait_count->total_count += frames_pending;

print_wait_count:
		mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
				 link, wait_count->per_link_count[link],
				 wait_count->total_count);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
 * information about a reo list entry for debug purposes.
 * @link_id: link id
 * @mgmt_pkt_ctr: management packet counter
 * @global_timestamp: global time stamp
 * @wait_count: wait count values
 * @status: status of the entry in the list
 * @entry: pointer to reo list entry
 */
struct mgmt_rx_reo_list_entry_debug_info {
	uint8_t link_id;
	uint16_t mgmt_pkt_ctr;
	uint32_t global_timestamp;
	struct mgmt_rx_reo_wait_count wait_count;
	uint32_t status;
	struct mgmt_rx_reo_list_entry *entry;
};

/**
 * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
 * @reo_list: Pointer to reorder list
 *
 * Entries are first copied into a temporary array under the list lock and
 * printed only after the lock is dropped, so that the (slow) logging does
 * not extend the critical section.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
{
	uint32_t reo_list_size;
	uint32_t index;
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry_debug_info *debug_info;

	if (!reo_list) {
		mgmt_rx_reo_err("Pointer to reo list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	reo_list_size = qdf_list_size(&reo_list->list);

	if (reo_list_size == 0) {
		qdf_spin_unlock_bh(&reo_list->list_lock);
		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
				  reo_list_size);
		return QDF_STATUS_SUCCESS;
	}

	/* Atomic alloc: we are inside a BH-disabled spin lock here */
	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
	if (!debug_info) {
		qdf_spin_unlock_bh(&reo_list->list_lock);
		mgmt_rx_reo_err("Memory allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	index = 0;
	qdf_list_for_each(&reo_list->list, cur_entry, node) {
		debug_info[index].link_id =
				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
		debug_info[index].mgmt_pkt_ctr =
			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
		debug_info[index].global_timestamp =
				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
		debug_info[index].wait_count = cur_entry->wait_count;
		debug_info[index].status = cur_entry->status;
		debug_info[index].entry = cur_entry;

		++index;
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	mgmt_rx_reo_debug("Reorder list");
	mgmt_rx_reo_debug("##################################################");
	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
			  reo_list_size);
	for (index = 0; index < reo_list_size; index++) {
		uint8_t link_id;

		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
				  index, debug_info[index].link_id,
				  debug_info[index].global_timestamp,
				  debug_info[index].mgmt_pkt_ctr,
				  debug_info[index].status,
				  debug_info[index].entry);

		mgmt_rx_reo_debug("Total wait count = 0x%llx",
				  debug_info[index].wait_count.total_count);

		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
					  link_id, debug_info[index].wait_count.
					  per_link_count[link_id]);
	}
	mgmt_rx_reo_debug("##################################################");

	qdf_mem_free(debug_info);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
/**
 * mgmt_rx_reo_egress_frame_debug_info_enabled() - API to check whether egress
 * frame info debug feature is enabled
 * @egress_frame_debug_info: Pointer to egress frame debug info object
 *
 * A non-zero ring-buffer size (frame_list_size) means the feature is on.
 *
 * Return: true or false
 */
static bool
mgmt_rx_reo_egress_frame_debug_info_enabled
			(struct reo_egress_debug_info *egress_frame_debug_info)
{
	return egress_frame_debug_info->frame_list_size;
}

/**
 * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
 * related to frames going out of the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going out of the management
 * Rx reorder module.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_egress_frame_stats *stats;
	uint8_t link_id;
	uint8_t reason;
	uint64_t total_delivery_attempts_count = 0;
	uint64_t total_delivery_success_count = 0;
	uint64_t total_premature_delivery_count = 0;
	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t delivery_count_per_reason[MGMT_RX_REO_RELEASE_REASON_MAX] = {0};
	uint64_t total_delivery_count = 0;
	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->egress_frame_debug_info.stats;

	/* Fold the per-link counters into grand totals */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		total_delivery_attempts_count +=
				stats->delivery_attempts_count[link_id];
		total_delivery_success_count +=
				stats->delivery_success_count[link_id];
		total_premature_delivery_count +=
				stats->premature_delivery_count[link_id];
	}

	/* Per-link and per-reason marginals of the delivery_count matrix */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX;
		     reason++)
			delivery_count_per_link[link_id] +=
				stats->delivery_count[link_id][reason];
		total_delivery_count += delivery_count_per_link[link_id];
	}
	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++)
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
			delivery_count_per_reason[reason] +=
				stats->delivery_count[link_id][reason];

	mgmt_rx_reo_alert("Egress frame stats:");
	mgmt_rx_reo_alert("\t1) Delivery related stats:");
	mgmt_rx_reo_alert("\t------------------------------------------");
	mgmt_rx_reo_alert("\t|link id |Attempts |Success |Premature |");
	mgmt_rx_reo_alert("\t| | count | count | count |");
	mgmt_rx_reo_alert("\t------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
				  stats->delivery_attempts_count[link_id],
				  stats->delivery_success_count[link_id],
				  stats->premature_delivery_count[link_id]);
		mgmt_rx_reo_alert("\t------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
			  total_delivery_attempts_count,
			  total_delivery_success_count,
			  total_premature_delivery_count);

	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
	mgmt_rx_reo_alert("\tRelease Reason Values:-");
	mgmt_rx_reo_alert("\tRELEASE_REASON_ZERO_WAIT_COUNT - 0x%lx",
			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
	mgmt_rx_reo_alert("\tRELEASE_REASON_AGED_OUT - 0x%lx",
			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT);
	mgmt_rx_reo_alert("\tRELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
	mgmt_rx_reo_alert("\tRELEASE_REASON_LIST_MAX_SIZE_EXCEEDED - 0x%lx",
			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED);

	/* Build the '-' border strings used by the table below */
	qdf_mem_set(delivery_reason_stats_boarder_a,
		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
	qdf_mem_set(delivery_reason_stats_boarder_b,
		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');

	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
			  "", "", "", "", "", "");
	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
			  "0", "1", "2", "3", "4", "5");
	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);

	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++) {
		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
				  reason, stats->delivery_count[0][reason],
				  stats->delivery_count[1][reason],
				  stats->delivery_count[2][reason],
				  stats->delivery_count[3][reason],
				  stats->delivery_count[4][reason],
				  stats->delivery_count[5][reason],
				  delivery_count_per_reason[reason]);
		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
	}
	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
			  "", delivery_count_per_link[0],
			  delivery_count_per_link[1],
			  delivery_count_per_link[2],
			  delivery_count_per_link[3],
			  delivery_count_per_link[4],
			  delivery_count_per_link[5],
			  total_delivery_count);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
 * frame exiting the reorder module.
Logging is done before attempting the frame
 * delivery to upper layers.
 * @reo_ctx: management rx reorder context
 * @entry: Pointer to reorder list entry
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_egress_frame_before_delivery(
			struct mgmt_rx_reo_context *reo_ctx,
			struct mgmt_rx_reo_list_entry *entry)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	struct reo_egress_debug_frame_info *cur_frame_debug_info;
	struct reo_egress_frame_stats *stats;
	uint8_t link_id;

	if (!reo_ctx || !entry)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	/* Stats are updated even when the per-frame debug ring is disabled */
	stats = &egress_frame_debug_info->stats;
	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
	stats->delivery_attempts_count[link_id]++;
	if (entry->is_premature_delivery)
		stats->premature_delivery_count[link_id]++;

	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
						(egress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	/* Fill the ring-buffer slot at next_index; the index is advanced
	 * later by mgmt_rx_reo_log_egress_frame_after_delivery().
	 */
	cur_frame_debug_info = &egress_frame_debug_info->frame_list
			[egress_frame_debug_info->next_index];

	cur_frame_debug_info->link_id = link_id;
	cur_frame_debug_info->mgmt_pkt_ctr =
				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
	cur_frame_debug_info->global_timestamp =
				mgmt_rx_reo_get_global_ts(entry->rx_params);
	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
	cur_frame_debug_info->final_wait_count = entry->wait_count;
	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
		     entry->shared_snapshots,
		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
			     sizeof(entry->shared_snapshots)));
	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
			     sizeof(entry->host_snapshot)));
	cur_frame_debug_info->insertion_ts = entry->insertion_ts;
	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
	cur_frame_debug_info->removal_ts = entry->removal_ts;
	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
	cur_frame_debug_info->release_reason = entry->release_reason;
	cur_frame_debug_info->is_premature_delivery =
					entry->is_premature_delivery;
	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
 * frame exiting the reorder module. Logging is done after attempting the frame
 * delivery to upper layer.
 * @reo_ctx: management rx reorder context
 * @entry: Pointer to reorder list entry
 * @link_id: id of the link on which this frame was received
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_egress_frame_after_delivery(
			struct mgmt_rx_reo_context *reo_ctx,
			struct mgmt_rx_reo_list_entry *entry,
			uint8_t link_id)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	struct reo_egress_debug_frame_info *cur_frame_debug_info;
	struct reo_egress_frame_stats *stats;

	if (!reo_ctx || !entry)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	stats = &egress_frame_debug_info->stats;
	if (entry->is_delivered) {
		uint8_t release_reason = entry->release_reason;

		stats->delivery_count[link_id][release_reason]++;
		stats->delivery_success_count[link_id]++;
	}

	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
						(egress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	/* Same ring slot that the before-delivery logger populated */
	cur_frame_debug_info = &egress_frame_debug_info->frame_list
			[egress_frame_debug_info->next_index];

	cur_frame_debug_info->is_delivered = entry->is_delivered;
	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
					cur_frame_debug_info->egress_timestamp;

	/* Advance the ring index; 'wrap_aroud' (sic, field name declared
	 * elsewhere) records that the ring buffer has wrapped at least once.
	 */
	egress_frame_debug_info->next_index++;
	egress_frame_debug_info->next_index %=
				egress_frame_debug_info->frame_list_size;
	if (egress_frame_debug_info->next_index == 0)
		egress_frame_debug_info->wrap_aroud = true;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
 * about the latest frames leaving the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be printed.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					  uint16_t num_frames)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	/* Once wrapped, every slot of the ring holds a valid record */
	if (egress_frame_debug_info->wrap_aroud)
		num_valid_entries = egress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = egress_frame_debug_info->next_index;

	if (num_frames == 0) {
		num_entries_to_print = num_valid_entries;

		if (egress_frame_debug_info->wrap_aroud)
			start_index = egress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		num_entries_to_print = qdf_min(num_frames, num_valid_entries);

		/* Walk backwards from next_index, modulo the ring size */
		start_index = (egress_frame_debug_info->next_index -
			       num_entries_to_print +
			       egress_frame_debug_info->frame_list_size)
			      % egress_frame_debug_info->frame_list_size;

		qdf_assert_always(start_index >= 0 &&
				  start_index < egress_frame_debug_info->frame_list_size);
	}

	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				egress_frame_debug_info->wrap_aroud,
				egress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	boarder = egress_frame_debug_info->boarder;

	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%5s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"No.", "CPU", "Link", "SeqNo", "Global ts",
				"Ingress ts", "Insert. ts", "Removal ts",
				"Egress ts", "E Dur", "W Dur", "Flags", "Rea.",
				"Final wait count", "Initial wait count",
				"Snapshot : link 0", "Snapshot : link 1",
				"Snapshot : link 2", "Snapshot : link 3",
				"Snapshot : link 4", "Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_egress_debug_frame_info *info;
		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
		char flag_premature_delivery = ' ';
		char flag_error = ' ';
		uint8_t link;

		info = &reo_ctx->egress_frame_debug_info.frame_list[index];

		/* 'E' = delivery failed, 'P' = delivered prematurely */
		if (!info->is_delivered)
			flag_error = 'E';

		if (info->is_premature_delivery)
			flag_premature_delivery = 'P';

		snprintf(flags, sizeof(flags), "%c %c", flag_error,
			 flag_premature_delivery);
		snprintf(initial_wait_count, sizeof(initial_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->initial_wait_count.total_count,
			 info->initial_wait_count.per_link_count[0],
			 info->initial_wait_count.per_link_count[1],
			 info->initial_wait_count.per_link_count[2],
			 info->initial_wait_count.per_link_count[3],
			 info->initial_wait_count.per_link_count[4],
			 info->initial_wait_count.per_link_count[5]);
		snprintf(final_wait_count, sizeof(final_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->final_wait_count.total_count,
			 info->final_wait_count.per_link_count[0],
			 info->final_wait_count.per_link_count[1],
			 info->final_wait_count.per_link_count[2],
			 info->final_wait_count.per_link_count[3],
			 info->final_wait_count.per_link_count[4],
			 info->final_wait_count.per_link_count[5]);

		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			/* Each snapshot prints as (valid, pkt ctr, global ts) */
			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
					entry, info->cpu_id, info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->ingress_timestamp,
					info->insertion_ts, info->removal_ts,
					info->egress_timestamp,
					info->egress_duration,
					info->removal_ts - info->insertion_ts,
					flags, info->release_reason,
					final_wait_count, initial_wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		index++;
		index %= egress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/**
 * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
 * related to frames going out of the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going out of the management
 * Rx reorder module.
1879 * 1880 * Return: QDF_STATUS 1881 */ 1882 static QDF_STATUS 1883 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx) 1884 { 1885 return QDF_STATUS_SUCCESS; 1886 } 1887 1888 /** 1889 * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a 1890 * frame exiting the reorder module. Logging is done before attempting the frame 1891 * delivery to upper layers. 1892 * @reo_ctx: management rx reorder context 1893 * @entry: Pointer to reorder list entry 1894 * 1895 * Return: QDF_STATUS of operation 1896 */ 1897 static QDF_STATUS 1898 mgmt_rx_reo_log_egress_frame_before_delivery( 1899 struct mgmt_rx_reo_context *reo_ctx, 1900 struct mgmt_rx_reo_list_entry *entry) 1901 { 1902 return QDF_STATUS_SUCCESS; 1903 } 1904 1905 /** 1906 * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a 1907 * frame exiting the reorder module. Logging is done after attempting the frame 1908 * delivery to upper layer. 1909 * @reo_ctx: management rx reorder context 1910 * @is_delivered: Flag to indicate whether the frame is delivered to upper 1911 * layers 1912 * 1913 * Return: QDF_STATUS of operation 1914 */ 1915 static QDF_STATUS 1916 mgmt_rx_reo_log_egress_frame_after_delivery( 1917 struct mgmt_rx_reo_context *reo_ctx, 1918 bool is_delivered) 1919 { 1920 return QDF_STATUS_SUCCESS; 1921 } 1922 1923 /** 1924 * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about 1925 * the latest frames leaving the reorder module 1926 * @reo_ctx: management rx reorder context 1927 * 1928 * Return: QDF_STATUS of operation 1929 */ 1930 static QDF_STATUS 1931 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx) 1932 { 1933 return QDF_STATUS_SUCCESS; 1934 } 1935 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */ 1936 1937 /** 1938 * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason 1939 * for releasing the reorder list entry to upper layer. 1940 * reorder list. 
1941 * @entry: List entry 1942 * 1943 * This API expects the caller to acquire the spin lock protecting the reorder 1944 * list. 1945 * 1946 * Return: Reason for releasing the frame. 1947 */ 1948 static uint8_t 1949 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry) 1950 { 1951 uint8_t release_reason = 0; 1952 1953 if (!entry) 1954 return 0; 1955 1956 if (MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry)) 1957 release_reason |= 1958 MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED; 1959 1960 if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry)) 1961 release_reason |= 1962 MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT; 1963 1964 if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry)) 1965 release_reason |= 1966 MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT; 1967 1968 if (MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry)) 1969 release_reason |= 1970 MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME; 1971 1972 return release_reason; 1973 } 1974 1975 /** 1976 * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer. 1977 * @reo_list: Pointer to reorder list 1978 * @entry: List entry 1979 * 1980 * API to send the frame to the upper layer. This API has to be called only 1981 * for entries which can be released to upper layer. It is the caller's 1982 * responsibility to ensure that entry can be released (by using API 1983 * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after 1984 * acquiring the lock which serializes the frame delivery to the upper layers. 
1985 * 1986 * Return: QDF_STATUS 1987 */ 1988 static QDF_STATUS 1989 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list, 1990 struct mgmt_rx_reo_list_entry *entry) 1991 { 1992 uint8_t release_reason; 1993 uint8_t link_id; 1994 uint32_t entry_global_ts; 1995 QDF_STATUS status; 1996 QDF_STATUS temp; 1997 struct mgmt_rx_reo_context *reo_context; 1998 1999 qdf_assert_always(reo_list); 2000 qdf_assert_always(entry); 2001 2002 reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list); 2003 qdf_assert_always(reo_context); 2004 2005 link_id = mgmt_rx_reo_get_link_id(entry->rx_params); 2006 entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params); 2007 2008 release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry); 2009 2010 qdf_assert_always(release_reason != 0); 2011 2012 entry->is_delivered = false; 2013 entry->is_premature_delivery = false; 2014 entry->release_reason = release_reason; 2015 2016 if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) { 2017 entry->is_premature_delivery = true; 2018 status = mgmt_rx_reo_handle_potential_premature_delivery( 2019 reo_context, entry_global_ts); 2020 if (QDF_IS_STATUS_ERROR(status)) 2021 goto exit; 2022 } 2023 2024 status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context, 2025 entry); 2026 if (QDF_IS_STATUS_ERROR(status)) 2027 goto exit; 2028 2029 status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf, 2030 entry->rx_params); 2031 /* Above call frees nbuf and rx_params, make it null explicitly */ 2032 entry->nbuf = NULL; 2033 entry->rx_params = NULL; 2034 2035 if (QDF_IS_STATUS_ERROR(status)) 2036 goto exit_log; 2037 2038 entry->is_delivered = true; 2039 2040 status = QDF_STATUS_SUCCESS; 2041 2042 exit_log: 2043 temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry, 2044 link_id); 2045 if (QDF_IS_STATUS_ERROR(temp)) 2046 status = temp; 2047 exit: 2048 /** 2049 * Release the reference taken when the entry is inserted into 2050 * the reorder list 2051 
*/ 2052 wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID); 2053 2054 return status; 2055 } 2056 2057 /** 2058 * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the 2059 * list entry can be send to upper layers. 2060 * @reo_list: Pointer to reorder list 2061 * @entry: List entry 2062 * 2063 * Return: QDF_STATUS 2064 */ 2065 static bool 2066 mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list, 2067 struct mgmt_rx_reo_list_entry *entry) 2068 { 2069 if (!reo_list || !entry) 2070 return false; 2071 2072 return mgmt_rx_reo_list_max_size_exceeded(reo_list) || 2073 !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK( 2074 entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) || 2075 MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME 2076 (entry); 2077 } 2078 2079 /** 2080 * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list 2081 * @reo_context: Pointer to management Rx reorder context 2082 * 2083 * This API releases the entries from the reorder list based on the following 2084 * conditions. 2085 * a) Entries with total wait count equal to 0 2086 * b) Entries which are timed out or entries with global time stamp <= global 2087 * time stamp of the latest frame which is timed out. We can only release 2088 * the entries in the increasing order of the global time stamp. 2089 * So all the entries with global time stamp <= global time stamp of the 2090 * latest timed out frame has to be released. 
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
{
	struct mgmt_rx_reo_list *reo_list;
	QDF_STATUS status;

	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_list = &reo_context->reo_list;

	/* Serializes frame delivery to the upper layers across contexts */
	qdf_spin_lock(&reo_context->frame_release_lock);

	/*
	 * Pop entries off the head of the list, one at a time, until the
	 * head is no longer releasable. The list lock is dropped before
	 * each delivery and re-taken on the next iteration.
	 */
	while (1) {
		struct mgmt_rx_reo_list_entry *first_entry;
		/* TODO yield if release_count > THRESHOLD */
		uint16_t release_count = 0;
		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame =
					&reo_list->ts_last_released_frame;
		uint32_t entry_global_ts;

		qdf_spin_lock_bh(&reo_list->list_lock);

		first_entry = qdf_list_first_entry_or_null(
			&reo_list->list, struct mgmt_rx_reo_list_entry, node);

		if (!first_entry) {
			status = QDF_STATUS_SUCCESS;
			goto exit_unlock_list_lock;
		}

		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
								first_entry)) {
			status = QDF_STATUS_SUCCESS;
			goto exit_unlock_list_lock;
		}

		if (mgmt_rx_reo_list_max_size_exceeded(reo_list))
			first_entry->status |=
				MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;

		status = qdf_list_remove_node(&reo_list->list,
					      &first_entry->node);
		if (QDF_IS_STATUS_ERROR(status)) {
			status = QDF_STATUS_E_FAILURE;
			goto exit_unlock_list_lock;
		}
		first_entry->removal_ts = qdf_get_log_timestamp();

		/*
		 * Last released frame global time stamp is invalid means that
		 * current frame is the first frame to be released to the
		 * upper layer from the reorder list. Blindly update the last
		 * released frame global time stamp to the current frame's
		 * global time stamp and set the valid to true.
		 * If the last released frame global time stamp is valid and
		 * current frame's global time stamp is >= last released frame
		 * global time stamp, deliver the current frame to upper layer
		 * and update the last released frame global time stamp.
		 */
		entry_global_ts =
			mgmt_rx_reo_get_global_ts(first_entry->rx_params);

		if (!ts_last_released_frame->valid ||
		    mgmt_rx_reo_compare_global_timestamps_gte(
		    entry_global_ts, ts_last_released_frame->global_ts)) {
			struct mgmt_rx_event_params *params;

			params = first_entry->rx_params;

			ts_last_released_frame->global_ts = entry_global_ts;
			ts_last_released_frame->start_ts =
					mgmt_rx_reo_get_start_ts(params);
			ts_last_released_frame->end_ts =
					mgmt_rx_reo_get_end_ts(params);
			ts_last_released_frame->valid = true;

			/* A frame is leaving; re-arm the inactivity timer */
			qdf_timer_mod
				(&reo_list->global_mgmt_rx_inactivity_timer,
				 MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
		} else {
			/*
			 * This should never happen. All the frames older than
			 * the last frame released from the reorder list will be
			 * discarded at the entry to reorder algorithm itself.
			 */
			qdf_assert_always(first_entry->is_parallel_rx);
		}

		qdf_spin_unlock_bh(&reo_list->list_lock);

		/* Entry is detached from the list; deliver and free it */
		status = mgmt_rx_reo_list_entry_send_up(reo_list,
							first_entry);
		if (QDF_IS_STATUS_ERROR(status)) {
			status = QDF_STATUS_E_FAILURE;
			qdf_mem_free(first_entry);
			goto exit_unlock_frame_release_lock;
		}

		qdf_mem_free(first_entry);
		release_count++;
	}

	/*
	 * NOTE(review): the two statements below are unreachable — the
	 * while (1) loop above only exits via goto.
	 */
	status = QDF_STATUS_SUCCESS;
	goto exit_unlock_frame_release_lock;

exit_unlock_list_lock:
	qdf_spin_unlock_bh(&reo_list->list_lock);
exit_unlock_frame_release_lock:
	qdf_spin_unlock(&reo_context->frame_release_lock);

	return status;
}

/**
 * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
 * @arg: Argument to timer handler
 *
 * This is the handler for periodic ageout timer used to timeout entries in the
 * reorder list.
 *
 * Return: void
 */
static void
mgmt_rx_reo_list_ageout_timer_handler(void *arg)
{
	struct mgmt_rx_reo_list *reo_list = arg;
	struct mgmt_rx_reo_list_entry *cur_entry;
	uint64_t cur_ts;
	QDF_STATUS status;
	struct mgmt_rx_reo_context *reo_context;
	/*
	 * Stores the pointer to the entry in reorder list for the latest aged
	 * out frame. Latest aged out frame is the aged out frame in reorder
	 * list which has the largest global time stamp value.
	 */
	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;

	qdf_assert_always(reo_list);

	/* Self re-arm: the ageout check runs periodically */
	qdf_timer_mod(&reo_list->ageout_timer,
		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);

	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
	qdf_assert_always(reo_context);

	qdf_spin_lock_bh(&reo_list->list_lock);

	cur_ts = qdf_get_log_timestamp();

	/* Pass 1: mark every timed-out entry; remember the last (latest) one */
	qdf_list_for_each(&reo_list->list, cur_entry, node) {
		if (cur_ts - cur_entry->insertion_ts >=
		    reo_list->list_entry_timeout_us) {
			latest_aged_out_entry = cur_entry;
			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
		}
	}

	/* Pass 2: everything before the latest aged-out entry must go too */
	if (latest_aged_out_entry) {
		qdf_list_for_each(&reo_list->list, cur_entry, node) {
			if (cur_entry == latest_aged_out_entry)
				break;
			cur_entry->status |= MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
		}
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	if (latest_aged_out_entry) {
		status = mgmt_rx_reo_list_release_entries(reo_context);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to release entries, ret = %d",
					status);
			return;
		}
	}
}

/**
 * mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler() - Timer handler
 * for global management Rx inactivity timer
 * @arg: Argument to timer handler
 *
 * This is the timer handler for tracking management Rx inactivity across
 * links.
2281 * 2282 * Return: void 2283 */ 2284 static void 2285 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler(void *arg) 2286 { 2287 struct mgmt_rx_reo_list *reo_list = arg; 2288 struct mgmt_rx_reo_context *reo_context; 2289 struct mgmt_rx_reo_global_ts_info *ts_last_released_frame; 2290 2291 qdf_assert_always(reo_list); 2292 ts_last_released_frame = &reo_list->ts_last_released_frame; 2293 2294 reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list); 2295 qdf_assert_always(reo_context); 2296 2297 qdf_spin_lock(&reo_context->frame_release_lock); 2298 qdf_spin_lock_bh(&reo_list->list_lock); 2299 2300 qdf_mem_zero(ts_last_released_frame, sizeof(*ts_last_released_frame)); 2301 2302 qdf_spin_unlock_bh(&reo_list->list_lock); 2303 qdf_spin_unlock(&reo_context->frame_release_lock); 2304 } 2305 2306 /** 2307 * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management 2308 * frame received. 2309 * @frame_desc: Pointer to the frame descriptor 2310 * @entry: Pointer to the list entry 2311 * 2312 * This API prepares the reorder list entry corresponding to a management frame 2313 * to be consumed by host. This entry would be inserted at the appropriate 2314 * position in the reorder list. 
2315 * 2316 * Return: QDF_STATUS 2317 */ 2318 static QDF_STATUS 2319 mgmt_rx_reo_prepare_list_entry( 2320 const struct mgmt_rx_reo_frame_descriptor *frame_desc, 2321 struct mgmt_rx_reo_list_entry **entry) 2322 { 2323 struct mgmt_rx_reo_list_entry *list_entry; 2324 struct wlan_objmgr_pdev *pdev; 2325 uint8_t link_id; 2326 2327 if (!frame_desc) { 2328 mgmt_rx_reo_err("frame descriptor is null"); 2329 return QDF_STATUS_E_NULL_VALUE; 2330 } 2331 2332 if (!entry) { 2333 mgmt_rx_reo_err("Pointer to list entry is null"); 2334 return QDF_STATUS_E_NULL_VALUE; 2335 } 2336 2337 link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params); 2338 2339 pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_ID); 2340 if (!pdev) { 2341 mgmt_rx_reo_err("pdev corresponding to link %u is null", 2342 link_id); 2343 return QDF_STATUS_E_NULL_VALUE; 2344 } 2345 2346 list_entry = qdf_mem_malloc(sizeof(*list_entry)); 2347 if (!list_entry) { 2348 wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID); 2349 mgmt_rx_reo_err("List entry allocation failed"); 2350 return QDF_STATUS_E_NOMEM; 2351 } 2352 2353 list_entry->pdev = pdev; 2354 list_entry->nbuf = frame_desc->nbuf; 2355 list_entry->rx_params = frame_desc->rx_params; 2356 list_entry->wait_count = frame_desc->wait_count; 2357 list_entry->initial_wait_count = frame_desc->wait_count; 2358 qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots, 2359 qdf_min(sizeof(list_entry->shared_snapshots), 2360 sizeof(frame_desc->shared_snapshots))); 2361 qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot, 2362 qdf_min(sizeof(list_entry->host_snapshot), 2363 sizeof(frame_desc->host_snapshot))); 2364 list_entry->status = 0; 2365 if (list_entry->wait_count.total_count) 2366 list_entry->status |= 2367 MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS; 2368 2369 *entry = list_entry; 2370 2371 return QDF_STATUS_SUCCESS; 2372 } 2373 2374 /** 2375 * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based 
2376 * on the wait count of a frame received after that on air. 2377 * @wait_count_old_frame: Pointer to the wait count structure for the old frame. 2378 * @wait_count_new_frame: Pointer to the wait count structure for the new frame. 2379 * 2380 * This API optimizes the wait count of a frame based on the wait count of 2381 * a frame received after that on air. Old frame refers to the frame received 2382 * first on the air and new frame refers to the frame received after that. 2383 * We use the following fundamental idea. Wait counts for old frames can't be 2384 * more than wait counts for the new frame. Use this to optimize the wait count 2385 * for the old frames. Per link wait count of an old frame is minimum of the 2386 * per link wait count of the old frame and new frame. 2387 * 2388 * Return: QDF_STATUS 2389 */ 2390 static QDF_STATUS 2391 mgmt_rx_reo_update_wait_count( 2392 struct mgmt_rx_reo_wait_count *wait_count_old_frame, 2393 const struct mgmt_rx_reo_wait_count *wait_count_new_frame) 2394 { 2395 uint8_t link_id; 2396 2397 qdf_assert_always(wait_count_old_frame); 2398 qdf_assert_always(wait_count_new_frame); 2399 2400 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) { 2401 if (wait_count_old_frame->per_link_count[link_id]) { 2402 uint32_t temp_wait_count; 2403 uint32_t wait_count_diff; 2404 2405 temp_wait_count = 2406 wait_count_old_frame->per_link_count[link_id]; 2407 wait_count_old_frame->per_link_count[link_id] = 2408 qdf_min(wait_count_old_frame-> 2409 per_link_count[link_id], 2410 wait_count_new_frame-> 2411 per_link_count[link_id]); 2412 wait_count_diff = temp_wait_count - 2413 wait_count_old_frame->per_link_count[link_id]; 2414 2415 wait_count_old_frame->total_count -= wait_count_diff; 2416 } 2417 } 2418 2419 return QDF_STATUS_SUCCESS; 2420 } 2421 2422 /** 2423 * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received 2424 * @reo_list: Pointer to reorder list 2425 * @frame_desc: Pointer to frame descriptor 2426 * 
@is_queued: Whether this frame is queued in the REO list 2427 * 2428 * API to update the reorder list on every management frame reception. 2429 * This API does the following things. 2430 * a) Update the wait counts for all the frames in the reorder list with 2431 * global time stamp <= current frame's global time stamp. We use the 2432 * following principle for updating the wait count in this case. 2433 * Let A and B be two management frames with global time stamp of A <= 2434 * global time stamp of B. Let WAi and WBi be the wait count of A and B 2435 * for link i, then WAi <= WBi. Hence we can optimize WAi as 2436 * min(WAi, WBi). 2437 * b) If the current frame is to be consumed by host, insert it in the 2438 * reorder list such that the list is always sorted in the increasing order 2439 * of global time stamp. Update the wait count of the current frame based 2440 * on the frame next to it in the reorder list (if any). 2441 * c) Update the wait count of the frames in the reorder list with global 2442 * time stamp > current frame's global time stamp. Let the current frame 2443 * belong to link "l". Then link "l"'s wait count can be reduced by one for 2444 * all the frames in the reorder list with global time stamp > current 2445 * frame's global time stamp. 
2446 * 2447 * Return: QDF_STATUS 2448 */ 2449 static QDF_STATUS 2450 mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list, 2451 struct mgmt_rx_reo_frame_descriptor *frame_desc, 2452 bool *is_queued) 2453 { 2454 struct mgmt_rx_reo_list_entry *cur_entry; 2455 struct mgmt_rx_reo_list_entry *least_greater_entry = NULL; 2456 bool least_greater_entry_found = false; 2457 QDF_STATUS status; 2458 uint32_t new_frame_global_ts; 2459 struct mgmt_rx_reo_list_entry *new_entry = NULL; 2460 uint16_t list_insertion_pos = 0; 2461 2462 if (!is_queued) 2463 return QDF_STATUS_E_NULL_VALUE; 2464 *is_queued = false; 2465 2466 if (!reo_list) { 2467 mgmt_rx_reo_err("Mgmt Rx reo list is null"); 2468 return QDF_STATUS_E_NULL_VALUE; 2469 } 2470 2471 if (!frame_desc) { 2472 mgmt_rx_reo_err("Mgmt frame descriptor is null"); 2473 return QDF_STATUS_E_NULL_VALUE; 2474 } 2475 2476 new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params); 2477 2478 /* Prepare the list entry before acquiring lock */ 2479 if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME && 2480 frame_desc->reo_required) { 2481 status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry); 2482 if (QDF_IS_STATUS_ERROR(status)) { 2483 mgmt_rx_reo_err("Failed to prepare list entry"); 2484 return QDF_STATUS_E_FAILURE; 2485 } 2486 } 2487 2488 qdf_spin_lock_bh(&reo_list->list_lock); 2489 2490 frame_desc->list_size_rx = qdf_list_size(&reo_list->list); 2491 2492 status = mgmt_rx_reo_is_stale_frame(&reo_list->ts_last_released_frame, 2493 frame_desc); 2494 if (QDF_IS_STATUS_ERROR(status)) 2495 goto exit_free_entry; 2496 2497 if (frame_desc->is_stale) { 2498 status = mgmt_rx_reo_handle_stale_frame(reo_list, frame_desc); 2499 if (QDF_IS_STATUS_ERROR(status)) 2500 goto exit_free_entry; 2501 } 2502 2503 qdf_list_for_each(&reo_list->list, cur_entry, node) { 2504 uint32_t cur_entry_global_ts; 2505 2506 cur_entry_global_ts = mgmt_rx_reo_get_global_ts( 2507 cur_entry->rx_params); 2508 2509 if 
(!mgmt_rx_reo_compare_global_timestamps_gte( 2510 new_frame_global_ts, cur_entry_global_ts)) { 2511 least_greater_entry = cur_entry; 2512 least_greater_entry_found = true; 2513 break; 2514 } 2515 2516 list_insertion_pos++; 2517 2518 status = mgmt_rx_reo_update_wait_count( 2519 &cur_entry->wait_count, 2520 &frame_desc->wait_count); 2521 if (QDF_IS_STATUS_ERROR(status)) 2522 goto exit_free_entry; 2523 2524 if (cur_entry->wait_count.total_count == 0) 2525 cur_entry->status &= 2526 ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS; 2527 } 2528 2529 if (frame_desc->is_stale) 2530 qdf_assert_always(!list_insertion_pos); 2531 2532 if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME && 2533 !frame_desc->is_stale && frame_desc->reo_required) { 2534 if (least_greater_entry_found) { 2535 status = mgmt_rx_reo_update_wait_count( 2536 &new_entry->wait_count, 2537 &least_greater_entry->wait_count); 2538 2539 if (QDF_IS_STATUS_ERROR(status)) 2540 goto exit_free_entry; 2541 2542 frame_desc->wait_count = new_entry->wait_count; 2543 2544 if (new_entry->wait_count.total_count == 0) 2545 new_entry->status &= 2546 ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS; 2547 } 2548 2549 new_entry->insertion_ts = qdf_get_log_timestamp(); 2550 new_entry->ingress_timestamp = frame_desc->ingress_timestamp; 2551 new_entry->is_parallel_rx = frame_desc->is_parallel_rx; 2552 frame_desc->list_insertion_pos = list_insertion_pos; 2553 2554 if (least_greater_entry_found) 2555 status = qdf_list_insert_before( 2556 &reo_list->list, &new_entry->node, 2557 &least_greater_entry->node); 2558 else 2559 status = qdf_list_insert_back( 2560 &reo_list->list, &new_entry->node); 2561 2562 if (QDF_IS_STATUS_ERROR(status)) 2563 goto exit_free_entry; 2564 2565 *is_queued = true; 2566 2567 if (new_entry->wait_count.total_count == 0) 2568 frame_desc->zero_wait_count_rx = true; 2569 2570 if (frame_desc->zero_wait_count_rx && 2571 qdf_list_first_entry_or_null(&reo_list->list, 2572 struct 
mgmt_rx_reo_list_entry, 2573 node) == new_entry) 2574 frame_desc->immediate_delivery = true; 2575 } 2576 2577 if (least_greater_entry_found) { 2578 cur_entry = least_greater_entry; 2579 2580 qdf_list_for_each_from(&reo_list->list, cur_entry, node) { 2581 uint8_t frame_link_id; 2582 struct mgmt_rx_reo_wait_count *wait_count; 2583 2584 frame_link_id = 2585 mgmt_rx_reo_get_link_id(frame_desc->rx_params); 2586 wait_count = &cur_entry->wait_count; 2587 if (wait_count->per_link_count[frame_link_id]) { 2588 uint32_t old_wait_count; 2589 uint32_t new_wait_count; 2590 uint32_t wait_count_diff; 2591 uint16_t pkt_ctr_delta; 2592 2593 pkt_ctr_delta = frame_desc->pkt_ctr_delta; 2594 old_wait_count = 2595 wait_count->per_link_count[frame_link_id]; 2596 2597 if (old_wait_count >= pkt_ctr_delta) 2598 new_wait_count = old_wait_count - 2599 pkt_ctr_delta; 2600 else 2601 new_wait_count = 0; 2602 2603 wait_count_diff = old_wait_count - 2604 new_wait_count; 2605 2606 wait_count->per_link_count[frame_link_id] = 2607 new_wait_count; 2608 wait_count->total_count -= wait_count_diff; 2609 2610 if (wait_count->total_count == 0) 2611 cur_entry->status &= 2612 ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS; 2613 } 2614 } 2615 } 2616 2617 status = QDF_STATUS_SUCCESS; 2618 2619 exit_free_entry: 2620 /* Cleanup the entry if it is not queued */ 2621 if (new_entry && !*is_queued) { 2622 /** 2623 * New entry created is not inserted to reorder list, free 2624 * the entry and release the reference 2625 */ 2626 wlan_objmgr_pdev_release_ref(new_entry->pdev, 2627 WLAN_MGMT_RX_REO_ID); 2628 qdf_mem_free(new_entry); 2629 } 2630 2631 qdf_spin_unlock_bh(&reo_list->list_lock); 2632 2633 if (!*is_queued) 2634 return status; 2635 2636 return status; 2637 } 2638 2639 /** 2640 * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list 2641 * @reo_list: Pointer to reorder list 2642 * 2643 * API to initialize the management rx-reorder list. 
2644 * 2645 * Return: QDF_STATUS 2646 */ 2647 static QDF_STATUS 2648 mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list) 2649 { 2650 QDF_STATUS status; 2651 2652 reo_list->max_list_size = MGMT_RX_REO_LIST_MAX_SIZE; 2653 reo_list->list_entry_timeout_us = MGMT_RX_REO_LIST_TIMEOUT_US; 2654 2655 qdf_list_create(&reo_list->list, reo_list->max_list_size); 2656 qdf_spinlock_create(&reo_list->list_lock); 2657 2658 status = qdf_timer_init(NULL, &reo_list->ageout_timer, 2659 mgmt_rx_reo_list_ageout_timer_handler, reo_list, 2660 QDF_TIMER_TYPE_WAKE_APPS); 2661 if (QDF_IS_STATUS_ERROR(status)) { 2662 mgmt_rx_reo_err("Failed to initialize reo list ageout timer"); 2663 return status; 2664 } 2665 2666 reo_list->ts_last_released_frame.valid = false; 2667 2668 status = qdf_timer_init 2669 (NULL, &reo_list->global_mgmt_rx_inactivity_timer, 2670 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler, 2671 reo_list, QDF_TIMER_TYPE_WAKE_APPS); 2672 if (QDF_IS_STATUS_ERROR(status)) { 2673 mgmt_rx_reo_err("Failed to init glb mgmt rx inactivity timer"); 2674 return status; 2675 } 2676 2677 return QDF_STATUS_SUCCESS; 2678 } 2679 2680 /** 2681 * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT 2682 * Rx REO parameters. 
2683 * @pdev: pdev extracted from the WMI event 2684 * @desc: pointer to frame descriptor 2685 * 2686 * Return: QDF_STATUS of operation 2687 */ 2688 static QDF_STATUS 2689 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev, 2690 struct mgmt_rx_reo_frame_descriptor *desc) 2691 { 2692 struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx; 2693 struct mgmt_rx_reo_snapshot_params *host_ss; 2694 struct mgmt_rx_reo_params *reo_params; 2695 int pkt_ctr_delta; 2696 struct wlan_objmgr_psoc *psoc; 2697 uint16_t pkt_ctr_delta_thresh; 2698 2699 if (!desc) { 2700 mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null"); 2701 return QDF_STATUS_E_NULL_VALUE; 2702 } 2703 2704 if (!desc->rx_params) { 2705 mgmt_rx_reo_err("Mgmt Rx params null"); 2706 return QDF_STATUS_E_NULL_VALUE; 2707 } 2708 2709 reo_params = desc->rx_params->reo_params; 2710 if (!reo_params) { 2711 mgmt_rx_reo_err("Mgmt Rx REO params NULL"); 2712 return QDF_STATUS_E_NULL_VALUE; 2713 } 2714 2715 rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev); 2716 if (!rx_reo_pdev_ctx) { 2717 mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev); 2718 return QDF_STATUS_E_FAILURE; 2719 } 2720 2721 psoc = wlan_pdev_get_psoc(pdev); 2722 2723 /* FW should send valid REO parameters */ 2724 if (!reo_params->valid) { 2725 mgmt_rx_reo_err("Mgmt Rx REO params is invalid"); 2726 return QDF_STATUS_E_FAILURE; 2727 } 2728 2729 host_ss = &rx_reo_pdev_ctx->host_snapshot; 2730 2731 if (!host_ss->valid) { 2732 desc->pkt_ctr_delta = 1; 2733 goto update_host_ss; 2734 } 2735 2736 if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr, 2737 reo_params->mgmt_pkt_ctr)) { 2738 mgmt_rx_reo_err("Cur frame ctr > last frame ctr for link = %u", 2739 reo_params->link_id); 2740 goto failure_debug; 2741 } 2742 2743 pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr, 2744 host_ss->mgmt_pkt_ctr); 2745 qdf_assert_always(pkt_ctr_delta > 0); 2746 desc->pkt_ctr_delta = pkt_ctr_delta; 2747 2748 if (pkt_ctr_delta == 1) 
2749 goto update_host_ss; 2750 2751 /* 2752 * Under back pressure scenarios, FW may drop management Rx frame 2753 * WMI events. So holes in the management packet counter is expected. 2754 * Add a debug print and optional assert to track the holes. 2755 */ 2756 mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta); 2757 mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u", 2758 reo_params->valid, reo_params->mgmt_pkt_ctr, 2759 reo_params->global_timestamp); 2760 mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u", 2761 host_ss->valid, host_ss->mgmt_pkt_ctr, 2762 host_ss->global_timestamp); 2763 2764 pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc); 2765 2766 if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) { 2767 mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u", 2768 pkt_ctr_delta, pkt_ctr_delta_thresh, 2769 reo_params->link_id); 2770 goto failure_debug; 2771 } 2772 2773 update_host_ss: 2774 host_ss->valid = true; 2775 host_ss->global_timestamp = reo_params->global_timestamp; 2776 host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr; 2777 2778 return QDF_STATUS_SUCCESS; 2779 2780 failure_debug: 2781 mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u", 2782 reo_params->valid, reo_params->mgmt_pkt_ctr, 2783 reo_params->global_timestamp); 2784 mgmt_rx_reo_err("Last frame vailid = %u, pkt_ctr = %u, ts =%u", 2785 host_ss->valid, host_ss->mgmt_pkt_ctr, 2786 host_ss->global_timestamp); 2787 qdf_assert_always(0); 2788 2789 return QDF_STATUS_E_FAILURE; 2790 } 2791 2792 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT 2793 /** 2794 * mgmt_rx_reo_ingress_frame_debug_info_enabled() - API to check whether ingress 2795 * frame info debug feaure is enabled 2796 * @ingress_frame_debug_info: Pointer to ingress frame debug info object 2797 * 2798 * Return: true or false 2799 */ 2800 static bool 2801 mgmt_rx_reo_ingress_frame_debug_info_enabled 2802 (struct reo_ingress_debug_info *ingress_frame_debug_info) 2803 { 2804 
	return ingress_frame_debug_info->frame_list_size;
}

/**
 * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
 * related to frames going into the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going into the management
 * Rx reorder module.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_ingress_frame_stats *stats;
	uint8_t link_id;
	uint8_t desc_type;
	/* Row (per-link) and column (per-descriptor-type) sums of the
	 * ingress/stale/error counter matrices, plus grand totals.
	 */
	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_ingress_count = 0;
	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_stale_count = 0;
	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_error_count = 0;
	uint64_t total_queued_count = 0;
	uint64_t total_zero_wait_count_rx_count = 0;
	uint64_t total_immediate_delivery_count = 0;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->ingress_frame_debug_info.stats;

	/* Per-link sums across all descriptor types */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
		     desc_type++) {
			ingress_count_per_link[link_id] +=
				stats->ingress_count[link_id][desc_type];
			stale_count_per_link[link_id] +=
				stats->stale_count[link_id][desc_type];
			error_count_per_link[link_id] +=
				stats->error_count[link_id][desc_type];
		}

		total_ingress_count += ingress_count_per_link[link_id];
		total_stale_count += stale_count_per_link[link_id];
		total_error_count += error_count_per_link[link_id];
	}

	/* Per-descriptor-type sums across all links */
	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
	     desc_type++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			ingress_count_per_desc_type[desc_type] +=
				stats->ingress_count[link_id][desc_type];
			stale_count_per_desc_type[desc_type] +=
				stats->stale_count[link_id][desc_type];
			error_count_per_desc_type[desc_type] +=
				stats->error_count[link_id][desc_type];
		}
	}

	/* These three counters are tracked per link only */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		total_queued_count += stats->queued_count[link_id];
		total_zero_wait_count_rx_count +=
				stats->zero_wait_count_rx_count[link_id];
		total_immediate_delivery_count +=
				stats->immediate_delivery_count[link_id];
	}

	mgmt_rx_reo_alert("Ingress Frame Stats:");
	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
	mgmt_rx_reo_alert("\t------------------------------------");
	/* NOTE(review): space runs inside the table-header strings were lost
	 * in extraction; re-aligned to the %10u/%7llu column widths — confirm
	 * against the original file.
	 */
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->ingress_count[link_id][0],
				  stats->ingress_count[link_id][1],
				  stats->ingress_count[link_id][2],
				  ingress_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  ingress_count_per_desc_type[0],
			  ingress_count_per_desc_type[1],
			  ingress_count_per_desc_type[2],
			  total_ingress_count);

	mgmt_rx_reo_alert("\t2) Stale Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->stale_count[link_id][0],
				  stats->stale_count[link_id][1],
				  stats->stale_count[link_id][2],
				  stale_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  stale_count_per_desc_type[0],
			  stale_count_per_desc_type[1],
			  stale_count_per_desc_type[2],
			  total_stale_count);

	mgmt_rx_reo_alert("\t3) Error Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->error_count[link_id][0],
				  stats->error_count[link_id][1],
				  stats->error_count[link_id][2],
				  error_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  error_count_per_desc_type[0],
			  error_count_per_desc_type[1],
			  error_count_per_desc_type[2],
			  total_error_count);

	mgmt_rx_reo_alert("\t4) Host consumed frames related stats:");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t|          | count       | count    | delivery |");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  stats->queued_count[link_id],
				  stats->zero_wait_count_rx_count[link_id],
				  stats->immediate_delivery_count[link_id]);
		mgmt_rx_reo_alert("\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  total_queued_count,
			  total_zero_wait_count_rx_count,
			  total_immediate_delivery_count);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
 * the reorder algorithm.
 * @reo_ctx: management rx reorder context
 * @desc: Pointer to frame descriptor
 * @is_queued: Indicates whether this frame is queued to reorder list
 * @is_error: Indicates whether any error occurred during processing this frame
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
			      struct mgmt_rx_reo_frame_descriptor *desc,
			      bool is_queued, bool is_error)
{
	struct reo_ingress_debug_info *ingress_frame_debug_info;
	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
	struct reo_ingress_frame_stats *stats;
	uint8_t link_id;

	if (!reo_ctx || !desc)
		return QDF_STATUS_E_NULL_VALUE;

	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;

	/* Stats are updated unconditionally, even when the per-frame debug
	 * ring buffer below is disabled.
	 */
	stats = &ingress_frame_debug_info->stats;
	link_id = mgmt_rx_reo_get_link_id(desc->rx_params);
	stats->ingress_count[link_id][desc->type]++;
	if (is_queued)
		stats->queued_count[link_id]++;
	if (desc->zero_wait_count_rx)
		stats->zero_wait_count_rx_count[link_id]++;
	if (desc->immediate_delivery)
		stats->immediate_delivery_count[link_id]++;
	if (is_error)
		stats->error_count[link_id][desc->type]++;
	if (desc->is_stale)
		stats->stale_count[link_id][desc->type]++;

	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
			(ingress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	/* Snapshot this frame into the next slot of the circular frame list */
	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
			[ingress_frame_debug_info->next_index];

	cur_frame_debug_info->link_id = link_id;
	cur_frame_debug_info->mgmt_pkt_ctr =
		mgmt_rx_reo_get_pkt_counter(desc->rx_params);
	cur_frame_debug_info->global_timestamp =
		mgmt_rx_reo_get_global_ts(desc->rx_params);
	cur_frame_debug_info->start_timestamp =
		mgmt_rx_reo_get_start_ts(desc->rx_params);
	cur_frame_debug_info->end_timestamp =
		mgmt_rx_reo_get_end_ts(desc->rx_params);
	cur_frame_debug_info->duration_us =
		mgmt_rx_reo_get_duration_us(desc->rx_params);
	cur_frame_debug_info->desc_type = desc->type;
	cur_frame_debug_info->frame_type = desc->frame_type;
	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
	cur_frame_debug_info->wait_count = desc->wait_count;
	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
		     desc->shared_snapshots,
		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
			     sizeof(desc->shared_snapshots)));
	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
			     sizeof(desc->host_snapshot)));
	cur_frame_debug_info->is_queued = is_queued;
	cur_frame_debug_info->is_stale = desc->is_stale;
	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
	cur_frame_debug_info->is_error = is_error;
	cur_frame_debug_info->ts_last_released_frame =
		reo_ctx->reo_list.ts_last_released_frame;
	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
	cur_frame_debug_info->ingress_duration =
		qdf_get_log_timestamp() - desc->ingress_timestamp;
	cur_frame_debug_info->list_size_rx = desc->list_size_rx;
	cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
	cur_frame_debug_info->reo_required = desc->reo_required;

	/* Advance the circular buffer write index; "wrap_aroud" spelling
	 * comes from the struct definition in the header.
	 */
	ingress_frame_debug_info->next_index++;
	ingress_frame_debug_info->next_index %=
		ingress_frame_debug_info->frame_list_size;
	if (ingress_frame_debug_info->next_index == 0)
		ingress_frame_debug_info->wrap_aroud = true;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
 * about the latest frames entered the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be printed.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					   uint16_t num_frames)
{
	struct reo_ingress_debug_info *ingress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;

	/* Before the first wrap only [0, next_index) slots hold valid data */
	if (ingress_frame_debug_info->wrap_aroud)
		num_valid_entries = ingress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = ingress_frame_debug_info->next_index;

	if (num_frames == 0) {
		num_entries_to_print = num_valid_entries;

		if (ingress_frame_debug_info->wrap_aroud)
			start_index = ingress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		num_entries_to_print = qdf_min(num_frames,
					       num_valid_entries);

		/* Walk back from the write index to the oldest of the
		 * requested entries, modulo the ring size.
		 */
		start_index = (ingress_frame_debug_info->next_index -
			       num_entries_to_print +
			       ingress_frame_debug_info->frame_list_size)
			      % ingress_frame_debug_info->frame_list_size;

		qdf_assert_always(start_index >= 0 &&
				  start_index < ingress_frame_debug_info->frame_list_size);
	}

	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				ingress_frame_debug_info->wrap_aroud,
				ingress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	boarder = ingress_frame_debug_info->boarder;

	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"Index", "CPU", "D.type", "F.type", "F.subtype",
				"Link", "SeqNo", "Global ts",
				"Start ts", "End ts", "Dur", "Last ts",
				"Ingress ts", "Flags", "Ingress Dur", "Size",
				"Pos", "Wait Count", "Snapshot : link 0",
				"Snapshot : link 1", "Snapshot : link 2",
				"Snapshot : link 3", "Snapshot : link 4",
				"Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_ingress_debug_frame_info *info;
		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
		char flag_queued = ' ';
		char flag_stale = ' ';
		char flag_parallel_rx = ' ';
		char flag_error = ' ';
		char flag_zero_wait_count_rx = ' ';
		char flag_immediate_delivery = ' ';
		char flag_reo_required = ' ';
		int64_t ts_last_released_frame = -1;
		uint8_t link;

		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];

		if (info->ts_last_released_frame.valid)
			ts_last_released_frame =
				info->ts_last_released_frame.global_ts;

		if (info->is_queued)
			flag_queued = 'Q';

		if (info->is_stale)
			flag_stale = 'S';

		if (info->is_parallel_rx)
			flag_parallel_rx = 'P';

		if (info->is_error)
			flag_error = 'E';

		if (info->zero_wait_count_rx)
			flag_zero_wait_count_rx = 'Z';

		if (info->immediate_delivery)
			flag_immediate_delivery = 'I';

		if (!info->reo_required)
			flag_reo_required = 'N';

		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c", flag_error,
			 flag_stale, flag_parallel_rx, flag_queued,
			 flag_zero_wait_count_rx, flag_immediate_delivery,
			 flag_reo_required);
		snprintf(wait_count, sizeof(wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->wait_count.total_count,
			 info->wait_count.per_link_count[0],
			 info->wait_count.per_link_count[1],
			 info->wait_count.per_link_count[2],
			 info->wait_count.per_link_count[3],
			 info->wait_count.per_link_count[4],
			 info->wait_count.per_link_count[5]);

		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%13s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
					entry, info->cpu_id, info->desc_type,
					info->frame_type, info->frame_subtype,
					info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->start_timestamp,
					info->end_timestamp,
					info->duration_us,
					ts_last_released_frame,
					info->ingress_timestamp, flags,
					info->ingress_duration,
					info->list_size_rx,
					info->list_insertion_pos, wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		index++;
		index %= ingress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/**
 * mgmt_rx_reo_debug_print_ingress_frame_stats()
- API to print the stats 3254 * related to frames going into the reorder module 3255 * @reo_ctx: Pointer to reorder context 3256 * 3257 * API to print the stats related to frames going into the management 3258 * Rx reorder module. 3259 * 3260 * Return: QDF_STATUS 3261 */ 3262 static QDF_STATUS 3263 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx) 3264 { 3265 return QDF_STATUS_SUCCESS; 3266 } 3267 3268 /** 3269 * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering 3270 * the reorder algorithm. 3271 * @reo_ctx: management rx reorder context 3272 * @desc: Pointer to frame descriptor 3273 * @is_queued: Indicates whether this frame is queued to reorder list 3274 * @is_error: Indicates whether any error occurred during processing this frame 3275 * 3276 * Return: QDF_STATUS of operation 3277 */ 3278 static QDF_STATUS 3279 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx, 3280 struct mgmt_rx_reo_frame_descriptor *desc, 3281 bool is_queued, bool is_error) 3282 { 3283 return QDF_STATUS_SUCCESS; 3284 } 3285 3286 /** 3287 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about 3288 * the latest frames entering the reorder module 3289 * @reo_ctx: management rx reorder context 3290 * 3291 * Return: QDF_STATUS of operation 3292 */ 3293 static QDF_STATUS 3294 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx) 3295 { 3296 return QDF_STATUS_SUCCESS; 3297 } 3298 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */ 3299 3300 QDF_STATUS 3301 wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev, 3302 struct mgmt_rx_reo_frame_descriptor *desc, 3303 bool *is_queued) 3304 { 3305 struct mgmt_rx_reo_context *reo_ctx; 3306 QDF_STATUS ret; 3307 3308 if (!is_queued) 3309 return QDF_STATUS_E_NULL_VALUE; 3310 3311 *is_queued = false; 3312 3313 if (!desc || !desc->rx_params) { 3314 mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null"); 3315 return 
QDF_STATUS_E_NULL_VALUE; 3316 } 3317 3318 reo_ctx = mgmt_rx_reo_get_context(); 3319 if (!reo_ctx) { 3320 mgmt_rx_reo_err("REO context is NULL"); 3321 return QDF_STATUS_E_NULL_VALUE; 3322 } 3323 3324 /** 3325 * Critical Section = Host snapshot update + Calculation of wait 3326 * counts + Update reorder list. Following section describes the 3327 * motivation for making this a critical section. 3328 * Lets take an example of 2 links (Link A & B) and each has received 3329 * a management frame A1 and B1 such that MLO global time stamp of A1 < 3330 * MLO global time stamp of B1. Host is concurrently executing 3331 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs. 3332 * 3333 * A lock less version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is 3334 * as follows. 3335 * 3336 * wlan_mgmt_rx_reo_algo_entry() 3337 * { 3338 * Host snapshot update 3339 * Calculation of wait counts 3340 * Update reorder list 3341 * Release to upper layer 3342 * } 3343 * 3344 * We may run into race conditions under the following sequence of 3345 * operations. 3346 * 3347 * 1. Host snapshot update for link A in context of frame A1 3348 * 2. Host snapshot update for link B in context of frame B1 3349 * 3. Calculation of wait count for frame B1 3350 * link A wait count = 0 3351 * link B wait count = 0 3352 * 4. Update reorder list with frame B1 3353 * 5. Release B1 to upper layer 3354 * 6. Calculation of wait count for frame A1 3355 * link A wait count = 0 3356 * link B wait count = 0 3357 * 7. Update reorder list with frame A1 3358 * 8. Release A1 to upper layer 3359 * 3360 * This leads to incorrect behaviour as B1 goes to upper layer before 3361 * A1. 3362 * 3363 * To prevent this lets make Host snapshot update + Calculate wait count 3364 * a critical section by adding locks. The updated version of the API 3365 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows. 
3366 * 3367 * wlan_mgmt_rx_reo_algo_entry() 3368 * { 3369 * LOCK 3370 * Host snapshot update 3371 * Calculation of wait counts 3372 * UNLOCK 3373 * Update reorder list 3374 * Release to upper layer 3375 * } 3376 * 3377 * With this API also We may run into race conditions under the 3378 * following sequence of operations. 3379 * 3380 * 1. Host snapshot update for link A in context of frame A1 + 3381 * Calculation of wait count for frame A1 3382 * link A wait count = 0 3383 * link B wait count = 0 3384 * 2. Host snapshot update for link B in context of frame B1 + 3385 * Calculation of wait count for frame B1 3386 * link A wait count = 0 3387 * link B wait count = 0 3388 * 4. Update reorder list with frame B1 3389 * 5. Release B1 to upper layer 3390 * 7. Update reorder list with frame A1 3391 * 8. Release A1 to upper layer 3392 * 3393 * This also leads to incorrect behaviour as B1 goes to upper layer 3394 * before A1. 3395 * 3396 * To prevent this, let's make Host snapshot update + Calculate wait 3397 * count + Update reorder list a critical section by adding locks. 3398 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final") 3399 * is as follows. 
3400 * 3401 * wlan_mgmt_rx_reo_algo_entry() 3402 * { 3403 * LOCK 3404 * Host snapshot update 3405 * Calculation of wait counts 3406 * Update reorder list 3407 * UNLOCK 3408 * Release to upper layer 3409 * } 3410 */ 3411 qdf_spin_lock(&reo_ctx->reo_algo_entry_lock); 3412 3413 qdf_assert_always(desc->rx_params->reo_params->valid); 3414 qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT); 3415 3416 if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME || 3417 desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME) 3418 qdf_assert_always(desc->rx_params->reo_params->duration_us); 3419 3420 /* Update the Host snapshot */ 3421 ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc); 3422 if (QDF_IS_STATUS_ERROR(ret)) 3423 goto failure; 3424 3425 /* Compute wait count for this frame/event */ 3426 ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc); 3427 if (QDF_IS_STATUS_ERROR(ret)) 3428 goto failure; 3429 3430 /* Update the REO list */ 3431 ret = mgmt_rx_reo_update_list(&reo_ctx->reo_list, desc, is_queued); 3432 if (QDF_IS_STATUS_ERROR(ret)) 3433 goto failure; 3434 3435 ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, 3436 *is_queued, false); 3437 if (QDF_IS_STATUS_ERROR(ret)) { 3438 qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock); 3439 return ret; 3440 } 3441 3442 qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock); 3443 3444 /* Finally, release the entries for which pending frame is received */ 3445 return mgmt_rx_reo_list_release_entries(reo_ctx); 3446 3447 failure: 3448 /** 3449 * Ignore the return value of this function call, return 3450 * the actual reason for failure. 3451 */ 3452 mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true); 3453 3454 qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock); 3455 3456 return ret; 3457 } 3458 3459 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT 3460 /** 3461 * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation 3462 * context. 
 * @reo_context: Pointer to reo context
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}

/* No-op when simulation support is compiled out */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* No-op when simulation support is compiled out */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
/**
 * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
 * master management frame list
 * @master_frame_list: pointer to master management frame list
 * @frame: pointer to management frame parameters
 *
 * This API removes frames from the master management frame list. This API is
 * used in case of FW consumed management frames or management frames which
 * are dropped at host due to any error.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_sim_remove_frame_from_master_list(
		struct mgmt_rx_reo_master_frame_list *master_frame_list,
		const struct mgmt_rx_frame_params *frame)
{
	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
	QDF_STATUS status;

	if (!master_frame_list) {
		mgmt_rx_reo_err("Mgmt master frame list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!frame) {
		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_spin_lock(&master_frame_list->lock);

	/* A frame is identified by <link id, packet counter, global ts>;
	 * search both the pending and the stale lists for it.
	 */
	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
			  node) {
		if (pending_entry->params.link_id == frame->link_id &&
		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
		    pending_entry->params.global_timestamp ==
		    frame->global_timestamp) {
			matching_pend_entry = pending_entry;
			break;
		}
	}

	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
		if (stale_entry->params.link_id == frame->link_id &&
		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
		    stale_entry->params.global_timestamp ==
		    frame->global_timestamp) {
			matching_stale_entry = stale_entry;
			break;
		}
	}

	/* Found in pending and stale list. Duplicate entries, assert */
	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);

	if (!matching_pend_entry && !matching_stale_entry) {
		qdf_spin_unlock(&master_frame_list->lock);
		mgmt_rx_reo_err("No matching frame in pend/stale list");
		return QDF_STATUS_E_FAILURE;
	}

	if (matching_pend_entry) {
		status = qdf_list_remove_node(&master_frame_list->pending_list,
					      &matching_pend_entry->node);
		if (QDF_IS_STATUS_ERROR(status)) {
			qdf_spin_unlock(&master_frame_list->lock);
			mgmt_rx_reo_err("Failed to remove the matching entry");
			return status;
		}

		qdf_mem_free(matching_pend_entry);
	}

	if (matching_stale_entry) {
		status = qdf_list_remove_node(&master_frame_list->stale_list,
					      &matching_stale_entry->node);
		if (QDF_IS_STATUS_ERROR(status)) {
			qdf_spin_unlock(&master_frame_list->lock);
			mgmt_rx_reo_err("Failed to remove the matching entry");
			return status;
		}

		qdf_mem_free(matching_stale_entry);
	}

	qdf_spin_unlock(&master_frame_list->lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
 * pending management frame list
 * @master_frame_list: pointer to master management frame list
 * @frame: pointer to management frame parameters
 *
 * This API removes frames from the pending management frame list. This API is
 * used in case of FW consumed management frames or management frames which
 * are dropped at host due to any error.
3601 * 3602 * Return: QDF_STATUS of operation 3603 */ 3604 static QDF_STATUS 3605 mgmt_rx_reo_sim_remove_frame_from_pending_list( 3606 struct mgmt_rx_reo_master_frame_list *master_frame_list, 3607 const struct mgmt_rx_frame_params *frame) 3608 { 3609 struct mgmt_rx_reo_pending_frame_list_entry *cur_entry; 3610 struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL; 3611 QDF_STATUS status; 3612 3613 if (!master_frame_list) { 3614 mgmt_rx_reo_err("Mgmt master frame list is null"); 3615 return QDF_STATUS_E_NULL_VALUE; 3616 } 3617 3618 if (!frame) { 3619 mgmt_rx_reo_err("Pointer to mgmt frame params is null"); 3620 return QDF_STATUS_E_NULL_VALUE; 3621 } 3622 3623 qdf_spin_lock(&master_frame_list->lock); 3624 3625 qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) { 3626 if (cur_entry->params.link_id == frame->link_id && 3627 cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr && 3628 cur_entry->params.global_timestamp == 3629 frame->global_timestamp) { 3630 matching_entry = cur_entry; 3631 break; 3632 } 3633 } 3634 3635 if (!matching_entry) { 3636 qdf_spin_unlock(&master_frame_list->lock); 3637 mgmt_rx_reo_err("No matching frame in the pend list to remove"); 3638 return QDF_STATUS_E_FAILURE; 3639 } 3640 3641 status = qdf_list_remove_node(&master_frame_list->pending_list, 3642 &matching_entry->node); 3643 if (QDF_IS_STATUS_ERROR(status)) { 3644 qdf_spin_unlock(&master_frame_list->lock); 3645 mgmt_rx_reo_err("Failed to remove the matching entry"); 3646 return status; 3647 } 3648 3649 qdf_mem_free(matching_entry); 3650 3651 qdf_spin_unlock(&master_frame_list->lock); 3652 3653 3654 return QDF_STATUS_SUCCESS; 3655 } 3656 3657 /** 3658 * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the 3659 * pending management frame list 3660 * @master_frame_list: pointer to master management frame list 3661 * @frame: pointer to management frame parameters 3662 * 3663 * This API inserts frames to the pending management frame list. 
This API is 3664 * used to insert frames generated by the MAC HW to the pending frame list. 3665 * 3666 * Return: QDF_STATUS of operation 3667 */ 3668 static QDF_STATUS 3669 mgmt_rx_reo_sim_add_frame_to_pending_list( 3670 struct mgmt_rx_reo_master_frame_list *master_frame_list, 3671 const struct mgmt_rx_frame_params *frame) 3672 { 3673 struct mgmt_rx_reo_pending_frame_list_entry *new_entry; 3674 QDF_STATUS status; 3675 3676 if (!master_frame_list) { 3677 mgmt_rx_reo_err("Mgmt master frame list is null"); 3678 return QDF_STATUS_E_NULL_VALUE; 3679 } 3680 3681 if (!frame) { 3682 mgmt_rx_reo_err("Pointer mgmt frame params is null"); 3683 return QDF_STATUS_E_NULL_VALUE; 3684 } 3685 3686 new_entry = qdf_mem_malloc(sizeof(*new_entry)); 3687 if (!new_entry) { 3688 mgmt_rx_reo_err("Failed to allocate new entry to frame list"); 3689 return QDF_STATUS_E_NOMEM; 3690 } 3691 3692 new_entry->params = *frame; 3693 3694 qdf_spin_lock(&master_frame_list->lock); 3695 3696 status = qdf_list_insert_back(&master_frame_list->pending_list, 3697 &new_entry->node); 3698 3699 qdf_spin_unlock(&master_frame_list->lock); 3700 3701 if (QDF_IS_STATUS_ERROR(status)) { 3702 mgmt_rx_reo_err("Failed to add frame to pending list"); 3703 qdf_mem_free(new_entry); 3704 return status; 3705 } 3706 3707 return QDF_STATUS_SUCCESS; 3708 } 3709 3710 QDF_STATUS 3711 mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf, 3712 struct mgmt_rx_event_params *mgmt_rx_params) 3713 { 3714 struct mgmt_rx_reo_context *reo_context; 3715 struct mgmt_rx_reo_sim_context *sim_context; 3716 QDF_STATUS status; 3717 struct mgmt_rx_reo_params *reo_params; 3718 3719 if (!mgmt_rx_params) { 3720 mgmt_rx_reo_err("Mgmt rx params null"); 3721 return QDF_STATUS_E_NULL_VALUE; 3722 } 3723 3724 reo_params = mgmt_rx_params->reo_params; 3725 3726 reo_context = mgmt_rx_reo_get_context(); 3727 if (!reo_context) { 3728 mgmt_rx_reo_err("Mgmt reo context is null"); 3729 return QDF_STATUS_E_NULL_VALUE; 3730 } 3731 3732 
sim_context = &reo_context->sim_context; 3733 3734 qdf_spin_lock(&sim_context->master_frame_list.lock); 3735 3736 if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) { 3737 qdf_spin_unlock(&sim_context->master_frame_list.lock); 3738 mgmt_rx_reo_err("reo sim failure: pending frame list is empty"); 3739 qdf_assert_always(0); 3740 } else { 3741 struct mgmt_rx_frame_params *cur_entry_params; 3742 struct mgmt_rx_reo_pending_frame_list_entry *cur_entry; 3743 struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL; 3744 3745 /** 3746 * Make sure the frames delivered to upper layer are in the 3747 * increasing order of global time stamp. For that the frame 3748 * which is being delivered should be present at the head of the 3749 * pending frame list. There could be multiple frames with the 3750 * same global time stamp in the pending frame list. Search 3751 * among all the frames at the head of the list which has the 3752 * same global time stamp as the frame which is being delivered. 3753 * To find matching frame, check whether packet counter, 3754 * global time stamp and link id are same. 
3755 */ 3756 qdf_list_for_each(&sim_context->master_frame_list.pending_list, 3757 cur_entry, node) { 3758 cur_entry_params = &cur_entry->params; 3759 3760 if (cur_entry_params->global_timestamp != 3761 reo_params->global_timestamp) 3762 break; 3763 3764 if (cur_entry_params->link_id == reo_params->link_id && 3765 cur_entry_params->mgmt_pkt_ctr == 3766 reo_params->mgmt_pkt_ctr) { 3767 matching_entry = cur_entry; 3768 break; 3769 } 3770 } 3771 3772 if (!matching_entry) { 3773 qdf_spin_unlock(&sim_context->master_frame_list.lock); 3774 mgmt_rx_reo_err("reo sim failure: mismatch"); 3775 qdf_assert_always(0); 3776 } 3777 3778 status = qdf_list_remove_node( 3779 &sim_context->master_frame_list.pending_list, 3780 &matching_entry->node); 3781 qdf_mem_free(matching_entry); 3782 3783 if (QDF_IS_STATUS_ERROR(status)) { 3784 qdf_spin_unlock(&sim_context->master_frame_list.lock); 3785 mgmt_rx_reo_err("Failed to remove matching entry"); 3786 return status; 3787 } 3788 } 3789 3790 qdf_spin_unlock(&sim_context->master_frame_list.lock); 3791 3792 mgmt_rx_reo_debug("Successfully processed mgmt frame"); 3793 mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u", 3794 reo_params->link_id, reo_params->mgmt_pkt_ctr, 3795 reo_params->global_timestamp); 3796 3797 return QDF_STATUS_SUCCESS; 3798 } 3799 3800 /** 3801 * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly 3802 * @percentage_true: probability (in percentage) of true 3803 * 3804 * API to generate true with probability @percentage_true % and false with 3805 * probability (100 - @percentage_true) %. 
3806 * 3807 * Return: true with probability @percentage_true % and false with probability 3808 * (100 - @percentage_true) % 3809 */ 3810 static bool 3811 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true) 3812 { 3813 uint32_t rand; 3814 3815 if (percentage_true > 100) { 3816 mgmt_rx_reo_err("Invalid probability value for true, %u", 3817 percentage_true); 3818 return -EINVAL; 3819 } 3820 3821 get_random_bytes(&rand, sizeof(rand)); 3822 3823 return ((rand % 100) < percentage_true); 3824 } 3825 3826 /** 3827 * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer 3828 * value in the range [0, max) 3829 * @max: upper limit for the output 3830 * 3831 * API to generate random unsigned integer value in the range [0, max). 3832 * 3833 * Return: unsigned integer value in the range [0, max) 3834 */ 3835 static uint32_t 3836 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max) 3837 { 3838 uint32_t rand; 3839 3840 get_random_bytes(&rand, sizeof(rand)); 3841 3842 return (rand % max); 3843 } 3844 3845 /** 3846 * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds 3847 * @sleeptime_us: Sleep time in micro seconds 3848 * 3849 * This API uses msleep() internally. So the granularity is limited to 3850 * milliseconds. 3851 * 3852 * Return: none 3853 */ 3854 static void 3855 mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us) 3856 { 3857 msleep(sleeptime_us / USEC_PER_MSEC); 3858 } 3859 3860 /** 3861 * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host 3862 * layer 3863 * @arg: Argument 3864 * 3865 * This API handles the management frame at the host layer. This is applicable 3866 * for simulation alone. 
3867 * 3868 * Return: none 3869 */ 3870 static void 3871 mgmt_rx_reo_sim_frame_handler_host(void *arg) 3872 { 3873 struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg; 3874 uint32_t fw_to_host_delay_us; 3875 bool is_error_frame = false; 3876 int8_t link_id = -1; 3877 struct mgmt_rx_event_params *rx_params; 3878 QDF_STATUS status; 3879 struct mgmt_rx_reo_sim_context *sim_context; 3880 struct wlan_objmgr_pdev *pdev; 3881 3882 if (!frame_fw) { 3883 mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null", 3884 link_id); 3885 goto error_print; 3886 } 3887 3888 link_id = frame_fw->params.link_id; 3889 3890 sim_context = frame_fw->sim_context; 3891 if (!sim_context) { 3892 mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null", 3893 link_id); 3894 goto error_free_fw_frame; 3895 } 3896 3897 fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN + 3898 mgmt_rx_reo_sim_get_random_unsigned_int( 3899 MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA); 3900 3901 mgmt_rx_reo_sim_sleep(fw_to_host_delay_us); 3902 3903 if (!frame_fw->is_consumed_by_fw) { 3904 is_error_frame = mgmt_rx_reo_sim_get_random_bool( 3905 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES); 3906 3907 /** 3908 * This frame should be present in pending/stale list of the 3909 * master frame list. Error frames need not be reordered 3910 * by reorder algorithm. It is just used for book 3911 * keeping purposes. Hence remove it from the master list. 
3912 */ 3913 if (is_error_frame) { 3914 status = mgmt_rx_reo_sim_remove_frame_from_master_list( 3915 &sim_context->master_frame_list, 3916 &frame_fw->params); 3917 3918 if (QDF_IS_STATUS_ERROR(status)) { 3919 mgmt_rx_reo_err("HOST-%d : Failed to remove error frame", 3920 link_id); 3921 qdf_assert_always(0); 3922 } 3923 } 3924 } 3925 3926 mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u", 3927 link_id, frame_fw->params.global_timestamp, 3928 frame_fw->params.mgmt_pkt_ctr, 3929 frame_fw->is_consumed_by_fw, is_error_frame); 3930 3931 rx_params = alloc_mgmt_rx_event_params(); 3932 if (!rx_params) { 3933 mgmt_rx_reo_err("HOST-%d : Failed to allocate event params", 3934 link_id); 3935 goto error_free_fw_frame; 3936 } 3937 3938 rx_params->reo_params->link_id = frame_fw->params.link_id; 3939 rx_params->reo_params->global_timestamp = 3940 frame_fw->params.global_timestamp; 3941 rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr; 3942 rx_params->reo_params->valid = true; 3943 3944 pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID); 3945 if (!pdev) { 3946 mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id); 3947 goto error_free_mgmt_rx_event_params; 3948 } 3949 3950 if (is_error_frame) { 3951 status = tgt_mgmt_rx_reo_host_drop_handler( 3952 pdev, rx_params->reo_params); 3953 free_mgmt_rx_event_params(rx_params); 3954 } else if (frame_fw->is_consumed_by_fw) { 3955 status = tgt_mgmt_rx_reo_fw_consumed_event_handler( 3956 pdev, rx_params->reo_params); 3957 free_mgmt_rx_event_params(rx_params); 3958 } else { 3959 status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params); 3960 } 3961 3962 wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID); 3963 3964 if (QDF_IS_STATUS_ERROR(status)) { 3965 mgmt_rx_reo_err("Failed to execute reo algorithm"); 3966 goto error_free_fw_frame; 3967 } 3968 3969 qdf_mem_free(frame_fw); 3970 3971 return; 3972 3973 error_free_mgmt_rx_event_params: 3974 
free_mgmt_rx_event_params(rx_params); 3975 error_free_fw_frame: 3976 qdf_mem_free(frame_fw); 3977 error_print: 3978 mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error", 3979 link_id); 3980 } 3981 3982 /** 3983 * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management 3984 * frame reordering 3985 * @link_id: link id 3986 * @id: snapshot id 3987 * @value: snapshot value 3988 * 3989 * This API writes the snapshots used for management frame reordering. MAC HW 3990 * and FW can use this API to update the MAC HW/FW consumed/FW forwarded 3991 * snapshots. 3992 * 3993 * Return: QDF_STATUS 3994 */ 3995 static QDF_STATUS 3996 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id, 3997 enum mgmt_rx_reo_shared_snapshot_id id, 3998 struct mgmt_rx_reo_shared_snapshot value) 3999 { 4000 struct wlan_objmgr_pdev *pdev; 4001 struct mgmt_rx_reo_shared_snapshot *snapshot_address; 4002 QDF_STATUS status; 4003 4004 pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID); 4005 4006 if (!pdev) { 4007 mgmt_rx_reo_err("pdev is null"); 4008 return QDF_STATUS_E_NULL_VALUE; 4009 } 4010 4011 status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id, 4012 &snapshot_address); 4013 4014 wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID); 4015 4016 if (QDF_IS_STATUS_ERROR(status)) { 4017 mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK", 4018 id, pdev); 4019 return QDF_STATUS_E_FAILURE; 4020 } 4021 4022 snapshot_address->mgmt_rx_reo_snapshot_low = 4023 value.mgmt_rx_reo_snapshot_low; 4024 snapshot_address->mgmt_rx_reo_snapshot_high = 4025 value.mgmt_rx_reo_snapshot_high; 4026 4027 return QDF_STATUS_SUCCESS; 4028 } 4029 4030 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS (0) 4031 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE (1) 4032 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS (1) 4033 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE (16) 4034 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS (17) 4035 #define 
MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE (15)

#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS (0)
#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE (15)

/**
 * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
 * management frame
 * @global_timestamp: global time stamp
 * @mgmt_pkt_ctr: management packet counter
 *
 * This API gets the snapshot value for a frame with time stamp
 * @global_timestamp and sequence number @mgmt_pkt_ctr.
 *
 * Per the POS/SIZE macros above, the 32-bit time stamp is split across the
 * two words: the lower 15 bits go into the "low" word and the upper 17 bits
 * into the "high" word. The packet counter is stored in the "low" word and
 * repeated in the "high" word as a redundant copy.
 *
 * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
 */
static struct mgmt_rx_reo_shared_snapshot
mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
				   uint16_t mgmt_pkt_ctr)
{
	struct mgmt_rx_reo_shared_snapshot snapshot = {0};

	/* Mark the snapshot as valid */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
	/* Lower 15 bits of the global time stamp */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
		     global_timestamp);

	/* Upper 17 bits of the global time stamp */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
		     global_timestamp >> 15);
	/* Redundant copy of the packet counter */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
		     mgmt_pkt_ctr);

	return snapshot;
}

/**
 * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
 * @arg:
Argument
 *
 * This API handles the management frame at the fw layer. This is applicable
 * for simulation alone.
 *
 * Return: none
 */
static void
mgmt_rx_reo_sim_frame_handler_fw(void *arg)
{
	struct mgmt_rx_frame_mac_hw *frame_hw =
					(struct mgmt_rx_frame_mac_hw *)arg;
	uint32_t mac_hw_to_fw_delay_us;
	bool is_consumed_by_fw;
	struct mgmt_rx_frame_fw *frame_fw;
	int8_t link_id = -1;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
	struct mgmt_rx_reo_shared_snapshot snapshot_value;
	bool ret;

	if (!frame_hw) {
		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
				link_id);
		qdf_assert_always(0);
	}

	link_id = frame_hw->params.link_id;

	sim_context = frame_hw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_mac_hw_frame;
	}

	/* Simulate a random MAC HW to FW delivery delay */
	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);

	/* Randomly classify a fraction of frames as FW consumed */
	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);

	if (is_consumed_by_fw) {
		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. FW consumed frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
				&sim_context->master_frame_list,
				&frame_hw->params);

		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
					link_id);
			qdf_assert_always(0);
		}
	}

	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
			  link_id, frame_hw->params.global_timestamp,
			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);

	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
	if (!frame_fw) {
		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
				link_id);
		goto error_free_mac_hw_frame;
	}

	frame_fw->params = frame_hw->params;
	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
	frame_fw->sim_context = frame_hw->sim_context;

	/* Consumed frames update the "FW consumed" snapshot; forwarded
	 * frames update the "FW forwarded" snapshot.
	 */
	snapshot_id = is_consumed_by_fw ?
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED;

	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
					frame_hw->params.global_timestamp,
					frame_hw->params.mgmt_pkt_ctr);

	status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
						snapshot_value);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
				link_id, snapshot_id);
		goto error_free_fw_frame;
	}

	/* Hand the frame over to the host layer through the per-link
	 * ordered work queue.
	 */
	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
		goto error_free_fw_frame;
	}

	ret = qdf_queue_work(
			NULL, sim_context->host_mgmt_frame_handler[link_id],
			&frame_fw->frame_handler_host);
	if (!ret) {
		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
				link_id);
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_hw);

	return;

/* Error labels intentionally fall through: free the FW frame first, then
 * the MAC HW frame.
 */
error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_free_mac_hw_frame:
	qdf_mem_free(frame_hw);

	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
			link_id);
}

/**
 * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
 * from the index to the valid link list
 * @valid_link_list_index: Index to list of valid links
 *
 * Return: link id on success, MGMT_RX_REO_INVALID_LINK_ID on failure
 */
static int8_t
mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
{
	struct mgmt_rx_reo_sim_context *sim_context;

	if (valid_link_list_index >= MAX_MLO_LINKS) {
		mgmt_rx_reo_err("Invalid index %u to valid link list",
				valid_link_list_index);
		return MGMT_RX_REO_INVALID_LINK_ID;
	}

	sim_context = mgmt_rx_reo_sim_get_context();
	if (!sim_context) {
		mgmt_rx_reo_err("Mgmt reo simulation context is null");
		return MGMT_RX_REO_INVALID_LINK_ID;
	}

	return sim_context->link_id_to_pdev_map.valid_link_list
			[valid_link_list_index];
}

/**
 * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
 * the air
 * @mac_hw: pointer to structure representing MAC HW
 * @num_mlo_links: number of MLO HW links
 * @frame: pointer to management frame parameters
 *
 * This API simulates the management frame reception from air.
4244 * 4245 * Return: QDF_STATUS 4246 */ 4247 static QDF_STATUS 4248 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw, 4249 uint8_t num_mlo_links, 4250 struct mgmt_rx_frame_params *frame) 4251 { 4252 uint8_t valid_link_list_index; 4253 QDF_STATUS status; 4254 int8_t link_id; 4255 4256 if (!mac_hw) { 4257 mgmt_rx_reo_err("pointer to MAC HW struct is null"); 4258 return QDF_STATUS_E_NULL_VALUE; 4259 } 4260 4261 if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) { 4262 mgmt_rx_reo_err("Invalid number of MLO links %u", 4263 num_mlo_links); 4264 return QDF_STATUS_E_INVAL; 4265 } 4266 4267 if (!frame) { 4268 mgmt_rx_reo_err("pointer to frame parameters is null"); 4269 return QDF_STATUS_E_NULL_VALUE; 4270 } 4271 4272 valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int( 4273 num_mlo_links); 4274 link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index); 4275 qdf_assert_always(link_id >= 0); 4276 qdf_assert_always(link_id < MAX_MLO_LINKS); 4277 4278 frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC); 4279 frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id]; 4280 frame->link_id = link_id; 4281 4282 return QDF_STATUS_SUCCESS; 4283 } 4284 4285 /** 4286 * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC 4287 * HW in case of any Rx error. 
 * @mac_hw: pointer to structure representing MAC HW
 * @frame: pointer to management frame parameters
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
				      struct mgmt_rx_frame_params *frame)
{
	if (!mac_hw) {
		mgmt_rx_reo_err("pointer to MAC HW struct is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!frame) {
		mgmt_rx_reo_err("pointer to frame parameters is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (frame->link_id >= MAX_MLO_LINKS) {
		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
		return QDF_STATUS_E_INVAL;
	}

	/* Undo the per-link packet counter increment done at reception */
	--mac_hw->mgmt_pkt_ctr[frame->link_id];

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
 * @data: pointer to data input
 *
 * kthread handler to simulate MAC HW.
 *
 * Return: 0 for success, else failure
 */
static int
mgmt_rx_reo_sim_mac_hw_thread(void *data)
{
	struct mgmt_rx_reo_sim_context *sim_context = data;
	struct mgmt_rx_reo_sim_mac_hw *mac_hw;

	if (!sim_context) {
		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
		return -EINVAL;
	}

	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;

	/* Generate frames until asked to stop. Each iteration: receive one
	 * frame "from the air", record it in the master frame list, publish
	 * the MAC HW snapshot and hand the frame to the FW layer via the
	 * per-link work queue. On any failure the steps already performed
	 * for this frame are rolled back before continuing.
	 */
	while (!qdf_thread_should_stop()) {
		uint32_t inter_frame_delay_us;
		struct mgmt_rx_frame_params frame;
		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
		int8_t link_id = -1;
		QDF_STATUS status;
		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
		struct mgmt_rx_reo_shared_snapshot snapshot_value;
		int8_t num_mlo_links;
		bool ret;

		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
		if (num_mlo_links < 0 ||
		    num_mlo_links > MAX_MLO_LINKS) {
			mgmt_rx_reo_err("Invalid number of MLO links %d",
					num_mlo_links);
			qdf_assert_always(0);
		}

		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
							  &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Receive from the air failed");
			/**
			 * Frame reception failed and we are not sure about the
			 * link id. Without link id there is no way to restore
			 * the mac hw state. Hence assert unconditionally.
			 */
			qdf_assert_always(0);
		}
		link_id = frame.link_id;

		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
				  link_id, frame.global_timestamp,
				  frame.mgmt_pkt_ctr);

		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
		if (!frame_mac_hw) {
			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
							mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			continue;
		}

		frame_mac_hw->params = frame;
		frame_mac_hw->sim_context = sim_context;

		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
				&sim_context->master_frame_list, &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
							mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
						frame.global_timestamp,
						frame.mgmt_pkt_ctr);

		status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
							snapshot_value);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
					link_id, snapshot_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
				&sim_context->master_frame_list, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			status = mgmt_rx_reo_sim_undo_receive_from_air(
							mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
					 mgmt_rx_reo_sim_frame_handler_fw,
					 frame_mac_hw);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to create work",
					link_id);
			qdf_assert_always(0);
		}

		ret = qdf_queue_work(
			NULL, sim_context->fw_mgmt_frame_handler[link_id],
			&frame_mac_hw->frame_handler_fw);
		if (!ret) {
			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
					link_id);
			qdf_assert_always(0);
		}

		/* Random inter-frame gap before generating the next frame */
		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);

		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
	}

	return 0;
}

/**
 * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
 * management frame list
 * @master_frame_list: Pointer to master frame list
 *
 * This API initializes the master management frame list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_sim_init_master_frame_list(
		struct mgmt_rx_reo_master_frame_list *master_frame_list)
{
	qdf_spinlock_create(&master_frame_list->lock);

	qdf_list_create(&master_frame_list->pending_list,
			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
	qdf_list_create(&master_frame_list->stale_list,
			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
 * management frame list
 * @master_frame_list: Pointer to master frame list
 *
 * This API de initializes the master
management frame list 4488 * 4489 * Return: QDF_STATUS 4490 */ 4491 static QDF_STATUS 4492 mgmt_rx_reo_sim_deinit_master_frame_list( 4493 struct mgmt_rx_reo_master_frame_list *master_frame_list) 4494 { 4495 qdf_spin_lock(&master_frame_list->lock); 4496 qdf_list_destroy(&master_frame_list->stale_list); 4497 qdf_list_destroy(&master_frame_list->pending_list); 4498 qdf_spin_unlock(&master_frame_list->lock); 4499 4500 qdf_spinlock_destroy(&master_frame_list->lock); 4501 4502 return QDF_STATUS_SUCCESS; 4503 } 4504 4505 /** 4506 * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate 4507 * unique link id values 4508 * @link_id_to_pdev_map: pointer to link id to pdev map 4509 * @link_id: Pointer to unique link id 4510 * 4511 * This API generates unique link id values for each pdev. This API should be 4512 * called after acquiring the spin lock protecting link id to pdev map. 4513 * 4514 * Return: QDF_STATUS 4515 */ 4516 static QDF_STATUS 4517 mgmt_rx_reo_sim_generate_unique_link_id( 4518 struct wlan_objmgr_pdev *link_id_to_pdev_map, uint8_t *link_id) 4519 { 4520 uint8_t random_link_id; 4521 uint8_t link_id; 4522 4523 if (!link_id_to_pdev_map || !link_id) 4524 return QDF_STATUS_E_NULL_VALUE; 4525 4526 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) 4527 if (!link_id_to_pdev_map[link_id]) 4528 break; 4529 4530 if (link_id == MAX_MLO_LINKS) { 4531 mgmt_rx_reo_err("All link ids are already allocated"); 4532 return QDF_STATUS_E_FAILURE; 4533 } 4534 4535 while (1) { 4536 random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int( 4537 MAX_MLO_LINKS); 4538 4539 if (!link_id_to_pdev_map[random_link_id]) 4540 break; 4541 } 4542 4543 *link_id = random_link_id; 4544 4545 return QDF_STATUS_SUCCESS; 4546 } 4547 4548 /** 4549 * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id 4550 * to pdev map 4551 * @link_id_to_pdev_map: pointer to link id to pdev map 4552 * @pdev: pointer to pdev object 4553 * 4554 * This API incrementally builds the 
MLO HW link id to pdev map. This API is 4555 * used only for simulation. 4556 * 4557 * Return: QDF_STATUS 4558 */ 4559 static QDF_STATUS 4560 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map( 4561 struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map, 4562 struct wlan_objmgr_pdev *pdev) 4563 { 4564 uint8_t link_id; 4565 4566 if (!link_id_to_pdev_map) { 4567 mgmt_rx_reo_err("Link id to pdev map is null"); 4568 return QDF_STATUS_E_NULL_VALUE; 4569 } 4570 4571 if (!pdev) { 4572 mgmt_rx_reo_err("pdev is null"); 4573 return QDF_STATUS_E_NULL_VALUE; 4574 } 4575 4576 qdf_spin_lock(&link_id_to_pdev_map->lock); 4577 4578 status = mgmt_rx_reo_sim_generate_unique_link_id( 4579 link_id_to_pdev_map->map, &link_id) 4580 if (QDF_IS_STATUS_ERROR(status)) { 4581 qdf_spin_unlock(&link_id_to_pdev_map->lock); 4582 return QDF_STATUS_E_FAILURE; 4583 } 4584 qdf_assert_always(link_id < MAX_MLO_LINKS); 4585 4586 link_id_to_pdev_map->map[link_id] = pdev; 4587 link_id_to_pdev_map->valid_link_list 4588 [link_id_to_pdev_map->num_mlo_links] = link_id; 4589 link_id_to_pdev_map->num_mlo_links++; 4590 4591 qdf_spin_unlock(&link_id_to_pdev_map->lock); 4592 4593 return QDF_STATUS_SUCCESS; 4594 } 4595 4596 /** 4597 * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link 4598 * id to pdev map 4599 * @link_id_to_pdev_map: pointer to link id to pdev map 4600 * @pdev: pointer to pdev object 4601 * 4602 * This API incrementally destroys the MLO HW link id to pdev map. This API is 4603 * used only for simulation. 
4604 * 4605 * Return: QDF_STATUS 4606 */ 4607 static QDF_STATUS 4608 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map( 4609 struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map, 4610 struct wlan_objmgr_pdev *pdev) 4611 { 4612 uint8_t link_id; 4613 4614 if (!link_id_to_pdev_map) { 4615 mgmt_rx_reo_err("Link id to pdev map is null"); 4616 return QDF_STATUS_E_NULL_VALUE; 4617 } 4618 4619 if (!pdev) { 4620 mgmt_rx_reo_err("pdev is null"); 4621 return QDF_STATUS_E_NULL_VALUE; 4622 } 4623 4624 qdf_spin_lock(&link_id_to_pdev_map->lock); 4625 4626 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) { 4627 if (link_id_to_pdev_map->map[link_id] == pdev) { 4628 link_id_to_pdev_map->map[link_id] = NULL; 4629 qdf_spin_unlock(&link_id_to_pdev_map->lock); 4630 4631 return QDF_STATUS_SUCCESS; 4632 } 4633 } 4634 4635 qdf_spin_unlock(&link_id_to_pdev_map->lock); 4636 4637 mgmt_rx_reo_err("Pdev %pK is not found in map", pdev); 4638 4639 return QDF_STATUS_E_FAILURE; 4640 } 4641 4642 QDF_STATUS 4643 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev) 4644 { 4645 struct mgmt_rx_reo_sim_context *sim_context; 4646 QDF_STATUS status; 4647 4648 sim_context = mgmt_rx_reo_sim_get_context(); 4649 if (!sim_context) { 4650 mgmt_rx_reo_err("Mgmt simulation context is null"); 4651 return QDF_STATUS_E_NULL_VALUE; 4652 } 4653 4654 status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map( 4655 &sim_context->link_id_to_pdev_map, pdev); 4656 4657 if (QDF_IS_STATUS_ERROR(status)) { 4658 mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev); 4659 return status; 4660 } 4661 4662 return QDF_STATUS_SUCCESS; 4663 } 4664 4665 QDF_STATUS 4666 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev) 4667 { 4668 struct mgmt_rx_reo_sim_context *sim_context; 4669 QDF_STATUS status; 4670 4671 sim_context = mgmt_rx_reo_sim_get_context(); 4672 if (!sim_context) { 4673 mgmt_rx_reo_err("Mgmt simulation context is null"); 4674 return 
QDF_STATUS_E_NULL_VALUE; 4675 } 4676 4677 status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map( 4678 &sim_context->link_id_to_pdev_map, pdev); 4679 4680 if (QDF_IS_STATUS_ERROR(status)) { 4681 mgmt_rx_reo_err("Failed to remove pdev from the map"); 4682 return status; 4683 } 4684 4685 return QDF_STATUS_SUCCESS; 4686 } 4687 4688 QDF_STATUS 4689 mgmt_rx_reo_sim_start(void) 4690 { 4691 struct mgmt_rx_reo_context *reo_context; 4692 struct mgmt_rx_reo_sim_context *sim_context; 4693 qdf_thread_t *mac_hw_thread; 4694 uint8_t link_id; 4695 uint8_t id; 4696 QDF_STATUS status; 4697 4698 reo_context = mgmt_rx_reo_get_context(); 4699 if (!reo_context) { 4700 mgmt_rx_reo_err("reo context is null"); 4701 return QDF_STATUS_E_NULL_VALUE; 4702 } 4703 4704 reo_context->simulation_in_progress = true; 4705 4706 sim_context = &reo_context->sim_context; 4707 4708 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) { 4709 struct workqueue_struct *wq; 4710 4711 wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0, 4712 link_id); 4713 if (!wq) { 4714 mgmt_rx_reo_err("Host workqueue creation failed"); 4715 status = QDF_STATUS_E_FAILURE; 4716 goto error_destroy_fw_and_host_work_queues_till_last_link; 4717 } 4718 sim_context->host_mgmt_frame_handler[link_id] = wq; 4719 4720 wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0, 4721 link_id); 4722 if (!wq) { 4723 mgmt_rx_reo_err("FW workqueue creation failed"); 4724 status = QDF_STATUS_E_FAILURE; 4725 goto error_destroy_host_work_queue_of_last_link; 4726 } 4727 sim_context->fw_mgmt_frame_handler[link_id] = wq; 4728 } 4729 4730 mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread, 4731 sim_context, "MAC_HW_thread"); 4732 if (!mac_hw_thread) { 4733 mgmt_rx_reo_err("MAC HW thread creation failed"); 4734 status = QDF_STATUS_E_FAILURE; 4735 goto error_destroy_fw_and_host_work_queues_of_last_link; 4736 } 4737 4738 sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread; 4739 4740 
qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread); 4741 4742 return QDF_STATUS_SUCCESS; 4743 4744 error_destroy_fw_and_host_work_queues_of_last_link: 4745 drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]); 4746 destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]); 4747 4748 error_destroy_host_work_queue_of_last_link: 4749 drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]); 4750 destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]); 4751 4752 error_destroy_fw_and_host_work_queues_till_last_link: 4753 for (id = 0; id < link_id; id++) { 4754 drain_workqueue(sim_context->fw_mgmt_frame_handler[id]); 4755 destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]); 4756 4757 drain_workqueue(sim_context->host_mgmt_frame_handler[id]); 4758 destroy_workqueue(sim_context->host_mgmt_frame_handler[id]); 4759 } 4760 4761 return status; 4762 } 4763 4764 QDF_STATUS 4765 mgmt_rx_reo_sim_stop(void) 4766 { 4767 struct mgmt_rx_reo_context *reo_context; 4768 struct mgmt_rx_reo_sim_context *sim_context; 4769 struct mgmt_rx_reo_master_frame_list *master_frame_list; 4770 uint8_t link_id; 4771 QDF_STATUS status; 4772 4773 reo_context = mgmt_rx_reo_get_context(); 4774 if (!reo_context) { 4775 mgmt_rx_reo_err("reo context is null"); 4776 return QDF_STATUS_E_NULL_VALUE; 4777 } 4778 4779 sim_context = &reo_context->sim_context; 4780 4781 status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread); 4782 if (QDF_IS_STATUS_ERROR(status)) { 4783 mgmt_rx_reo_err("Failed to stop the thread"); 4784 return status; 4785 } 4786 4787 sim_context->mac_hw_sim.mac_hw_thread = NULL; 4788 4789 for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) { 4790 /* Wait for all the pending frames to be processed by FW */ 4791 drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]); 4792 destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]); 4793 4794 /* Wait for all the pending frames to be processed by host */ 4795 
drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]); 4796 destroy_workqueue( 4797 sim_context->host_mgmt_frame_handler[link_id]); 4798 } 4799 4800 status = mgmt_rx_reo_print_ingress_frame_debug_info(); 4801 if (QDF_IS_STATUS_ERROR(status)) { 4802 mgmt_rx_reo_err("Failed to print ingress frame debug info"); 4803 return status; 4804 } 4805 4806 status = mgmt_rx_reo_print_egress_frame_debug_info(); 4807 if (QDF_IS_STATUS_ERROR(status)) { 4808 mgmt_rx_reo_err("Failed to print egress frame debug info"); 4809 return status; 4810 } 4811 4812 master_frame_list = &sim_context->master_frame_list; 4813 if (!qdf_list_empty(&master_frame_list->pending_list) || 4814 !qdf_list_empty(&master_frame_list->stale_list)) { 4815 mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty"); 4816 4817 status = mgmt_rx_reo_list_display(&reo_context->reo_list); 4818 if (QDF_IS_STATUS_ERROR(status)) { 4819 mgmt_rx_reo_err("Failed to print reorder list"); 4820 return status; 4821 } 4822 4823 qdf_assert_always(0); 4824 } else { 4825 mgmt_rx_reo_err("reo sim passed"); 4826 } 4827 4828 reo_context->simulation_in_progress = false; 4829 4830 return QDF_STATUS_SUCCESS; 4831 } 4832 4833 /** 4834 * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation 4835 * context. 
 * @reo_context: Pointer to reo context
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	uint8_t link_id;

	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/* Start from a clean slate; sim context holds no dynamic state yet */
	qdf_mem_zero(sim_context, sizeof(*sim_context));

	status = mgmt_rx_reo_sim_init_master_frame_list(
			&sim_context->master_frame_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to create master mgmt frame list");
		return status;
	}

	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);

	/* No valid links yet; entries get populated as pdevs are created */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
					MGMT_RX_REO_INVALID_LINK_ID;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;

	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/* Tear down in reverse order of mgmt_rx_reo_sim_init() */
	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);

	status = mgmt_rx_reo_sim_deinit_master_frame_list(
			&sim_context->master_frame_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to destroy master frame list");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_sim_get_snapshot_address(
		struct wlan_objmgr_pdev *pdev,
		enum mgmt_rx_reo_shared_snapshot_id id,
		struct mgmt_rx_reo_shared_snapshot **address)
{
	int8_t link_id;
	struct mgmt_rx_reo_sim_context *sim_context;

	sim_context = mgmt_rx_reo_sim_get_context();
	if (!sim_context) {
		mgmt_rx_reo_err("Mgmt reo simulation context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!pdev) {
		mgmt_rx_reo_err("pdev is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
		return QDF_STATUS_E_INVAL;
	}

	if (!address) {
		mgmt_rx_reo_err("Pointer to snapshot address is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
				pdev);
		return QDF_STATUS_E_INVAL;
	}

	/* Simulation keeps one snapshot object per (link, snapshot id) */
	*address = &sim_context->snapshot[link_id][id];

	return QDF_STATUS_SUCCESS;
}
#endif /*
WLAN_MGMT_RX_REO_SIM_SUPPORT */

#ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
/**
 * mgmt_rx_reo_ingress_debug_info_init() - Initialize the management rx-reorder
 * ingress frame debug info
 * @psoc: Pointer to psoc
 * @ingress_debug_info_init_count: Initialization count
 * @ingress_frame_debug_info: Ingress frame debug info object
 *
 * API to initialize the management rx-reorder ingress frame debug info.
 * The debug info object is shared; the atomic count makes the allocation
 * happen only on the first invocation.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_ingress_debug_info_init
		(struct wlan_objmgr_psoc *psoc,
		 qdf_atomic_t *ingress_debug_info_init_count,
		 struct reo_ingress_debug_info *ingress_frame_debug_info)
{
	if (!psoc) {
		mgmt_rx_reo_err("psoc is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!ingress_frame_debug_info) {
		mgmt_rx_reo_err("Ingress frame debug info is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* We need to initialize only for the first invocation */
	if (qdf_atomic_read(ingress_debug_info_init_count))
		goto success;

	/* A configured size of zero disables the ingress debug frame list */
	ingress_frame_debug_info->frame_list_size =
		wlan_mgmt_rx_reo_get_ingress_frame_debug_list_size(psoc);

	if (ingress_frame_debug_info->frame_list_size) {
		ingress_frame_debug_info->frame_list = qdf_mem_malloc
			(ingress_frame_debug_info->frame_list_size *
			 sizeof(*ingress_frame_debug_info->frame_list));

		if (!ingress_frame_debug_info->frame_list) {
			mgmt_rx_reo_err("Failed to allocate debug info");
			return QDF_STATUS_E_NOMEM;
		}
	}

	/*
	 * Initialize the string for storing the debug info table boarder
	 * ("boarder" is the field's name; it means the table border).
	 */
	qdf_mem_set(ingress_frame_debug_info->boarder,
		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');

success:
	qdf_atomic_inc(ingress_debug_info_init_count);
	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_egress_debug_info_init() - Initialize the management rx-reorder
 *
egress frame debug info
 * @psoc: Pointer to psoc
 * @egress_debug_info_init_count: Initialization count
 * @egress_frame_debug_info: Egress frame debug info object
 *
 * API to initialize the management rx-reorder egress frame debug info.
 * Mirrors mgmt_rx_reo_ingress_debug_info_init(): allocation happens only
 * on the first invocation, tracked by the atomic count.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_egress_debug_info_init
		(struct wlan_objmgr_psoc *psoc,
		 qdf_atomic_t *egress_debug_info_init_count,
		 struct reo_egress_debug_info *egress_frame_debug_info)
{
	if (!psoc) {
		mgmt_rx_reo_err("psoc is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!egress_frame_debug_info) {
		mgmt_rx_reo_err("Egress frame debug info is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* We need to initialize only for the first invocation */
	if (qdf_atomic_read(egress_debug_info_init_count))
		goto success;

	/* A configured size of zero disables the egress debug frame list */
	egress_frame_debug_info->frame_list_size =
		wlan_mgmt_rx_reo_get_egress_frame_debug_list_size(psoc);

	if (egress_frame_debug_info->frame_list_size) {
		egress_frame_debug_info->frame_list = qdf_mem_malloc
			(egress_frame_debug_info->frame_list_size *
			 sizeof(*egress_frame_debug_info->frame_list));

		if (!egress_frame_debug_info->frame_list) {
			mgmt_rx_reo_err("Failed to allocate debug info");
			return QDF_STATUS_E_NOMEM;
		}
	}

	/* Initialize the string for storing the debug info table boarder */
	qdf_mem_set(egress_frame_debug_info->boarder,
		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');

success:
	qdf_atomic_inc(egress_debug_info_init_count);
	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_debug_info_init() - Initialize the management rx-reorder debug
 * info
 * @pdev: pointer to pdev object
 *
 * API to initialize the management rx-reorder debug info.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
{
	struct mgmt_rx_reo_context *reo_context;
	QDF_STATUS status;
	struct wlan_objmgr_psoc *psoc;

	psoc = wlan_pdev_get_psoc(pdev);

	/* Nothing to do when the REO feature is disabled on this psoc */
	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
		return QDF_STATUS_SUCCESS;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/*
	 * NOTE(review): failures below return a generic E_FAILURE instead of
	 * propagating 'status' — confirm callers do not rely on the specific
	 * error code (e.g. E_NOMEM).
	 */
	status = mgmt_rx_reo_ingress_debug_info_init
			(psoc, &reo_context->ingress_debug_info_init_count,
			 &reo_context->ingress_frame_debug_info);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize ingress debug info");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_egress_debug_info_init
			(psoc, &reo_context->egress_debug_info_init_count,
			 &reo_context->egress_frame_debug_info);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize egress debug info");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_ingress_debug_info_deinit() - De initialize the management
 * rx-reorder ingress frame debug info
 * @psoc: Pointer to psoc
 * @ingress_debug_info_init_count: Initialization count
 * @ingress_frame_debug_info: Ingress frame debug info object
 *
 * API to de initialize the management rx-reorder ingress frame debug info.
5110 * 5111 * Return: QDF_STATUS 5112 */ 5113 static QDF_STATUS 5114 mgmt_rx_reo_ingress_debug_info_deinit 5115 (struct wlan_objmgr_psoc *psoc, 5116 qdf_atomic_t *ingress_debug_info_init_count, 5117 struct reo_ingress_debug_info *ingress_frame_debug_info) 5118 { 5119 if (!psoc) { 5120 mgmt_rx_reo_err("psoc is null"); 5121 return QDF_STATUS_E_NULL_VALUE; 5122 } 5123 5124 if (!ingress_frame_debug_info) { 5125 mgmt_rx_reo_err("Ingress frame debug info is null"); 5126 return QDF_STATUS_E_NULL_VALUE; 5127 } 5128 5129 if (!qdf_atomic_read(ingress_debug_info_init_count)) { 5130 mgmt_rx_reo_err("Ingress debug info ref cnt is 0"); 5131 return QDF_STATUS_E_FAILURE; 5132 } 5133 5134 /* We need to de-initialize only for the last invocation */ 5135 if (qdf_atomic_dec_and_test(ingress_debug_info_init_count)) 5136 goto success; 5137 5138 if (ingress_frame_debug_info->frame_list) { 5139 qdf_mem_free(ingress_frame_debug_info->frame_list); 5140 ingress_frame_debug_info->frame_list = NULL; 5141 } 5142 ingress_frame_debug_info->frame_list_size = 0; 5143 5144 qdf_mem_zero(ingress_frame_debug_info->boarder, 5145 MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1); 5146 5147 success: 5148 return QDF_STATUS_SUCCESS; 5149 } 5150 5151 /** 5152 * mgmt_rx_reo_egress_debug_info_deinit() - De initialize the management 5153 * rx-reorder egress frame debug info 5154 * @psoc: Pointer to psoc 5155 * @egress_debug_info_init_count: Initialization count 5156 * @egress_frame_debug_info: Egress frame debug info object 5157 * 5158 * API to de initialize the management rx-reorder egress frame debug info. 
5159 * 5160 * Return: QDF_STATUS 5161 */ 5162 static QDF_STATUS 5163 mgmt_rx_reo_egress_debug_info_deinit 5164 (struct wlan_objmgr_psoc *psoc, 5165 qdf_atomic_t *egress_debug_info_init_count, 5166 struct reo_egress_debug_info *egress_frame_debug_info) 5167 { 5168 if (!psoc) { 5169 mgmt_rx_reo_err("psoc is null"); 5170 return QDF_STATUS_E_NULL_VALUE; 5171 } 5172 5173 if (!egress_frame_debug_info) { 5174 mgmt_rx_reo_err("Egress frame debug info is null"); 5175 return QDF_STATUS_E_NULL_VALUE; 5176 } 5177 5178 if (!qdf_atomic_read(egress_debug_info_init_count)) { 5179 mgmt_rx_reo_err("Egress debug info ref cnt is 0"); 5180 return QDF_STATUS_E_FAILURE; 5181 } 5182 5183 /* We need to de-initialize only for the last invocation */ 5184 if (qdf_atomic_dec_and_test(egress_debug_info_init_count)) 5185 goto success; 5186 5187 if (egress_frame_debug_info->frame_list) { 5188 qdf_mem_free(egress_frame_debug_info->frame_list); 5189 egress_frame_debug_info->frame_list = NULL; 5190 } 5191 egress_frame_debug_info->frame_list_size = 0; 5192 5193 qdf_mem_zero(egress_frame_debug_info->boarder, 5194 MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1); 5195 5196 success: 5197 return QDF_STATUS_SUCCESS; 5198 } 5199 5200 /** 5201 * mgmt_rx_reo_debug_info_deinit() - De initialize the management rx-reorder 5202 * debug info 5203 * @pdev: Pointer to pdev object 5204 * 5205 * API to de initialize the management rx-reorder debug info. 
5206 * 5207 * Return: QDF_STATUS 5208 */ 5209 static QDF_STATUS 5210 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev) 5211 { 5212 struct mgmt_rx_reo_context *reo_context; 5213 QDF_STATUS status; 5214 struct wlan_objmgr_psoc *psoc; 5215 5216 psoc = wlan_pdev_get_psoc(pdev); 5217 5218 if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc)) 5219 return QDF_STATUS_SUCCESS; 5220 5221 reo_context = mgmt_rx_reo_get_context(); 5222 if (!reo_context) { 5223 mgmt_rx_reo_err("reo context is null"); 5224 return QDF_STATUS_E_NULL_VALUE; 5225 } 5226 5227 status = mgmt_rx_reo_ingress_debug_info_deinit 5228 (psoc, &reo_context->ingress_debug_info_init_count, 5229 &reo_context->ingress_frame_debug_info); 5230 if (QDF_IS_STATUS_ERROR(status)) { 5231 mgmt_rx_reo_err("Failed to deinitialize ingress debug info"); 5232 return QDF_STATUS_E_FAILURE; 5233 } 5234 5235 status = mgmt_rx_reo_egress_debug_info_deinit 5236 (psoc, &reo_context->egress_debug_info_init_count, 5237 &reo_context->egress_frame_debug_info); 5238 if (QDF_IS_STATUS_ERROR(status)) { 5239 mgmt_rx_reo_err("Failed to deinitialize egress debug info"); 5240 return QDF_STATUS_E_FAILURE; 5241 } 5242 5243 return QDF_STATUS_SUCCESS; 5244 } 5245 #else 5246 static QDF_STATUS 5247 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_psoc *psoc) 5248 { 5249 return QDF_STATUS_SUCCESS; 5250 } 5251 5252 static QDF_STATUS 5253 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_psoc *psoc) 5254 { 5255 return QDF_STATUS_SUCCESS; 5256 } 5257 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */ 5258 5259 /** 5260 * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list 5261 * @reo_list: Pointer to reorder list 5262 * 5263 * API to flush all the entries of the reorder list. This API would acquire 5264 * the lock protecting the list. 
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
{
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry *temp;

	if (!reo_list) {
		mgmt_rx_reo_err("reorder list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	/* Deletion-safe walk: every entry is freed, list ends up empty */
	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
		free_mgmt_rx_event_params(cur_entry->rx_params);

		/*
		 * Release the reference taken when the entry is inserted into
		 * the reorder list.
		 */
		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
					     WLAN_MGMT_RX_REO_ID);

		qdf_mem_free(cur_entry);
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_list_deinit() - De initialize the management rx-reorder list
 * @reo_list: Pointer to reorder list
 *
 * API to de initialize the management rx-reorder list.
5304 * 5305 * Return: QDF_STATUS 5306 */ 5307 static QDF_STATUS 5308 mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list) 5309 { 5310 QDF_STATUS status; 5311 5312 qdf_timer_free(&reo_list->global_mgmt_rx_inactivity_timer); 5313 qdf_timer_free(&reo_list->ageout_timer); 5314 5315 status = mgmt_rx_reo_flush_reorder_list(reo_list); 5316 if (QDF_IS_STATUS_ERROR(status)) { 5317 mgmt_rx_reo_err("Failed to flush the reorder list"); 5318 return QDF_STATUS_E_FAILURE; 5319 } 5320 qdf_spinlock_destroy(&reo_list->list_lock); 5321 qdf_list_destroy(&reo_list->list); 5322 5323 return QDF_STATUS_SUCCESS; 5324 } 5325 5326 QDF_STATUS 5327 mgmt_rx_reo_deinit_context(void) 5328 { 5329 QDF_STATUS status; 5330 struct mgmt_rx_reo_context *reo_context; 5331 5332 reo_context = mgmt_rx_reo_get_context(); 5333 if (!reo_context) { 5334 mgmt_rx_reo_err("reo context is null"); 5335 return QDF_STATUS_E_NULL_VALUE; 5336 } 5337 5338 qdf_timer_sync_cancel( 5339 &reo_context->reo_list.global_mgmt_rx_inactivity_timer); 5340 qdf_timer_sync_cancel(&reo_context->reo_list.ageout_timer); 5341 5342 qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock); 5343 5344 status = mgmt_rx_reo_sim_deinit(reo_context); 5345 if (QDF_IS_STATUS_ERROR(status)) { 5346 mgmt_rx_reo_err("Failed to de initialize reo sim context"); 5347 qdf_mem_free(reo_context); 5348 return QDF_STATUS_E_FAILURE; 5349 } 5350 5351 status = mgmt_rx_reo_list_deinit(&reo_context->reo_list); 5352 if (QDF_IS_STATUS_ERROR(status)) { 5353 mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list"); 5354 qdf_mem_free(reo_context); 5355 return status; 5356 } 5357 5358 qdf_mem_free(reo_context); 5359 5360 return QDF_STATUS_SUCCESS; 5361 } 5362 5363 QDF_STATUS 5364 mgmt_rx_reo_init_context(void) 5365 { 5366 QDF_STATUS status; 5367 QDF_STATUS temp; 5368 struct mgmt_rx_reo_context *reo_context; 5369 5370 reo_context = qdf_mem_malloc(sizeof(*reo_context)); 5371 if (!reo_context) { 5372 mgmt_rx_reo_err("Failed to allocate reo context"); 5373 return 
QDF_STATUS_E_NULL_VALUE; 5374 } 5375 mgmt_rx_reo_set_context(reo_context); 5376 5377 status = mgmt_rx_reo_list_init(&reo_context->reo_list); 5378 if (QDF_IS_STATUS_ERROR(status)) { 5379 mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list"); 5380 return status; 5381 } 5382 5383 status = mgmt_rx_reo_sim_init(reo_context); 5384 if (QDF_IS_STATUS_ERROR(status)) { 5385 mgmt_rx_reo_err("Failed to initialize reo simulation context"); 5386 goto error_reo_list_deinit; 5387 } 5388 5389 qdf_spinlock_create(&reo_context->reo_algo_entry_lock); 5390 5391 qdf_timer_mod(&reo_context->reo_list.ageout_timer, 5392 MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS); 5393 5394 return QDF_STATUS_SUCCESS; 5395 5396 error_reo_list_deinit: 5397 temp = mgmt_rx_reo_list_deinit(&reo_context->reo_list); 5398 if (QDF_IS_STATUS_ERROR(temp)) { 5399 mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list"); 5400 return temp; 5401 } 5402 5403 return status; 5404 } 5405 5406 /** 5407 * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot 5408 * params object 5409 * @snapshot_params: Pointer to snapshot params object 5410 * 5411 * Return: void 5412 */ 5413 static void 5414 wlan_mgmt_rx_reo_initialize_snapshot_params( 5415 struct mgmt_rx_reo_snapshot_params *snapshot_params) 5416 { 5417 snapshot_params->valid = false; 5418 snapshot_params->mgmt_pkt_ctr = 0; 5419 snapshot_params->global_timestamp = 0; 5420 } 5421 5422 /** 5423 * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder 5424 * snapshot addresses for a given pdev 5425 * @pdev: pointer to pdev object 5426 * 5427 * Return: QDF_STATUS 5428 */ 5429 static QDF_STATUS 5430 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev) 5431 { 5432 enum mgmt_rx_reo_shared_snapshot_id snapshot_id; 5433 struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx; 5434 QDF_STATUS status; 5435 5436 mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev); 5437 if (!mgmt_rx_reo_pdev_ctx) { 5438 
		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	snapshot_id = 0;

	/* Fetch/cache the shared snapshot info for every snapshot ID */
	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
		struct mgmt_rx_reo_snapshot_info *snapshot_info;

		snapshot_info =
			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
			[snapshot_id];
		status = wlan_mgmt_rx_reo_get_snapshot_info
				(pdev, snapshot_id, snapshot_info);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
					snapshot_id);
			return status;
		}

		snapshot_id++;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
 * snapshot values for a given pdev
 * @pdev: pointer to pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
{
	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;

	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
	if (!mgmt_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Mark every last-seen shared snapshot as invalid/zero */
	snapshot_id = 0;
	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
		wlan_mgmt_rx_reo_initialize_snapshot_params
			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
			 [snapshot_id]);
		snapshot_id++;
	}

	/* Initialize Host snapshot params */
	wlan_mgmt_rx_reo_initialize_snapshot_params
			(&mgmt_rx_reo_pdev_ctx->host_snapshot);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
 * for management Rx REO pdev component private object
 * @pdev: pointer to pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
{
	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;

	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
	if (!mgmt_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Gates frame reordering for this pdev */
	mgmt_rx_reo_pdev_ctx->init_complete = true;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
 * for management Rx REO pdev component private object
 * @pdev: pointer to pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
{
	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;

	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
	if (!mgmt_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mgmt_rx_reo_pdev_ctx->init_complete = false;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
 * snapshot related data structures for a given pdev
 * @pdev: pointer to pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
{
	QDF_STATUS status;

	/* Values first, then the shared snapshot addresses */
	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize snapshot value");
		return status;
	}

	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize snapshot address");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_clear_snapshots() - Clear management
Rx reorder snapshot related
 * data structures for a given pdev
 * @pdev: pointer to pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
{
	QDF_STATUS status;

	/* Re-initializing the values is sufficient; addresses stay valid */
	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize snapshot value");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_attach(struct wlan_objmgr_pdev *pdev)
{
	QDF_STATUS status;

	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
		return QDF_STATUS_SUCCESS;

	status = mgmt_rx_reo_initialize_snapshots(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
		return status;
	}

	/* Reordering starts only after this flag is set */
	status = mgmt_rx_reo_set_initialization_complete(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to set initialization complete");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_detach(struct wlan_objmgr_pdev *pdev)
{
	QDF_STATUS status;

	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
		return QDF_STATUS_SUCCESS;

	/* Mirror of mgmt_rx_reo_attach(): clear the flag, then snapshots */
	status = mgmt_rx_reo_clear_initialization_complete(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to clear initialization complete");
		return status;
	}

	status = mgmt_rx_reo_clear_snapshots(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_pdev_obj_create_notification(
	struct wlan_objmgr_pdev *pdev,
	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
{
	QDF_STATUS status;
	struct
mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL; 5645 5646 if (!pdev) { 5647 mgmt_rx_reo_err("pdev is null"); 5648 status = QDF_STATUS_E_NULL_VALUE; 5649 goto failure; 5650 } 5651 5652 if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) { 5653 status = QDF_STATUS_SUCCESS; 5654 goto failure; 5655 } 5656 5657 status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev); 5658 if (QDF_IS_STATUS_ERROR(status)) { 5659 mgmt_rx_reo_err("Failed to handle pdev create for reo sim"); 5660 goto failure; 5661 } 5662 5663 mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx)); 5664 if (!mgmt_rx_reo_pdev_ctx) { 5665 mgmt_rx_reo_err("Allocation failure for REO pdev context"); 5666 status = QDF_STATUS_E_NOMEM; 5667 goto failure; 5668 } 5669 5670 mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx; 5671 5672 status = mgmt_rx_reo_debug_info_init(pdev); 5673 if (QDF_IS_STATUS_ERROR(status)) { 5674 mgmt_rx_reo_err("Failed to initialize debug info"); 5675 status = QDF_STATUS_E_NOMEM; 5676 goto failure; 5677 } 5678 5679 return QDF_STATUS_SUCCESS; 5680 5681 failure: 5682 if (mgmt_rx_reo_pdev_ctx) 5683 qdf_mem_free(mgmt_rx_reo_pdev_ctx); 5684 5685 mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL; 5686 5687 return status; 5688 } 5689 5690 QDF_STATUS 5691 mgmt_rx_reo_pdev_obj_destroy_notification( 5692 struct wlan_objmgr_pdev *pdev, 5693 struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) 5694 { 5695 QDF_STATUS status; 5696 5697 if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) 5698 return QDF_STATUS_SUCCESS; 5699 5700 status = mgmt_rx_reo_debug_info_deinit(pdev); 5701 if (QDF_IS_STATUS_ERROR(status)) { 5702 mgmt_rx_reo_err("Failed to de-initialize debug info"); 5703 return status; 5704 } 5705 5706 qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx); 5707 mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL; 5708 5709 status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev); 5710 if (QDF_IS_STATUS_ERROR(status)) { 5711 mgmt_rx_reo_err("Failed 
to handle pdev create for reo sim"); 5712 return status; 5713 } 5714 5715 return QDF_STATUS_SUCCESS; 5716 } 5717 5718 QDF_STATUS 5719 mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc) 5720 { 5721 return QDF_STATUS_SUCCESS; 5722 } 5723 5724 QDF_STATUS 5725 mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc) 5726 { 5727 return QDF_STATUS_SUCCESS; 5728 } 5729 5730 bool 5731 mgmt_rx_reo_is_simulation_in_progress(void) 5732 { 5733 struct mgmt_rx_reo_context *reo_context; 5734 5735 reo_context = mgmt_rx_reo_get_context(); 5736 if (!reo_context) { 5737 mgmt_rx_reo_err("reo context is null"); 5738 return false; 5739 } 5740 5741 return reo_context->simulation_in_progress; 5742 } 5743 5744 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT 5745 QDF_STATUS 5746 mgmt_rx_reo_print_ingress_frame_stats(void) 5747 { 5748 struct mgmt_rx_reo_context *reo_context; 5749 QDF_STATUS status; 5750 5751 reo_context = mgmt_rx_reo_get_context(); 5752 if (!reo_context) { 5753 mgmt_rx_reo_err("reo context is null"); 5754 return QDF_STATUS_E_NULL_VALUE; 5755 } 5756 5757 status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context); 5758 if (QDF_IS_STATUS_ERROR(status)) { 5759 mgmt_rx_reo_err("Failed to print ingress frame stats"); 5760 return status; 5761 } 5762 5763 return QDF_STATUS_SUCCESS; 5764 } 5765 5766 QDF_STATUS 5767 mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames) 5768 { 5769 struct mgmt_rx_reo_context *reo_context; 5770 QDF_STATUS status; 5771 5772 reo_context = mgmt_rx_reo_get_context(); 5773 if (!reo_context) { 5774 mgmt_rx_reo_err("reo context is null"); 5775 return QDF_STATUS_E_NULL_VALUE; 5776 } 5777 5778 status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context, 5779 num_frames); 5780 if (QDF_IS_STATUS_ERROR(status)) { 5781 mgmt_rx_reo_err("Failed to print ingress frame info"); 5782 return status; 5783 } 5784 5785 return QDF_STATUS_SUCCESS; 5786 } 5787 5788 QDF_STATUS 5789 mgmt_rx_reo_print_egress_frame_stats(void) 5790 { 5791 
	struct mgmt_rx_reo_context *reo_context;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame stats");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
{
	struct mgmt_rx_reo_context *reo_context;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
							   num_frames);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame info");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* Debug support compiled out: all print APIs are no-op successes */
QDF_STATUS
mgmt_rx_reo_print_ingress_frame_stats(void)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_egress_frame_stats(void)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */