1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_mlo_mgr_cmn.h>
28 #include <wlan_mlo_mgr_setup.h>
29 
30 static struct mgmt_rx_reo_context *g_rx_reo_ctx[WLAN_MAX_MLO_GROUPS];
31 
32 #define mgmt_rx_reo_get_context(_grp_id) (g_rx_reo_ctx[_grp_id])
33 #define mgmt_rx_reo_set_context(grp_id, c)       (g_rx_reo_ctx[grp_id] = c)
34 
35 #define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
36 #define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
37 
38 /**
39  * wlan_mgmt_rx_reo_get_ctx_from_pdev() - Get MGMT Rx REO context from pdev
40  * @pdev: Pointer to pdev structure object
41  *
42  * API to get the MGMT RX reo context of the pdev using the appropriate
43  * MLO group id.
44  *
45  * Return: Mgmt rx reo context for the pdev
46  */
47 
48 static inline struct mgmt_rx_reo_context*
49 wlan_mgmt_rx_reo_get_ctx_from_pdev(struct wlan_objmgr_pdev *pdev)
50 {
51 	uint8_t ml_grp_id;
52 
53 	ml_grp_id = wlan_get_mlo_grp_id_from_pdev(pdev);
54 	if (ml_grp_id >= WLAN_MAX_MLO_GROUPS) {
55 		mgmt_rx_reo_err("REO context - Invalid ML Group ID");
56 		return NULL;
57 	}
58 
59 	return mgmt_rx_reo_get_context(ml_grp_id);
60 }
61 
62 /**
63  * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
64  * @ctr1: Management packet counter1
65  * @ctr2: Management packet counter2
66  *
67  * We can't directly use the comparison operator here because the counters can
68  * overflow. But these counters have a property that the difference between
69  * them can never be greater than half the range of the data type.
70  * We can make use of this condition to detect which one is actually greater.
71  *
72  * Return: true if @ctr1 is greater than or equal to @ctr2, else false
73  */
74 static inline bool
75 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
76 {
77 	uint16_t delta = ctr1 - ctr2;
78 
79 	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
80 }
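
/*
 * Illustration with assumed counter values (not taken from any capture):
 * if the 16-bit counter has wrapped, e.g. ctr1 = 0x0004 and ctr2 = 0xFFF0,
 * then delta = (uint16_t)(0x0004 - 0xFFF0) = 0x0014, which is <= 0x8000,
 * so ctr1 is correctly reported as greater. For ctr1 = 0xFFF0 and
 * ctr2 = 0x0004, delta = 0xFFEC > 0x8000 and the API returns false.
 */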
81 
82 /**
83  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
84  * @ctr1: Management packet counter1
85  * @ctr2: Management packet counter2
86  *
87  * We can't directly use the subtract operator here because the counters can
88  * overflow. But these counters have a property that the difference between
89  * them can never be greater than half the range of the data type.
90  * We can make use of this condition to detect which one is actually greater
91  * and return the difference accordingly.
92  *
93  * Return: Difference between @ctr1 and @ctr2
94  */
95 static inline int
96 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
97 {
98 	uint16_t delta = ctr1 - ctr2;
99 
100 	/*
101 	 * If delta is greater than half the range (i.e., ctr1 is actually
102 	 * smaller than ctr2), then the result should be a negative number.
103 	 * Subtracting the entire range gives the correct negative value.
104 	 */
105 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
106 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
107 
108 	return delta;
109 }
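
/*
 * Illustration with assumed counter values: for ctr1 = 0x0003 and
 * ctr2 = 0xFFFE, delta = 0x0005 and +5 is returned (ctr1 is 5 frames
 * ahead across the wrap). For ctr1 = 0xFFFE and ctr2 = 0x0003,
 * delta = 0xFFFB > 0x8000, so the full range (0x10000) is subtracted
 * and -5 is returned.
 */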
110 
111 #define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
112 /**
113  * mgmt_rx_reo_compare_global_timestamps_gte() - Compare global timestamps
114  * @ts1: Global timestamp1
115  * @ts2: Global timestamp2
116  *
117  * We can't directly use the comparison operator here because the timestamps can
118  * overflow. But these timestamps have a property that the difference between
119  * them can never be greater than half the range of the data type.
120  * We can make use of this condition to detect which one is actually greater.
121  *
122  * Return: true if @ts1 is greater than or equal to @ts2, else false
123  */
124 static inline bool
125 mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
126 {
127 	uint32_t delta = ts1 - ts2;
128 
129 	return delta <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
130 }
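
/*
 * The same wrap-around reasoning used for the packet counters applies to
 * the 32-bit global timestamps: e.g. (assumed values) ts1 = 0x00000010 and
 * ts2 = 0xFFFFFFF0 give delta = 0x20 <= 0x80000000, so ts1 is treated as
 * greater even though the timestamp has wrapped.
 */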
131 
132 /**
133  * mgmt_rx_reo_is_stale_frame() - API to check whether the given management
134  * frame is stale
135  * @ts_last_released_frame: pointer to global time stamp of the last frame
136  * removed from the reorder list
137  * @frame_desc: pointer to frame descriptor
138  *
139  * This API checks whether the current management frame under processing is
140  * stale. Any frame older than the last frame delivered to upper layer is a
141  * stale frame. This could happen when we have to deliver frames out of order
142  * due to a time out or the list size limit. Frames which arrive late at the
143  * host with a time stamp less than that of the last delivered frame are stale
144  * frames and need to be handled differently.
145  *
146  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
147  * @frame_desc will be filled with proper values.
148  */
149 static QDF_STATUS
150 mgmt_rx_reo_is_stale_frame(
151 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame,
152 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
153 {
154 	uint32_t cur_frame_start_ts;
155 	uint32_t cur_frame_end_ts;
156 
157 	if (!ts_last_released_frame) {
158 		mgmt_rx_reo_err("Last released frame time stamp info is null");
159 		return QDF_STATUS_E_NULL_VALUE;
160 	}
161 
162 	if (!frame_desc) {
163 		mgmt_rx_reo_err("Frame descriptor is null");
164 		return QDF_STATUS_E_NULL_VALUE;
165 	}
166 
167 	frame_desc->is_stale = false;
168 	frame_desc->is_parallel_rx = false;
169 
170 	if (!ts_last_released_frame->valid)
171 		return QDF_STATUS_SUCCESS;
172 
173 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
174 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
175 
176 	frame_desc->is_stale =
177 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
178 					ts_last_released_frame->start_ts);
179 
180 	if (mgmt_rx_reo_compare_global_timestamps_gte
181 		(ts_last_released_frame->start_ts, cur_frame_start_ts) &&
182 	    mgmt_rx_reo_compare_global_timestamps_gte
183 		(cur_frame_end_ts, ts_last_released_frame->end_ts)) {
184 		frame_desc->is_parallel_rx = true;
185 		frame_desc->is_stale = false;
186 	}
187 
188 	return QDF_STATUS_SUCCESS;
189 }
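
/*
 * Example with assumed timestamps: if the last released frame spans
 * start_ts = 1000 to end_ts = 1050, an incoming frame with start_ts = 980
 * is marked stale because it started before the last released frame. If
 * that same frame has end_ts = 1060, its reception interval [980, 1060]
 * spans the last released frame's interval, so it is instead flagged as a
 * parallel rx and is not treated as stale.
 */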
190 
191 QDF_STATUS
192 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
193 {
194 	uint16_t valid_link_bitmap_shmem;
195 	uint16_t valid_link_bitmap;
196 	int8_t num_active_links_shmem;
197 	int8_t num_active_links;
198 	uint8_t grp_id = 0;
199 	QDF_STATUS status;
200 
201 	if (!psoc) {
202 		mgmt_rx_reo_err("psoc is null");
203 		return QDF_STATUS_E_NULL_VALUE;
204 	}
205 
206 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
207 		return QDF_STATUS_SUCCESS;
208 
209 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
210 							 &num_active_links_shmem);
211 	if (QDF_IS_STATUS_ERROR(status)) {
212 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
213 		return QDF_STATUS_E_FAILURE;
214 	}
215 	qdf_assert_always(num_active_links_shmem > 0);
216 
217 	if (!mlo_psoc_get_grp_id(psoc, &grp_id)) {
218 		mgmt_rx_reo_err("Failed to get valid MLO Group id");
219 		return QDF_STATUS_E_INVAL;
220 	}
221 
222 	num_active_links = wlan_mlo_get_num_active_links(grp_id);
223 	qdf_assert_always(num_active_links > 0);
224 
225 	qdf_assert_always(num_active_links_shmem == num_active_links);
226 
227 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
228 							  &valid_link_bitmap_shmem);
229 	if (QDF_IS_STATUS_ERROR(status)) {
230 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
231 		return QDF_STATUS_E_INVAL;
232 	}
233 	qdf_assert_always(valid_link_bitmap_shmem != 0);
234 
235 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
236 	qdf_assert_always(valid_link_bitmap != 0);
237 
238 	qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap);
239 
240 	return QDF_STATUS_SUCCESS;
241 }
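
/*
 * For illustration (assumed topology): in an MLO group where only HW links
 * 0 and 2 are valid, both the host view and the target shared memory view
 * are expected to report 2 active links and a valid link bitmap of 0x5;
 * any mismatch between the two views trips the asserts above.
 */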
242 
243 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
244 /**
245  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
246  * @link_id: Link id to be checked
247  * @grp_id: MLO group id to which the link belongs
248  *
249  * Return: true if @link_id is a valid link else false
250  */
251 static bool
252 mgmt_rx_reo_is_valid_link(uint8_t link_id, uint8_t grp_id)
253 {
254 	uint16_t valid_hw_link_bitmap;
255 
256 	if (link_id >= MAX_MLO_LINKS) {
257 		mgmt_rx_reo_err("Invalid link id %u", link_id);
258 		return false;
259 	}
260 
261 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
262 	qdf_assert_always(valid_hw_link_bitmap);
263 
264 	return (valid_hw_link_bitmap & (1 << link_id));
265 }
266 
267 /**
268  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
269  * system
270  * @reo_context: Pointer to reo context object
271  * @grp_id: MLO group id whose active links are counted
272  *
273  * Return: On success returns number of active MLO HW links. On failure
274  * returns WLAN_MLO_INVALID_NUM_LINKS.
275  */
276 static int8_t
277 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
278 			      uint8_t grp_id)
279 {
280 	if (!reo_context) {
281 		mgmt_rx_reo_err("Mgmt reo context is null");
282 		return WLAN_MLO_INVALID_NUM_LINKS;
283 	}
284 
285 	return wlan_mlo_get_num_active_links(grp_id);
286 }
287 
288 static QDF_STATUS
289 mgmt_rx_reo_handle_potential_premature_delivery(
290 				struct mgmt_rx_reo_context *reo_context,
291 				uint32_t global_timestamp)
292 {
293 	return QDF_STATUS_SUCCESS;
294 }
295 
296 static QDF_STATUS
297 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
298 			       struct mgmt_rx_reo_frame_descriptor *desc)
299 {
300 	return QDF_STATUS_SUCCESS;
301 }
302 #else
303 /**
304  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
305  * @sim_context: Pointer to management rx reorder simulation context object
 * @link_id: Link id to be checked
 *
306  * Return: true if @link_id is a valid link, else false
307  */
308 static bool
309 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
310 			      uint8_t link_id)
311 {
312 	bool is_valid_link = false;
313 
314 	if (!sim_context) {
315 		mgmt_rx_reo_err("Mgmt reo sim context is null");
316 		return false;
317 	}
318 
319 	if (link_id >= MAX_MLO_LINKS) {
320 		mgmt_rx_reo_err("Invalid link id %u", link_id);
321 		return false;
322 	}
323 
324 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
325 
326 	if (sim_context->link_id_to_pdev_map.map[link_id])
327 		is_valid_link = true;
328 
329 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
330 
331 	return is_valid_link;
332 }
333 
334 /**
335  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
336  * @link_id: HW Link ID to be verified
337  * @ml_grp_id: MLO group id to which the link belongs
338  *
339  * Return: true if @link_id is a valid link else false
340  */
341 static bool
342 mgmt_rx_reo_is_valid_link(uint8_t link_id, uint8_t ml_grp_id)
343 {
344 	struct mgmt_rx_reo_context *reo_context;
345 
346 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
347 
348 	if (!reo_context) {
349 		mgmt_rx_reo_err("Mgmt reo context is null");
350 		return false;
351 	}
352 
353 	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
354 					     link_id);
355 }
356 
357 /**
358  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
359  * simulation context object
360  * @sim_context: Pointer to reo simulation context object
361  *
362  * Number of MLO links will be equal to number of pdevs in the
363  * system. In case of simulation all the pdevs are assumed
364  * to have MLO capability.
365  *
366  * Return: On success returns number of MLO HW links. On failure
367  * returns WLAN_MLO_INVALID_NUM_LINKS.
368  */
369 static int8_t
370 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
371 {
372 	uint8_t num_mlo_links;
373 
374 	if (!sim_context) {
375 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
376 		return WLAN_MLO_INVALID_NUM_LINKS;
377 	}
378 
379 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
380 
381 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
382 
383 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
384 
385 	return num_mlo_links;
386 }
387 
388 /**
389  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
390  * context object
391  * @reo_context: Pointer to reo context object
 * @grp_id: MLO group id (unused in the simulation path)
392  *
393  * Return: On success returns number of MLO HW links. On failure
394  * returns WLAN_MLO_INVALID_NUM_LINKS.
395  */
396 static int8_t
397 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
398 			      uint8_t grp_id)
399 {
400 	if (!reo_context) {
401 		mgmt_rx_reo_err("Mgmt reo context is null");
402 		return WLAN_MLO_INVALID_NUM_LINKS;
403 	}
404 
405 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
406 }
407 
408 /**
409  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
410  * rx reorder simulation context
411  * @ml_grp_id: MLO group id for the rx reordering
412  *
413  * Return: On success returns the pointer to management rx reorder
414  * simulation context. On failure returns NULL.
415  */
416 static struct mgmt_rx_reo_sim_context *
417 mgmt_rx_reo_sim_get_context(uint8_t ml_grp_id)
418 {
419 	struct mgmt_rx_reo_context *reo_context;
420 
421 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
422 	if (!reo_context) {
423 		mgmt_rx_reo_err("Mgmt reo context is null");
424 		return NULL;
425 	}
426 
427 	return &reo_context->sim_context;
428 }
429 
430 int8_t
431 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
432 {
433 	struct mgmt_rx_reo_sim_context *sim_context;
434 	int8_t link_id;
	uint8_t ml_grp_id;
435 
	ml_grp_id = wlan_get_mlo_grp_id_from_pdev(pdev);
436 	sim_context = mgmt_rx_reo_sim_get_context(ml_grp_id);
437 	if (!sim_context) {
438 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
439 		return MGMT_RX_REO_INVALID_LINK_ID;
440 	}
441 
442 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
443 
444 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
445 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
446 			break;
447 
448 	/* pdev is not found in map */
449 	if (link_id == MAX_MLO_LINKS)
450 		link_id = MGMT_RX_REO_INVALID_LINK_ID;
451 
452 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
453 
454 	return link_id;
455 }
456 
457 struct wlan_objmgr_pdev *
458 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
459 					  wlan_objmgr_ref_dbgid refdbgid)
460 {
461 	struct mgmt_rx_reo_sim_context *sim_context;
462 	struct wlan_objmgr_pdev *pdev;
463 	QDF_STATUS status;
464 
465 	sim_context = mgmt_rx_reo_sim_get_context();
466 	if (!sim_context) {
467 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
468 		return NULL;
469 	}
470 
471 	if (mlo_link_id >= MAX_MLO_LINKS) {
472 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
473 		return NULL;
474 	}
475 
476 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
477 
478 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
479 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
480 	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
481 		mgmt_rx_reo_err("Failed to get pdev reference");
482 		return NULL;
483 	}
484 
485 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
486 
487 	return pdev;
488 }
489 
490 /**
491  * mgmt_rx_reo_handle_potential_premature_delivery() - Helper API to handle
492  * premature delivery.
493  * @reo_context: Pointer to management rx reorder context
494  * @global_timestamp: Global time stamp of the current management frame
495  *
496  * Sometimes we have to deliver a management frame to the upper layers even
497  * before its wait count reaching zero. This is called premature delivery.
498  * Premature delivery could happen due to time out or reorder list overflow.
499  *
500  * Return: QDF_STATUS
501  */
502 static QDF_STATUS
503 mgmt_rx_reo_handle_potential_premature_delivery(
504 				struct mgmt_rx_reo_context *reo_context,
505 				uint32_t global_timestamp)
506 {
507 	qdf_list_t stale_frame_list_temp;
508 	QDF_STATUS status;
509 	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
510 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
511 	struct mgmt_rx_reo_sim_context *sim_context;
512 	struct mgmt_rx_reo_master_frame_list *master_frame_list;
513 
514 	if (!reo_context)
515 		return QDF_STATUS_E_NULL_VALUE;
516 
517 	sim_context = &reo_context->sim_context;
518 	master_frame_list = &sim_context->master_frame_list;
519 
520 	qdf_spin_lock(&master_frame_list->lock);
521 
522 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
523 		if (cur_entry->params.global_timestamp == global_timestamp)
524 			break;
525 
526 		latest_stale_frame = cur_entry;
527 	}
528 
529 	if (latest_stale_frame) {
530 		qdf_list_create(&stale_frame_list_temp,
531 				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);
532 
533 		status = qdf_list_split(&stale_frame_list_temp,
534 					&master_frame_list->pending_list,
535 					&latest_stale_frame->node);
536 		if (QDF_IS_STATUS_ERROR(status))
537 			goto exit_unlock_master_frame_list;
538 
539 		status = qdf_list_join(&master_frame_list->stale_list,
540 				       &stale_frame_list_temp);
541 		if (QDF_IS_STATUS_ERROR(status))
542 			goto exit_unlock_master_frame_list;
543 	}
544 
545 	status = QDF_STATUS_SUCCESS;
546 
547 exit_unlock_master_frame_list:
548 	qdf_spin_unlock(&master_frame_list->lock);
549 
550 	return status;
551 }
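
/*
 * For illustration (simulation only, assumed entries): if the pending list
 * holds frames with global timestamps 100, 200 and 300 and the frame with
 * timestamp 300 is delivered prematurely, the loop above stops at the entry
 * with timestamp 300 and records the entry with timestamp 200 as the latest
 * stale frame. Entries 100 and 200 are then split out of the pending list
 * and joined to the stale list.
 */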
552 
553 /**
554  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
555  * stale management frame list
556  * @master_frame_list: pointer to master management frame list
557  * @reo_params: pointer to reo params
558  *
559  * This API removes frames from the stale management frame list.
560  *
561  * Return: QDF_STATUS of operation
562  */
563 static QDF_STATUS
564 mgmt_rx_reo_sim_remove_frame_from_stale_list(
565 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
566 		const struct mgmt_rx_reo_params *reo_params)
567 {
568 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
569 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
570 	QDF_STATUS status;
571 
572 	if (!master_frame_list || !reo_params)
573 		return QDF_STATUS_E_NULL_VALUE;
574 
575 	qdf_spin_lock(&master_frame_list->lock);
576 
577 	/*
578 	 * Stale frames can come in any order at the host. Do a linear search and
579 	 * remove the matching entry.
580 	 */
581 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
582 		if (cur_entry->params.link_id == reo_params->link_id &&
583 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
584 		    cur_entry->params.global_timestamp ==
585 		    reo_params->global_timestamp) {
586 			matching_entry = cur_entry;
587 			break;
588 		}
589 	}
590 
591 	if (!matching_entry) {
592 		qdf_spin_unlock(&master_frame_list->lock);
593 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
594 		qdf_assert_always(0);
595 	}
596 
597 	status = qdf_list_remove_node(&master_frame_list->stale_list,
598 				      &matching_entry->node);
599 
600 	if (QDF_IS_STATUS_ERROR(status)) {
601 		qdf_spin_unlock(&master_frame_list->lock);
602 		return status;
603 	}
604 
605 	qdf_mem_free(matching_entry);
606 
607 	qdf_spin_unlock(&master_frame_list->lock);
608 
609 	return QDF_STATUS_SUCCESS;
610 }
611 
612 /**
613  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
614  * @reo_list: Pointer to reorder list
615  * @desc: Pointer to frame descriptor
616  *
617  * Return: QDF_STATUS of operation
618  */
619 static QDF_STATUS
620 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
621 			       struct mgmt_rx_reo_frame_descriptor *desc)
622 {
623 	QDF_STATUS status;
624 	struct mgmt_rx_reo_context *reo_context;
625 	struct mgmt_rx_reo_sim_context *sim_context;
626 	struct mgmt_rx_reo_params *reo_params;
627 
628 	if (!reo_list || !desc)
629 		return QDF_STATUS_E_NULL_VALUE;
630 
631 	/* FW consumed/Error frames are already removed */
632 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
633 		return QDF_STATUS_SUCCESS;
634 
635 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
636 	if (!reo_context)
637 		return QDF_STATUS_E_NULL_VALUE;
638 
639 	sim_context = &reo_context->sim_context;
640 
641 	reo_params = desc->rx_params->reo_params;
642 	if (!reo_params)
643 		return QDF_STATUS_E_NULL_VALUE;
644 
645 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
646 				&sim_context->master_frame_list, reo_params);
647 
648 	return status;
649 }
650 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
651 
652 /**
653  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
654  * whether the current frame getting delivered to upper layer is a premature
655  * delivery
656  * @release_reason: release reason
657  *
658  * Return: true for a premature delivery
659  */
660 static bool
661 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
662 {
663 	return !(release_reason &
664 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
665 }
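
/*
 * For example, a frame released only because it aged out
 * (MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT) never reached a zero
 * wait count and is therefore a potential premature delivery, whereas a
 * frame whose release reason includes the zero wait count bit is not.
 */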
666 
667 /**
668  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
669  * MGMT Rx REO module
670  * @pdev: pointer to pdev object
671  *
672  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
673  * else NULL
674  */
675 static struct mgmt_rx_reo_pdev_info *
676 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
677 {
678 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
679 
680 	if (!pdev) {
681 		mgmt_rx_reo_err("pdev is null");
682 		return NULL;
683 	}
684 
685 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
686 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
687 						      WLAN_UMAC_COMP_MGMT_TXRX);
688 
689 	if (!mgmt_txrx_pdev_ctx) {
690 		mgmt_rx_reo_err("mgmt txrx context is NULL");
691 		return NULL;
692 	}
693 
694 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
695 }
696 
697 /**
698  * mgmt_rx_reo_print_snapshots() - Print all snapshots related
699  * to management Rx reorder module
700  * @mac_hw_ss: MAC HW snapshot
701  * @fw_forwarded_ss: FW forwarded snapshot
702  * @fw_consumed_ss: FW consumed snapshot
703  * @host_ss: Host snapshot
704  *
705  * Return: QDF_STATUS
706  */
707 static QDF_STATUS
708 mgmt_rx_reo_print_snapshots
709 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
710 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
711 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
712 			 struct mgmt_rx_reo_snapshot_params *host_ss)
713 {
714 	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
715 			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
716 			  mac_hw_ss->global_timestamp);
717 	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
718 			  fw_forwarded_ss->valid,
719 			  fw_forwarded_ss->mgmt_pkt_ctr,
720 			  fw_forwarded_ss->global_timestamp);
721 	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
722 			  fw_consumed_ss->valid,
723 			  fw_consumed_ss->mgmt_pkt_ctr,
724 			  fw_consumed_ss->global_timestamp);
725 	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
726 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
727 			  host_ss->global_timestamp);
728 
729 	return QDF_STATUS_SUCCESS;
730 }
731 
732 /**
733  * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
734  * Rx REO snapshots
735  * @mac_hw_ss: MAC HW snapshot
736  * @fw_forwarded_ss: FW forwarded snapshot
737  * @fw_consumed_ss: FW consumed snapshot
738  * @host_ss: Host snapshot
739  * @link: link ID
740  *
741  * Return: QDF_STATUS
742  */
743 static QDF_STATUS
744 mgmt_rx_reo_invalidate_stale_snapshots
745 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
746 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
747 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
748 			 struct mgmt_rx_reo_snapshot_params *host_ss,
749 			 uint8_t link)
750 {
751 	if (!mac_hw_ss->valid)
752 		return QDF_STATUS_SUCCESS;
753 
754 	if (host_ss->valid) {
755 		if (!mgmt_rx_reo_compare_global_timestamps_gte
756 					(mac_hw_ss->global_timestamp,
757 					 host_ss->global_timestamp) ||
758 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
759 					(mac_hw_ss->mgmt_pkt_ctr,
760 					 host_ss->mgmt_pkt_ctr)) {
761 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
762 						    fw_consumed_ss, host_ss);
763 			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
764 					  link);
765 			host_ss->valid = false;
766 		}
767 	}
768 
769 	if (fw_forwarded_ss->valid) {
770 		if (!mgmt_rx_reo_compare_global_timestamps_gte
771 					(mac_hw_ss->global_timestamp,
772 					 fw_forwarded_ss->global_timestamp) ||
773 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
774 					(mac_hw_ss->mgmt_pkt_ctr,
775 					 fw_forwarded_ss->mgmt_pkt_ctr)) {
776 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
777 						    fw_consumed_ss, host_ss);
778 			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
779 					  link);
780 			fw_forwarded_ss->valid = false;
781 		}
782 
783 		if (host_ss->valid && fw_forwarded_ss->valid &&
784 		    (mgmt_rx_reo_compare_global_timestamps_gte
785 					(host_ss->global_timestamp,
786 					 fw_forwarded_ss->global_timestamp) !=
787 		     mgmt_rx_reo_compare_pkt_ctrs_gte
788 					(host_ss->mgmt_pkt_ctr,
789 					 fw_forwarded_ss->mgmt_pkt_ctr))) {
790 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
791 						    fw_consumed_ss, host_ss);
792 			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
793 					  link);
794 			fw_forwarded_ss->valid = false;
795 		}
796 	}
797 
798 	if (fw_consumed_ss->valid) {
799 		if (!mgmt_rx_reo_compare_global_timestamps_gte
800 					(mac_hw_ss->global_timestamp,
801 					 fw_consumed_ss->global_timestamp) ||
802 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
803 					(mac_hw_ss->mgmt_pkt_ctr,
804 					 fw_consumed_ss->mgmt_pkt_ctr)) {
805 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
806 						    fw_consumed_ss, host_ss);
807 			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
808 					  link);
809 			fw_consumed_ss->valid = false;
810 		}
811 
812 		if (host_ss->valid && fw_consumed_ss->valid &&
813 		    (mgmt_rx_reo_compare_global_timestamps_gte
814 					(host_ss->global_timestamp,
815 					 fw_consumed_ss->global_timestamp) !=
816 		     mgmt_rx_reo_compare_pkt_ctrs_gte
817 					(host_ss->mgmt_pkt_ctr,
818 					 fw_consumed_ss->mgmt_pkt_ctr))) {
819 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
820 						    fw_consumed_ss, host_ss);
821 			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
822 					  link);
823 			fw_consumed_ss->valid = false;
824 		}
825 	}
826 
827 	return QDF_STATUS_SUCCESS;
828 }
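
/*
 * Illustration with assumed snapshot values: if the MAC HW snapshot shows
 * ctr = 120, ts = 5000 while the host snapshot still holds ctr = 130,
 * ts = 6000, the host appears to be ahead of MAC HW, which is impossible
 * for live snapshots; the host snapshot is therefore treated as stale and
 * invalidated above.
 */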
829 
830 /**
831  * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
832  * Rx REO snapshots
833  * @mac_hw_ss: MAC HW snapshot
834  * @fw_forwarded_ss: FW forwarded snapshot
835  * @fw_consumed_ss: FW consumed snapshot
836  * @host_ss: Host snapshot
837  *
838  * Return: QDF_STATUS
839  */
840 static QDF_STATUS
841 mgmt_rx_reo_snapshots_check_sanity
842 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
843 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
844 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
845 			 struct mgmt_rx_reo_snapshot_params *host_ss)
846 {
847 	QDF_STATUS status;
848 
849 	if (!mac_hw_ss->valid) {
850 		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
851 		    host_ss->valid) {
852 			mgmt_rx_reo_err("MAC HW SS is invalid");
853 			status = QDF_STATUS_E_INVAL;
854 			goto fail;
855 		}
856 
857 		return QDF_STATUS_SUCCESS;
858 	}
859 
860 	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
861 		if (host_ss->valid) {
862 			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
863 			status = QDF_STATUS_E_INVAL;
864 			goto fail;
865 		}
866 
867 		return QDF_STATUS_SUCCESS;
868 	}
869 
870 	if (fw_forwarded_ss->valid) {
871 		if (!mgmt_rx_reo_compare_global_timestamps_gte
872 					(mac_hw_ss->global_timestamp,
873 					 fw_forwarded_ss->global_timestamp)) {
874 			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
875 			status = QDF_STATUS_E_INVAL;
876 			goto fail;
877 		}
878 
879 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
880 					(mac_hw_ss->mgmt_pkt_ctr,
881 					 fw_forwarded_ss->mgmt_pkt_ctr)) {
882 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
883 			status = QDF_STATUS_E_INVAL;
884 			goto fail;
885 		}
886 	}
887 
888 	if (fw_consumed_ss->valid) {
889 		if (!mgmt_rx_reo_compare_global_timestamps_gte
890 					(mac_hw_ss->global_timestamp,
891 					 fw_consumed_ss->global_timestamp)) {
892 			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
893 			status = QDF_STATUS_E_INVAL;
894 			goto fail;
895 		}
896 
897 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
898 					(mac_hw_ss->mgmt_pkt_ctr,
899 					 fw_consumed_ss->mgmt_pkt_ctr)) {
900 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
901 			status = QDF_STATUS_E_INVAL;
902 			goto fail;
903 		}
904 	}
905 
906 	if (host_ss->valid) {
907 		if (!mgmt_rx_reo_compare_global_timestamps_gte
908 					(mac_hw_ss->global_timestamp,
909 					 host_ss->global_timestamp)) {
910 			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
911 			status = QDF_STATUS_E_INVAL;
912 			goto fail;
913 		}
914 
915 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
916 					(mac_hw_ss->mgmt_pkt_ctr,
917 					 host_ss->mgmt_pkt_ctr)) {
918 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
919 			status = QDF_STATUS_E_INVAL;
920 			goto fail;
921 		}
922 
923 		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
924 			if (!mgmt_rx_reo_compare_global_timestamps_gte
925 					(fw_forwarded_ss->global_timestamp,
926 					 host_ss->global_timestamp)) {
927 				mgmt_rx_reo_err("TS: FW forwarded < host SS");
928 				status = QDF_STATUS_E_INVAL;
929 				goto fail;
930 			}
931 
932 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
933 					(fw_forwarded_ss->mgmt_pkt_ctr,
934 					 host_ss->mgmt_pkt_ctr)) {
935 				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
936 				status = QDF_STATUS_E_INVAL;
937 				goto fail;
938 			}
939 		}
940 
941 		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
942 			if (!mgmt_rx_reo_compare_global_timestamps_gte
943 					(fw_consumed_ss->global_timestamp,
944 					 host_ss->global_timestamp)) {
945 				mgmt_rx_reo_err("TS: FW consumed < host SS");
946 				status = QDF_STATUS_E_INVAL;
947 				goto fail;
948 			}
949 
950 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
951 					(fw_consumed_ss->mgmt_pkt_ctr,
952 					 host_ss->mgmt_pkt_ctr)) {
953 				mgmt_rx_reo_err("CTR: FW consumed < host SS");
954 				status = QDF_STATUS_E_INVAL;
955 				goto fail;
956 			}
957 		}
958 
959 		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
960 			if (!mgmt_rx_reo_compare_global_timestamps_gte
961 					(fw_consumed_ss->global_timestamp,
962 					 host_ss->global_timestamp) &&
963 			    !mgmt_rx_reo_compare_global_timestamps_gte
964 					(fw_forwarded_ss->global_timestamp,
965 					 host_ss->global_timestamp)) {
966 				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
967 				status = QDF_STATUS_E_INVAL;
968 				goto fail;
969 			}
970 
971 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
972 					(fw_consumed_ss->mgmt_pkt_ctr,
973 					 host_ss->mgmt_pkt_ctr) &&
974 			    !mgmt_rx_reo_compare_pkt_ctrs_gte
975 					(fw_forwarded_ss->mgmt_pkt_ctr,
976 					 host_ss->mgmt_pkt_ctr)) {
977 				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
978 				status = QDF_STATUS_E_INVAL;
979 				goto fail;
980 			}
981 		}
982 	}
983 
984 	return QDF_STATUS_SUCCESS;
985 
986 fail:
987 	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
988 			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
989 			  mac_hw_ss->global_timestamp);
990 	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
991 			  fw_forwarded_ss->valid,
992 			  fw_forwarded_ss->mgmt_pkt_ctr,
993 			  fw_forwarded_ss->global_timestamp);
994 	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
995 			  fw_consumed_ss->valid,
996 			  fw_consumed_ss->mgmt_pkt_ctr,
997 			  fw_consumed_ss->global_timestamp);
998 	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
999 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
1000 			  host_ss->global_timestamp);
1001 
1002 	return status;
1003 }
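
/*
 * Illustration with assumed snapshot values: MAC HW (ctr = 100, ts = 9000),
 * FW consumed (ctr = 98, ts = 8800) and host (ctr = 95, ts = 8500) satisfy
 * the expected ordering MAC HW >= FW >= host and pass the sanity check.
 * If the host snapshot instead reported ctr = 102, the
 * "PKT CTR: MAC HW SS < host SS" check above would fail.
 */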
1004 
1005 /**
1006  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
1007  * frames an incoming frame should wait for before it gets delivered.
1008  * @in_frame_pdev: pdev on which this frame is received
1009  * @desc: frame Descriptor
1010  *
1011  * Each frame carries a MGMT pkt number which is local to that link, and a
1012  * timestamp which is global across all the links. MAC HW and FW also capture
1013  * the same details of the last frame that they have seen. Host also maintains
1014  * the details of the last frame it has seen. In total, there are 4 snapshots.
1015  * 1. MAC HW snapshot - latest frame seen at MAC HW
1016  * 2. FW forwarded snapshot - latest frame forwarded to the Host
1017  * 3. FW consumed snapshot - latest frame consumed by the FW
1018  * 4. Host snapshot - latest frame seen by the Host
1019  * By using all these snapshots, this function tries to compute the wait count
1020  * for a given incoming frame on all links.
1021  *
1022  * Return: QDF_STATUS of operation
1023  */
1024 static QDF_STATUS
1025 wlan_mgmt_rx_reo_algo_calculate_wait_count(
1026 		struct wlan_objmgr_pdev *in_frame_pdev,
1027 		struct mgmt_rx_reo_frame_descriptor *desc)
1028 {
1029 	QDF_STATUS status;
1030 	uint8_t link;
1031 	int8_t grp_id;
1032 	int8_t in_frame_link;
1033 	int frames_pending, delta_fwd_host;
1034 	uint8_t snapshot_id;
1035 	struct wlan_objmgr_pdev *pdev;
1036 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
1037 	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
1038 	struct mgmt_rx_reo_snapshot_info *snapshot_info;
1039 	struct mgmt_rx_reo_snapshot_params snapshot_params
1040 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
1041 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
1042 					    *fw_consumed_ss, *host_ss;
1043 	struct mgmt_rx_reo_params *in_frame_params;
1044 	struct mgmt_rx_reo_wait_count *wait_count;
1045 
1046 	if (!in_frame_pdev) {
1047 		mgmt_rx_reo_err("pdev is null");
1048 		return QDF_STATUS_E_NULL_VALUE;
1049 	}
1050 
1051 	if (!desc) {
1052 		mgmt_rx_reo_err("Frame descriptor is null");
1053 		return QDF_STATUS_E_NULL_VALUE;
1054 	}
1055 
1056 	if (!desc->rx_params) {
1057 		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
1058 		return QDF_STATUS_E_NULL_VALUE;
1059 	}
1060 
1061 	in_frame_params = desc->rx_params->reo_params;
1062 	if (!in_frame_params) {
1063 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
1064 		return QDF_STATUS_E_NULL_VALUE;
1065 	}
1066 
1067 	wait_count = &desc->wait_count;
1068 
1069 	/* Get the MLO link ID of incoming frame */
1070 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
1071 	grp_id = wlan_get_mlo_grp_id_from_pdev(in_frame_pdev);
1072 	qdf_assert_always(in_frame_link >= 0);
1073 	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
1074 	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link, grp_id));
1075 
1076 	in_frame_rx_reo_pdev_ctx =
1077 			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
1078 	if (!in_frame_rx_reo_pdev_ctx) {
1079 		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
1080 		return QDF_STATUS_E_FAILURE;
1081 	}
1082 	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
1083 		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
1084 
1085 	/* Iterate over all the valid MLO links */
1086 	for (link = 0; link < MAX_MLO_LINKS; link++) {
1087 		/* No need to wait for any frames on an invalid link */
1088 		if (!mgmt_rx_reo_is_valid_link(link, grp_id)) {
1089 			frames_pending = 0;
1090 			goto update_pending_frames;
1091 		}
1092 
1093 		pdev = wlan_get_pdev_from_mlo_link_id(link, grp_id,
1094 						      WLAN_MGMT_RX_REO_ID);
1095 
1096 		/* No need to wait for any frames if the pdev is not found */
1097 		if (!pdev) {
1098 			mgmt_rx_reo_debug("pdev is null for link %d", link);
1099 			frames_pending = 0;
1100 			goto update_pending_frames;
1101 		}
1102 
1103 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
1104 		if (!rx_reo_pdev_ctx) {
1105 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
1106 					pdev);
1107 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1108 			return QDF_STATUS_E_FAILURE;
1109 		}
1110 
1111 		if (!rx_reo_pdev_ctx->init_complete) {
1112 			mgmt_rx_reo_debug("REO init in progress for link %d",
1113 					  link);
1114 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1115 			frames_pending = 0;
1116 			goto update_pending_frames;
1117 		}
1118 
1119 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
1120 		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;
1121 
1122 		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
1123 				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
1124 				 host_ss->global_timestamp);
1125 
1126 		snapshot_id = 0;
1127 		/* Read all the shared snapshots */
1128 		while (snapshot_id <
1129 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
1130 			snapshot_info = &rx_reo_pdev_ctx->
1131 				host_target_shared_snapshot_info[snapshot_id];
1132 
1133 			qdf_mem_zero(&snapshot_params[snapshot_id],
1134 				     sizeof(snapshot_params[snapshot_id]));
1135 
1136 			status = tgt_mgmt_rx_reo_read_snapshot(
1137 					pdev, snapshot_info, snapshot_id,
1138 					&snapshot_params[snapshot_id],
1139 					in_frame_rx_reo_pdev_ctx->raw_snapshots
1140 					[link][snapshot_id]);
1141 
1142 			/* Read operation shouldn't fail */
1143 			if (QDF_IS_STATUS_ERROR(status)) {
1144 				mgmt_rx_reo_err("snapshot(%d) read failed on "
1145 						"link (%d)", snapshot_id, link);
1146 				wlan_objmgr_pdev_release_ref(
1147 						pdev, WLAN_MGMT_RX_REO_ID);
1148 				return status;
1149 			}
1150 
1151 			/* If snapshot is valid, save it in the pdev context */
1152 			if (snapshot_params[snapshot_id].valid) {
1153 				rx_reo_pdev_ctx->
1154 				   last_valid_shared_snapshot[snapshot_id] =
1155 				   snapshot_params[snapshot_id];
1156 			}
1157 			desc->shared_snapshots[link][snapshot_id] =
1158 						snapshot_params[snapshot_id];
1159 
1160 			snapshot_id++;
1161 		}
1162 
1163 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1164 
1165 		mac_hw_ss = &snapshot_params
1166 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1167 		fw_forwarded_ss = &snapshot_params
1168 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
1169 		fw_consumed_ss = &snapshot_params
1170 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1171 
1172 		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
1173 								fw_forwarded_ss,
1174 								fw_consumed_ss,
1175 								host_ss, link);
1176 		if (QDF_IS_STATUS_ERROR(status)) {
1177 			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
1178 					link);
1179 			return status;
1180 		}
1181 
1182 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
1183 								*mac_hw_ss;
1184 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
1185 								*fw_forwarded_ss;
1186 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
1187 								*fw_consumed_ss;
1188 		desc->host_snapshot[link] = *host_ss;
1189 
1190 		status = mgmt_rx_reo_snapshots_check_sanity
1191 			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
1192 		if (QDF_IS_STATUS_ERROR(status)) {
1193 			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
1194 					   link);
1195 			return status;
1196 		}
1197 
1198 		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
1199 				 link, mac_hw_ss->valid,
1200 				 mac_hw_ss->mgmt_pkt_ctr,
1201 				 mac_hw_ss->global_timestamp);
1202 		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
1203 				 link, fw_forwarded_ss->valid,
1204 				 fw_forwarded_ss->mgmt_pkt_ctr,
1205 				 fw_forwarded_ss->global_timestamp);
1206 		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
1207 				 link, fw_consumed_ss->valid,
1208 				 fw_consumed_ss->mgmt_pkt_ctr,
1209 				 fw_consumed_ss->global_timestamp);
1210 
1211 		/* No need to wait for any frames on the same link */
1212 		if (link == in_frame_link) {
1213 			frames_pending = 0;
1214 			goto update_pending_frames;
1215 		}
1216 
1217 		/**
1218 		 * If MAC HW snapshot is invalid, the link has not started
1219 		 * receiving management frames. Set wait count to zero.
1220 		 */
1221 		if (!mac_hw_ss->valid) {
1222 			frames_pending = 0;
1223 			goto update_pending_frames;
1224 		}
1225 
1226 		/**
1227 		 * If host snapshot is invalid, wait for MAX number of frames.
1228 		 * When any frame in this link arrives at host, actual wait
1229 		 * counts will be updated.
1230 		 */
1231 		if (!host_ss->valid) {
1232 			wait_count->per_link_count[link] = UINT_MAX;
1233 			wait_count->total_count += UINT_MAX;
1234 			goto print_wait_count;
1235 		}
1236 
1237 		/**
1238 		 * If MAC HW snapshot sequence number and host snapshot
1239 		 * sequence number are same, all the frames received by
1240 		 * this link are processed by host. No need to wait for
1241 		 * any frames from this link.
1242 		 */
1243 		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
1244 						   host_ss->mgmt_pkt_ctr)) {
1245 			frames_pending = 0;
1246 			goto update_pending_frames;
1247 		}
1248 
1249 		/**
1250 		 * Ideally, the incoming frame has to wait for only those frames
1251 		 * (on other links) which meet all the below criterion.
1252 		 * 1. Frame's timestamp is less than incoming frame's
1253 		 * 2. Frame is supposed to be consumed by the Host
1254 		 * 3. Frame is not yet seen by the Host.
1255 		 * We may not be able to compute the exact optimal wait count
1256 		 * because HW/FW provides a limited assist.
1257 		 * This algorithm tries to get the best estimate of wait count
1258 		 * by not waiting for those frames where we have a conclusive
1259 		 * evidence that we don't have to wait for those frames.
1260 		 */
1261 
1262 		/**
1263 		 * If this link has already seen a frame whose timestamp is
1264 		 * greater than or equal to incoming frame's timestamp,
1265 		 * then no need to wait for any frames on this link.
1266 		 * If the total wait count becomes zero, then the policy on
1267 		 * whether to deliver such a frame to upper layers is handled
1268 		 * separately.
1269 		 */
1270 		if (mgmt_rx_reo_compare_global_timestamps_gte(
1271 				host_ss->global_timestamp,
1272 				in_frame_params->global_timestamp)) {
1273 			frames_pending = 0;
1274 			goto update_pending_frames;
1275 		}
1276 
1277 		/**
1278 		 * For starters, we only have to wait for the frames that are
1279 		 * seen by MAC HW but not yet seen by Host. The frames which
1280 		 * reach MAC HW later are guaranteed to have a timestamp
1281 		 * greater than incoming frame's timestamp.
1282 		 */
1283 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
1284 					mac_hw_ss->mgmt_pkt_ctr,
1285 					host_ss->mgmt_pkt_ctr);
1286 		qdf_assert_always(frames_pending >= 0);
1287 
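		/*
		 * Worked example with assumed counter values: if on this link
		 * host_ss->mgmt_pkt_ctr = 10 and mac_hw_ss->mgmt_pkt_ctr = 15,
		 * the first estimate above is 5 pending frames. If the MAC HW
		 * timestamp is already newer than the incoming frame's and the
		 * FW consumed snapshot (also newer) shows mgmt_pkt_ctr = 13,
		 * the block below refines the estimate to 13 - 10 - 1 = 2.
		 */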
1288 		if (frames_pending &&
1289 		    mgmt_rx_reo_compare_global_timestamps_gte
1290 					(mac_hw_ss->global_timestamp,
1291 					 in_frame_params->global_timestamp)) {
1292 			/**
1293 			 * Last frame seen at MAC HW has timestamp greater than
1294 			 * or equal to incoming frame's timestamp. So no need to
1295 			 * wait for that last frame, but we can't conclusively
1296 			 * say anything about timestamp of frames before the
1297 			 * last frame, so try to wait for all of those frames.
1298 			 */
1299 			frames_pending--;
1300 			qdf_assert_always(frames_pending >= 0);
1301 
1302 			if (fw_consumed_ss->valid &&
1303 			    mgmt_rx_reo_compare_global_timestamps_gte(
1304 				fw_consumed_ss->global_timestamp,
1305 				in_frame_params->global_timestamp)) {
1306 				/**
1307 				 * Last frame consumed by the FW has timestamp
1308 				 * greater than or equal to incoming frame's.
1309 				 * That means all the frames from
1310 				 * fw_consumed_ss->mgmt_pkt_ctr to
1311 				 * mac_hw->mgmt_pkt_ctr will have timestamp
1312 				 * greater than or equal to incoming frame's and
1313 				 * hence, no need to wait for those frames.
1314 				 * We just need to wait for frames from
1315 				 * host_ss->mgmt_pkt_ctr to
1316 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
1317 				 * better estimate over the above estimate,
1318 				 * so update frames_pending.
1319 				 */
1320 				frames_pending =
1321 				  mgmt_rx_reo_subtract_pkt_ctrs(
1322 				      fw_consumed_ss->mgmt_pkt_ctr,
1323 				      host_ss->mgmt_pkt_ctr) - 1;
1324 
1325 				qdf_assert_always(frames_pending >= 0);
1326 
1327 				/**
1328 				 * Last frame forwarded to Host has timestamp
1329 				 * less than incoming frame's. That means all
1330 				 * the frames starting from
1331 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
1332 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
1333 				 * the FW and hence, no need to wait for those
1334 				 * frames. We just need to wait for frames
1335 				 * from host_ss->mgmt_pkt_ctr to
1336 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
1337 				 * better estimate over the above estimate,
1338 				 * so update frames_pending.
1339 				 */
1340 				if (fw_forwarded_ss->valid &&
1341 				    !mgmt_rx_reo_compare_global_timestamps_gte(
1342 					fw_forwarded_ss->global_timestamp,
1343 					in_frame_params->global_timestamp)) {
1344 					frames_pending =
1345 					  mgmt_rx_reo_subtract_pkt_ctrs(
1346 					      fw_forwarded_ss->mgmt_pkt_ctr,
1347 					      host_ss->mgmt_pkt_ctr);
1348 
1349 					/**
1350 					 * frames_pending can be negative in
1351 					 * cases where there are no frames
1352 					 * getting forwarded to the Host. No
1353 					 * need to wait for any frames in that
1354 					 * case.
1355 					 */
1356 					if (frames_pending < 0)
1357 						frames_pending = 0;
1358 				}
1359 			}
1360 
1361 			/**
1362 			 * Last frame forwarded to Host has timestamp greater
1363 			 * than or equal to incoming frame's. That means all the
1364 			 * frames from fw_forwarded->mgmt_pkt_ctr to
1365 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
1366 			 * or equal to incoming frame's and hence, no need to
1367 			 * wait for those frames. We may have to just wait for
1368 			 * frames from host_ss->mgmt_pkt_ctr to
1369 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
1370 			 */
1371 			if (fw_forwarded_ss->valid &&
1372 			    mgmt_rx_reo_compare_global_timestamps_gte(
1373 				fw_forwarded_ss->global_timestamp,
1374 				in_frame_params->global_timestamp)) {
1375 				delta_fwd_host =
1376 				  mgmt_rx_reo_subtract_pkt_ctrs(
1377 				    fw_forwarded_ss->mgmt_pkt_ctr,
1378 				    host_ss->mgmt_pkt_ctr) - 1;
1379 
1380 				qdf_assert_always(delta_fwd_host >= 0);
1381 
1382 				/**
1383 				 * This will be a better estimate over the one
1384 				 * we computed using mac_hw_ss but this may or
1385 				 * may not be a better estimate over the
1386 				 * one we computed using fw_consumed_ss.
1387 				 * When timestamps of both fw_consumed_ss and
1388 				 * fw_forwarded_ss are greater than incoming
1389 				 * frame's but timestamp of fw_consumed_ss is
1390 				 * smaller than fw_forwarded_ss, then
1391 				 * frames_pending will be smaller than
1392 				 * delta_fwd_host, the reverse will be true in
1393 				 * other cases. Instead of checking for all
1394 				 * those cases, just waiting for the minimum
1395 				 * among these two should be sufficient.
1396 				 */
1397 				frames_pending = qdf_min(frames_pending,
1398 							 delta_fwd_host);
1399 				qdf_assert_always(frames_pending >= 0);
1400 			}
1401 		}
1402 
1403 update_pending_frames:
1404 			qdf_assert_always(frames_pending >= 0);
1405 
1406 			wait_count->per_link_count[link] = frames_pending;
1407 			wait_count->total_count += frames_pending;
1408 
1409 print_wait_count:
1410 			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
1411 					 link, wait_count->per_link_count[link],
1412 					 wait_count->total_count);
1413 	}
1414 
1415 	return QDF_STATUS_SUCCESS;
1416 }
1417 
1418 /**
1419  * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
1420  * information about a reo list entry for debug purposes.
1421  * @link_id: link id
1422  * @mgmt_pkt_ctr: management packet counter
1423  * @global_timestamp: global time stamp
1424  * @wait_count: wait count values
1425  * @status: status of the entry in the list
1426  * @entry: pointer to reo list entry
1427  */
1428 struct mgmt_rx_reo_list_entry_debug_info {
1429 	uint8_t link_id;
1430 	uint16_t mgmt_pkt_ctr;
1431 	uint32_t global_timestamp;
1432 	struct mgmt_rx_reo_wait_count wait_count;
1433 	uint32_t status;
1434 	struct mgmt_rx_reo_list_entry *entry;
1435 };
1436 
1437 /**
1438  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1439  * @reo_list: Pointer to reorder list
1440  *
1441  * Return: QDF_STATUS
1442  */
1443 static QDF_STATUS
1444 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1445 {
1446 	uint32_t reo_list_size;
1447 	uint32_t index;
1448 	struct mgmt_rx_reo_list_entry *cur_entry;
1449 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1450 
1451 	if (!reo_list) {
1452 		mgmt_rx_reo_err("Pointer to reo list is null");
1453 		return QDF_STATUS_E_NULL_VALUE;
1454 	}
1455 
1456 	qdf_spin_lock_bh(&reo_list->list_lock);
1457 
1458 	reo_list_size = qdf_list_size(&reo_list->list);
1459 
1460 	if (reo_list_size == 0) {
1461 		qdf_spin_unlock_bh(&reo_list->list_lock);
1462 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1463 				  reo_list_size);
1464 		return QDF_STATUS_SUCCESS;
1465 	}
1466 
1467 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1468 	if (!debug_info) {
1469 		qdf_spin_unlock_bh(&reo_list->list_lock);
1470 		mgmt_rx_reo_err("Memory allocation failed");
1471 		return QDF_STATUS_E_NOMEM;
1472 	}
1473 
1474 	index = 0;
1475 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1476 		debug_info[index].link_id =
1477 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1478 		debug_info[index].mgmt_pkt_ctr =
1479 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1480 		debug_info[index].global_timestamp =
1481 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1482 		debug_info[index].wait_count = cur_entry->wait_count;
1483 		debug_info[index].status = cur_entry->status;
1484 		debug_info[index].entry = cur_entry;
1485 
1486 		++index;
1487 	}
1488 
1489 	qdf_spin_unlock_bh(&reo_list->list_lock);
1490 
1491 	mgmt_rx_reo_debug("Reorder list");
1492 	mgmt_rx_reo_debug("##################################################");
1493 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1494 			  reo_list_size);
1495 	for (index = 0; index < reo_list_size; index++) {
1496 		uint8_t link_id;
1497 
1498 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1499 				  index, debug_info[index].link_id,
1500 				  debug_info[index].global_timestamp,
1501 				  debug_info[index].mgmt_pkt_ctr,
1502 				  debug_info[index].status,
1503 				  debug_info[index].entry);
1504 
1505 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1506 				  debug_info[index].wait_count.total_count);
1507 
1508 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1509 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1510 					  link_id, debug_info[index].wait_count.
1511 					  per_link_count[link_id]);
1512 	}
1513 	mgmt_rx_reo_debug("##################################################");
1514 
1515 	qdf_mem_free(debug_info);
1516 
1517 	return QDF_STATUS_SUCCESS;
1518 }
1519 
1520 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1521 /**
1522  * mgmt_rx_reo_egress_frame_debug_info_enabled() - API to check whether egress
1523  * frame info debug feature is enabled
1524  * @egress_frame_debug_info: Pointer to egress frame debug info object
1525  *
1526  * Return: true or false
1527  */
1528 static bool
1529 mgmt_rx_reo_egress_frame_debug_info_enabled
1530 			(struct reo_egress_debug_info *egress_frame_debug_info)
1531 {
1532 	return egress_frame_debug_info->frame_list_size;
1533 }
1534 
1535 /**
1536  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1537  * related to frames going out of the reorder module
1538  * @reo_ctx: Pointer to reorder context
1539  *
1540  * API to print the stats related to frames going out of the management
1541  * Rx reorder module.
1542  *
1543  * Return: QDF_STATUS
1544  */
1545 static QDF_STATUS
1546 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1547 {
1548 	struct reo_egress_frame_stats *stats;
1549 	uint8_t link_id;
1550 	uint8_t reason;
1551 	uint64_t total_delivery_attempts_count = 0;
1552 	uint64_t total_delivery_success_count = 0;
1553 	uint64_t total_premature_delivery_count = 0;
1554 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
1555 	uint64_t delivery_count_per_reason[MGMT_RX_REO_RELEASE_REASON_MAX] = {0};
1556 	uint64_t total_delivery_count = 0;
1557 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
1558 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
1559 
1560 	if (!reo_ctx)
1561 		return QDF_STATUS_E_NULL_VALUE;
1562 
1563 	stats = &reo_ctx->egress_frame_debug_info.stats;
1564 
1565 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1566 		total_delivery_attempts_count +=
1567 				stats->delivery_attempts_count[link_id];
1568 		total_delivery_success_count +=
1569 				stats->delivery_success_count[link_id];
1570 		total_premature_delivery_count +=
1571 				stats->premature_delivery_count[link_id];
1572 	}
1573 
1574 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1575 		for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX;
1576 		     reason++)
1577 			delivery_count_per_link[link_id] +=
1578 				stats->delivery_count[link_id][reason];
1579 		total_delivery_count += delivery_count_per_link[link_id];
1580 	}
1581 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++)
1582 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1583 			delivery_count_per_reason[reason] +=
1584 				stats->delivery_count[link_id][reason];
1585 
1586 	mgmt_rx_reo_alert("Egress frame stats:");
1587 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
1588 	mgmt_rx_reo_alert("\t------------------------------------------");
1589 	mgmt_rx_reo_alert("\t|link id   |Attempts |Success |Premature |");
1590 	mgmt_rx_reo_alert("\t|          | count   | count  | count    |");
1591 	mgmt_rx_reo_alert("\t------------------------------------------");
1592 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1593 		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
1594 				  stats->delivery_attempts_count[link_id],
1595 				  stats->delivery_success_count[link_id],
1596 				  stats->premature_delivery_count[link_id]);
1597 		mgmt_rx_reo_alert("\t------------------------------------------");
1598 	}
1599 	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
1600 			  total_delivery_attempts_count,
1601 			  total_delivery_success_count,
1602 			  total_premature_delivery_count);
1603 
1604 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
1605 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
1606 	mgmt_rx_reo_alert("\tRELEASE_REASON_ZERO_WAIT_COUNT - 0x%lx",
1607 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
1608 	mgmt_rx_reo_alert("\tRELEASE_REASON_AGED_OUT - 0x%lx",
1609 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT);
1610 	mgmt_rx_reo_alert("\tRELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1611 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1612 	mgmt_rx_reo_alert("\tRELEASE_REASON_LIST_MAX_SIZE_EXCEEDED - 0x%lx",
1613 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED);
1614 
1615 	qdf_mem_set(delivery_reason_stats_boarder_a,
1616 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
1617 	qdf_mem_set(delivery_reason_stats_boarder_b,
1618 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');
1619 
1620 	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
1621 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
1622 			  "", "", "", "", "", "");
1623 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
1624 			  "0", "1", "2", "3", "4", "5");
1625 	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1626 
1627 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++) {
1628 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
1629 				  reason, stats->delivery_count[0][reason],
1630 				  stats->delivery_count[1][reason],
1631 				  stats->delivery_count[2][reason],
1632 				  stats->delivery_count[3][reason],
1633 				  stats->delivery_count[4][reason],
1634 				  stats->delivery_count[5][reason],
1635 				  delivery_count_per_reason[reason]);
1636 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1637 	}
1638 	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1639 			  "", delivery_count_per_link[0],
1640 			  delivery_count_per_link[1],
1641 			  delivery_count_per_link[2],
1642 			  delivery_count_per_link[3],
1643 			  delivery_count_per_link[4],
1644 			  delivery_count_per_link[5],
1645 			  total_delivery_count);
1646 
1647 	return QDF_STATUS_SUCCESS;
1648 }
1649 
1650 /**
1651  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1652  * frame exiting the reorder module. Logging is done before attempting the frame
1653  * delivery to upper layers.
1654  * @reo_ctx: management rx reorder context
1655  * @entry: Pointer to reorder list entry
1656  *
1657  * Return: QDF_STATUS of operation
1658  */
1659 static QDF_STATUS
1660 mgmt_rx_reo_log_egress_frame_before_delivery(
1661 					struct mgmt_rx_reo_context *reo_ctx,
1662 					struct mgmt_rx_reo_list_entry *entry)
1663 {
1664 	struct reo_egress_debug_info *egress_frame_debug_info;
1665 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1666 	struct reo_egress_frame_stats *stats;
1667 	uint8_t link_id;
1668 
1669 	if (!reo_ctx || !entry)
1670 		return QDF_STATUS_E_NULL_VALUE;
1671 
1672 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1673 
1674 	stats = &egress_frame_debug_info->stats;
1675 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
1676 	stats->delivery_attempts_count[link_id]++;
1677 	if (entry->is_premature_delivery)
1678 		stats->premature_delivery_count[link_id]++;
1679 
1680 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
1681 						(egress_frame_debug_info))
1682 		return QDF_STATUS_SUCCESS;
1683 
1684 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1685 			[egress_frame_debug_info->next_index];
1686 
1687 	cur_frame_debug_info->link_id = link_id;
1688 	cur_frame_debug_info->mgmt_pkt_ctr =
1689 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
1690 	cur_frame_debug_info->global_timestamp =
1691 				mgmt_rx_reo_get_global_ts(entry->rx_params);
1692 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
1693 	cur_frame_debug_info->final_wait_count = entry->wait_count;
1694 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
1695 		     entry->shared_snapshots,
1696 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
1697 			     sizeof(entry->shared_snapshots)));
1698 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
1699 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
1700 			     sizeof(entry->host_snapshot)));
1701 	cur_frame_debug_info->insertion_ts = entry->insertion_ts;
1702 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
1703 	cur_frame_debug_info->removal_ts =  entry->removal_ts;
1704 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
1705 	cur_frame_debug_info->release_reason = entry->release_reason;
1706 	cur_frame_debug_info->is_premature_delivery =
1707 						entry->is_premature_delivery;
1708 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
1709 
1710 	return QDF_STATUS_SUCCESS;
1711 }
1712 
1713 /**
1714  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1715  * frame exiting the reorder module. Logging is done after attempting the frame
1716  * delivery to upper layer.
1717  * @reo_ctx: management rx reorder context
1718  * @entry: Pointer to reorder list entry
1719  *
1720  * Return: QDF_STATUS of operation
1721  */
1722 static QDF_STATUS
1723 mgmt_rx_reo_log_egress_frame_after_delivery(
1724 					struct mgmt_rx_reo_context *reo_ctx,
1725 					struct mgmt_rx_reo_list_entry *entry,
1726 					uint8_t link_id)
1727 {
1728 	struct reo_egress_debug_info *egress_frame_debug_info;
1729 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1730 	struct reo_egress_frame_stats *stats;
1731 
1732 	if (!reo_ctx || !entry)
1733 		return QDF_STATUS_E_NULL_VALUE;
1734 
1735 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1736 
1737 	stats = &egress_frame_debug_info->stats;
1738 	if (entry->is_delivered) {
1739 		uint8_t release_reason = entry->release_reason;
1740 
1741 		stats->delivery_count[link_id][release_reason]++;
1742 		stats->delivery_success_count[link_id]++;
1743 	}
1744 
1745 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
1746 						(egress_frame_debug_info))
1747 		return QDF_STATUS_SUCCESS;
1748 
1749 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1750 			[egress_frame_debug_info->next_index];
1751 
1752 	cur_frame_debug_info->is_delivered = entry->is_delivered;
1753 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
1754 					cur_frame_debug_info->egress_timestamp;
1755 
1756 	egress_frame_debug_info->next_index++;
1757 	egress_frame_debug_info->next_index %=
1758 				egress_frame_debug_info->frame_list_size;
1759 	if (egress_frame_debug_info->next_index == 0)
1760 		egress_frame_debug_info->wrap_aroud = true;
1761 
1762 	return QDF_STATUS_SUCCESS;
1763 }
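
/*
 * Illustrative note (hypothetical size, not a requirement of the driver):
 * the egress debug frame list is used as a circular buffer. With
 * frame_list_size = 4 the write index advances as
 *
 *     next_index: 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> ...
 *
 * and wrap_aroud turns true the first time next_index wraps back to 0.
 * From that point on every slot holds a valid record and the oldest record
 * always sits at next_index.
 */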
1764 
1765 /**
1766  * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
1767  * about the latest frames leaving the reorder module
1768  * @reo_ctx: management rx reorder context
1769  * @num_frames: Number of frames for which the debug information is to be
1770  * printed. If @num_frames is 0, then debug information about all the frames
1771  * in the ring buffer will be printed.
1772  *
1773  * Return: QDF_STATUS of operation
1774  */
1775 static QDF_STATUS
1776 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
1777 					  uint16_t num_frames)
1778 {
1779 	struct reo_egress_debug_info *egress_frame_debug_info;
1780 	int start_index;
1781 	uint16_t index;
1782 	uint16_t entry;
1783 	uint16_t num_valid_entries;
1784 	uint16_t num_entries_to_print;
1785 	char *boarder;
1786 
1787 	if (!reo_ctx)
1788 		return QDF_STATUS_E_NULL_VALUE;
1789 
1790 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1791 
1792 	if (egress_frame_debug_info->wrap_aroud)
1793 		num_valid_entries = egress_frame_debug_info->frame_list_size;
1794 	else
1795 		num_valid_entries = egress_frame_debug_info->next_index;
1796 
1797 	if (num_frames == 0) {
1798 		num_entries_to_print = num_valid_entries;
1799 
1800 		if (egress_frame_debug_info->wrap_aroud)
1801 			start_index = egress_frame_debug_info->next_index;
1802 		else
1803 			start_index = 0;
1804 	} else {
1805 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
1806 
1807 		start_index = (egress_frame_debug_info->next_index -
1808 			       num_entries_to_print +
1809 			       egress_frame_debug_info->frame_list_size)
1810 			      % egress_frame_debug_info->frame_list_size;
1811 
1812 		qdf_assert_always(start_index >= 0 &&
1813 				  start_index < egress_frame_debug_info->frame_list_size);
1814 	}
1815 
1816 	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
1817 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
1818 				num_frames,
1819 				egress_frame_debug_info->wrap_aroud,
1820 				egress_frame_debug_info->next_index);
1821 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
1822 				start_index, num_entries_to_print);
1823 
1824 	if (!num_entries_to_print)
1825 		return QDF_STATUS_SUCCESS;
1826 
1827 	boarder = egress_frame_debug_info->boarder;
1828 
1829 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1830 	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%5s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1831 				"No.", "CPU", "Link", "SeqNo", "Global ts",
1832 				"Ingress ts", "Insert. ts", "Removal ts",
1833 				"Egress ts", "E Dur", "W Dur", "Flags", "Rea.",
1834 				"Final wait count", "Initial wait count",
1835 				"Snapshot : link 0", "Snapshot : link 1",
1836 				"Snapshot : link 2", "Snapshot : link 3",
1837 				"Snapshot : link 4", "Snapshot : link 5");
1838 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1839 
1840 	index = start_index;
1841 	for (entry = 0; entry < num_entries_to_print; entry++) {
1842 		struct reo_egress_debug_frame_info *info;
1843 		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
1844 		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1845 		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1846 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
1847 		char flag_premature_delivery = ' ';
1848 		char flag_error = ' ';
1849 		uint8_t link;
1850 
1851 		info = &reo_ctx->egress_frame_debug_info.frame_list[index];
1852 
1853 		if (!info->is_delivered)
1854 			flag_error = 'E';
1855 
1856 		if (info->is_premature_delivery)
1857 			flag_premature_delivery = 'P';
1858 
1859 		snprintf(flags, sizeof(flags), "%c %c", flag_error,
1860 			 flag_premature_delivery);
1861 		snprintf(initial_wait_count, sizeof(initial_wait_count),
1862 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1863 			 info->initial_wait_count.total_count,
1864 			 info->initial_wait_count.per_link_count[0],
1865 			 info->initial_wait_count.per_link_count[1],
1866 			 info->initial_wait_count.per_link_count[2],
1867 			 info->initial_wait_count.per_link_count[3],
1868 			 info->initial_wait_count.per_link_count[4],
1869 			 info->initial_wait_count.per_link_count[5]);
1870 		snprintf(final_wait_count, sizeof(final_wait_count),
1871 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1872 			 info->final_wait_count.total_count,
1873 			 info->final_wait_count.per_link_count[0],
1874 			 info->final_wait_count.per_link_count[1],
1875 			 info->final_wait_count.per_link_count[2],
1876 			 info->final_wait_count.per_link_count[3],
1877 			 info->final_wait_count.per_link_count[4],
1878 			 info->final_wait_count.per_link_count[5]);
1879 
1880 		for (link = 0; link < MAX_MLO_LINKS; link++) {
1881 			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1882 			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1883 			char fw_forwarded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1884 			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1885 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
1886 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
1887 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
1888 			struct mgmt_rx_reo_snapshot_params *host_ss;
1889 
1890 			mac_hw_ss = &info->shared_snapshots
1891 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1892 			fw_consumed_ss = &info->shared_snapshots
1893 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1894 			fw_forwarded_ss = &info->shared_snapshots
1895 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
1896 			host_ss = &info->host_snapshot[link];
1897 
1898 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
1899 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
1900 				 mac_hw_ss->global_timestamp);
1901 			snprintf(fw_consumed, sizeof(fw_consumed),
1902 				 "(%1u, %5u, %10u)",
1903 				 fw_consumed_ss->valid,
1904 				 fw_consumed_ss->mgmt_pkt_ctr,
1905 				 fw_consumed_ss->global_timestamp);
1906 			snprintf(fw_forwarded, sizeof(fw_forwarded),
1907 				 "(%1u, %5u, %10u)",
1908 				 fw_forwarded_ss->valid,
1909 				 fw_forwarded_ss->mgmt_pkt_ctr,
1910 				 fw_forwarded_ss->global_timestamp);
1911 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
1912 				 host_ss->valid,
1913 				 host_ss->mgmt_pkt_ctr,
1914 				 host_ss->global_timestamp);
1915 			snprintf(snapshots[link], sizeof(snapshots[link]),
1916 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
1917 				 fw_forwarded, host);
1918 		}
1919 
1920 		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1921 					entry, info->cpu_id, info->link_id,
1922 					info->mgmt_pkt_ctr,
1923 					info->global_timestamp,
1924 					info->ingress_timestamp,
1925 					info->insertion_ts, info->removal_ts,
1926 					info->egress_timestamp,
1927 					info->egress_duration,
1928 					info->removal_ts - info->insertion_ts,
1929 					flags, info->release_reason,
1930 					final_wait_count, initial_wait_count,
1931 					snapshots[0], snapshots[1],
1932 					snapshots[2], snapshots[3],
1933 					snapshots[4], snapshots[5]);
1934 		mgmt_rx_reo_alert_no_fl("%s", boarder);
1935 
1936 		index++;
1937 		index %= egress_frame_debug_info->frame_list_size;
1938 	}
1939 
1940 	return QDF_STATUS_SUCCESS;
1941 }
1942 #else
1943 /**
1944  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1945  * related to frames going out of the reorder module
1946  * @reo_ctx: Pointer to reorder context
1947  *
1948  * API to print the stats related to frames going out of the management
1949  * Rx reorder module.
1950  *
1951  * Return: QDF_STATUS
1952  */
1953 static QDF_STATUS
1954 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1955 {
1956 	return QDF_STATUS_SUCCESS;
1957 }
1958 
1959 /**
1960  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1961  * frame exiting the reorder module. Logging is done before attempting the frame
1962  * delivery to upper layers.
1963  * @reo_ctx: management rx reorder context
1964  * @entry: Pointer to reorder list entry
1965  *
1966  * Return: QDF_STATUS of operation
1967  */
1968 static QDF_STATUS
1969 mgmt_rx_reo_log_egress_frame_before_delivery(
1970 					struct mgmt_rx_reo_context *reo_ctx,
1971 					struct mgmt_rx_reo_list_entry *entry)
1972 {
1973 	return QDF_STATUS_SUCCESS;
1974 }
1975 
1976 /**
1977  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1978  * frame exiting the reorder module. Logging is done after attempting the frame
1979  * delivery to upper layer.
1980  * @reo_ctx: management rx reorder context
1981  * @entry: Pointer to reorder list entry
1982  * @link_id: Link ID of the frame
1983  * Return: QDF_STATUS of operation
1984  */
1985 static QDF_STATUS
1986 mgmt_rx_reo_log_egress_frame_after_delivery(
1987 					struct mgmt_rx_reo_context *reo_ctx,
1988 					struct mgmt_rx_reo_list_entry *entry,
1989 					uint8_t link_id)
1990 {
1991 	return QDF_STATUS_SUCCESS;
1992 }
1993 
1994 /**
1995  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
1996  * the latest frames leaving the reorder module
1997  * @reo_ctx: management rx reorder context
1998  *
1999  * Return: QDF_STATUS of operation
2000  */
2001 static QDF_STATUS
2002 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2003 {
2004 	return QDF_STATUS_SUCCESS;
2005 }
2006 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2007 
2008 /**
2009  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
2010  * for releasing the reorder list entry to the upper layer from the
2011  * reorder list.
2012  * @entry: List entry
2013  *
2014  * This API expects the caller to acquire the spin lock protecting the reorder
2015  * list.
2016  *
2017  * Return: Reason for releasing the frame.
2018  */
2019 static uint8_t
2020 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
2021 {
2022 	uint8_t release_reason = 0;
2023 
2024 	if (!entry)
2025 		return 0;
2026 
2027 	if (MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry))
2028 		release_reason |=
2029 		   MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED;
2030 
2031 	if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
2032 		release_reason |=
2033 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;
2034 
2035 	if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
2036 		release_reason |=
2037 				MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
2038 
2039 	if (MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
2040 		release_reason |=
2041 		MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
2042 
2043 	return release_reason;
2044 }
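
/*
 * Example (hypothetical entry state): the release reason is a bit mask, so
 * more than one bit can be set for a single entry. An entry which has aged
 * out and whose wait count has also dropped to zero would be released with
 *
 *     release_reason = MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT |
 *                      MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
 *
 * mgmt_rx_reo_list_entry_send_up() asserts that at least one reason bit is
 * set before delivering the frame.
 */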
2045 
2046 /**
2047  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
2048  * @reo_list: Pointer to reorder list
2049  * @entry: List entry
2050  *
2051  * API to send the frame to the upper layer. This API has to be called only
2052  * for entries which can be released to upper layer. It is the caller's
2053  * responsibility to ensure that entry can be released (by using API
2054  * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
2055  * acquiring the lock which serializes the frame delivery to the upper layers.
2056  *
2057  * Return: QDF_STATUS
2058  */
2059 static QDF_STATUS
2060 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
2061 			       struct mgmt_rx_reo_list_entry *entry)
2062 {
2063 	uint8_t release_reason;
2064 	uint8_t link_id;
2065 	uint32_t entry_global_ts;
2066 	QDF_STATUS status;
2067 	QDF_STATUS temp;
2068 	struct mgmt_rx_reo_context *reo_context;
2069 
2070 	qdf_assert_always(reo_list);
2071 	qdf_assert_always(entry);
2072 
2073 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2074 	qdf_assert_always(reo_context);
2075 
2076 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2077 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
2078 
2079 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
2080 
2081 	qdf_assert_always(release_reason != 0);
2082 
2083 	entry->is_delivered = false;
2084 	entry->is_premature_delivery = false;
2085 	entry->release_reason = release_reason;
2086 
2087 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
2088 		entry->is_premature_delivery = true;
2089 		status = mgmt_rx_reo_handle_potential_premature_delivery(
2090 						reo_context, entry_global_ts);
2091 		if (QDF_IS_STATUS_ERROR(status))
2092 			goto exit;
2093 	}
2094 
2095 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
2096 							      entry);
2097 	if (QDF_IS_STATUS_ERROR(status))
2098 		goto exit;
2099 
2100 	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
2101 						 entry->rx_params);
2102 	/* Above call frees nbuf and rx_params, make it null explicitly */
2103 	entry->nbuf = NULL;
2104 	entry->rx_params = NULL;
2105 
2106 	if (QDF_IS_STATUS_ERROR(status))
2107 		goto exit_log;
2108 
2109 	entry->is_delivered = true;
2110 
2111 	status = QDF_STATUS_SUCCESS;
2112 
2113 exit_log:
2114 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry,
2115 							   link_id);
2116 	if (QDF_IS_STATUS_ERROR(temp))
2117 		status = temp;
2118 exit:
2119 	/**
2120 	 * Release the reference taken when the entry is inserted into
2121 	 * the reorder list
2122 	 */
2123 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2124 
2125 	return status;
2126 }
2127 
2128 /**
2129  * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
2130  * list entry can be sent to upper layers.
2131  * @reo_list: Pointer to reorder list
2132  * @entry: List entry
2133  *
2134  * Return: true if the entry is ready to be sent to the upper layer, else false
2135  */
2136 static bool
2137 mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
2138 					   struct mgmt_rx_reo_list_entry *entry)
2139 {
2140 	if (!reo_list || !entry)
2141 		return false;
2142 
2143 	return mgmt_rx_reo_list_max_size_exceeded(reo_list) ||
2144 	       !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
2145 	       entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
2146 	       MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME
2147 	       (entry);
2148 }
2149 
2150 /**
2151  * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
2152  * @reo_context: Pointer to management Rx reorder context
2153  *
2154  * This API releases the entries from the reorder list based on the following
2155  * conditions.
2156  *   a) Entries with total wait count equal to 0
2157  *   b) Entries which are timed out or entries with global time stamp <= global
2158  *      time stamp of the latest frame which is timed out. We can only release
2159  *      the entries in the increasing order of the global time stamp.
2160  *      So all the entries with global time stamp <= global time stamp of the
2161  *      latest timed out frame has to be released.
2162  *
2163  * Return: QDF_STATUS
2164  */
2165 static QDF_STATUS
2166 mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
2167 {
2168 	struct mgmt_rx_reo_list *reo_list;
2169 	QDF_STATUS status;
2170 
2171 	if (!reo_context) {
2172 		mgmt_rx_reo_err("reo context is null");
2173 		return QDF_STATUS_E_NULL_VALUE;
2174 	}
2175 
2176 	reo_list = &reo_context->reo_list;
2177 
2178 	qdf_spin_lock(&reo_context->frame_release_lock);
2179 
2180 	while (1) {
2181 		struct mgmt_rx_reo_list_entry *first_entry;
2182 		/* TODO yield if release_count > THRESHOLD */
2183 		uint16_t release_count = 0;
2184 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame =
2185 					&reo_list->ts_last_released_frame;
2186 		uint32_t entry_global_ts;
2187 
2188 		qdf_spin_lock_bh(&reo_list->list_lock);
2189 
2190 		first_entry = qdf_list_first_entry_or_null(
2191 			&reo_list->list, struct mgmt_rx_reo_list_entry, node);
2192 
2193 		if (!first_entry) {
2194 			status = QDF_STATUS_SUCCESS;
2195 			goto exit_unlock_list_lock;
2196 		}
2197 
2198 		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
2199 								first_entry)) {
2200 			status = QDF_STATUS_SUCCESS;
2201 			goto exit_unlock_list_lock;
2202 		}
2203 
2204 		if (mgmt_rx_reo_list_max_size_exceeded(reo_list))
2205 			first_entry->status |=
2206 				MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;
2207 
2208 		status = qdf_list_remove_node(&reo_list->list,
2209 					      &first_entry->node);
2210 		if (QDF_IS_STATUS_ERROR(status)) {
2211 			status = QDF_STATUS_E_FAILURE;
2212 			goto exit_unlock_list_lock;
2213 		}
2214 		first_entry->removal_ts = qdf_get_log_timestamp();
2215 
2216 		/**
2217 		 * Last released frame global time stamp is invalid means that
2218 		 * current frame is the first frame to be released to the
2219 		 * upper layer from the reorder list. Blindly update the last
2220 		 * released frame global time stamp to the current frame's
2221 		 * global time stamp and set the valid to true.
2222 		 * If the last released frame global time stamp is valid and
2223 		 * current frame's global time stamp is >= last released frame
2224 		 * global time stamp, deliver the current frame to upper layer
2225 		 * and update the last released frame global time stamp.
2226 		 */
2227 		entry_global_ts =
2228 			mgmt_rx_reo_get_global_ts(first_entry->rx_params);
2229 
2230 		if (!ts_last_released_frame->valid ||
2231 		    mgmt_rx_reo_compare_global_timestamps_gte(
2232 			entry_global_ts, ts_last_released_frame->global_ts)) {
2233 			struct mgmt_rx_event_params *params;
2234 
2235 			params = first_entry->rx_params;
2236 
2237 			ts_last_released_frame->global_ts = entry_global_ts;
2238 			ts_last_released_frame->start_ts =
2239 					mgmt_rx_reo_get_start_ts(params);
2240 			ts_last_released_frame->end_ts =
2241 					mgmt_rx_reo_get_end_ts(params);
2242 			ts_last_released_frame->valid = true;
2243 
2244 			qdf_timer_mod
2245 				(&reo_list->global_mgmt_rx_inactivity_timer,
2246 				 MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
2247 		} else {
2248 			/**
2249 			 * This should never happen. All the frames older than
2250 			 * the last frame released from the reorder list will be
2251 			 * discarded at the entry to reorder algorithm itself.
2252 			 */
2253 			qdf_assert_always(first_entry->is_parallel_rx);
2254 		}
2255 
2256 		qdf_spin_unlock_bh(&reo_list->list_lock);
2257 
2258 		status = mgmt_rx_reo_list_entry_send_up(reo_list,
2259 							first_entry);
2260 		if (QDF_IS_STATUS_ERROR(status)) {
2261 			status = QDF_STATUS_E_FAILURE;
2262 			qdf_mem_free(first_entry);
2263 			goto exit_unlock_frame_release_lock;
2264 		}
2265 
2266 		qdf_mem_free(first_entry);
2267 		release_count++;
2268 	}
2269 
2270 	status = QDF_STATUS_SUCCESS;
2271 	goto exit_unlock_frame_release_lock;
2272 
2273 exit_unlock_list_lock:
2274 	qdf_spin_unlock_bh(&reo_list->list_lock);
2275 exit_unlock_frame_release_lock:
2276 	qdf_spin_unlock(&reo_context->frame_release_lock);
2277 
2278 	return status;
2279 }
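
/*
 * Illustrative walk-through (hypothetical time stamps): if the list holds
 * entries with global time stamps 100, 101 and 105 and the entry with time
 * stamp 101 has aged out, then 100 (older than the latest aged out frame)
 * and 101 (aged out) are both ready and are delivered in that order, while
 * 105 keeps waiting. ts_last_released_frame ends up at 101, so a frame
 * arriving later with an older global time stamp may be treated as stale at
 * the ingress of the reorder algorithm.
 */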
2280 
2281 /**
2282  * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
2283  * @arg: Argument to timer handler
2284  *
2285  * This is the handler for periodic ageout timer used to timeout entries in the
2286  * This is the handler for the periodic ageout timer used to time out entries in
2287  *
2288  * Return: void
2289  */
2290 static void
2291 mgmt_rx_reo_list_ageout_timer_handler(void *arg)
2292 {
2293 	struct mgmt_rx_reo_list *reo_list = arg;
2294 	struct mgmt_rx_reo_list_entry *cur_entry;
2295 	uint64_t cur_ts;
2296 	QDF_STATUS status;
2297 	struct mgmt_rx_reo_context *reo_context;
2298 	/**
2299 	 * Stores the pointer to the entry in reorder list for the latest aged
2300 	 * out frame. Latest aged out frame is the aged out frame in reorder
2301 	 * list which has the largest global time stamp value.
2302 	 */
2303 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
2304 
2305 	qdf_assert_always(reo_list);
2306 
2307 	qdf_timer_mod(&reo_list->ageout_timer,
2308 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
2309 
2310 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2311 	qdf_assert_always(reo_context);
2312 
2313 	qdf_spin_lock_bh(&reo_list->list_lock);
2314 
2315 	cur_ts = qdf_get_log_timestamp();
2316 
2317 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2318 		if (cur_ts - cur_entry->insertion_ts >=
2319 		    reo_list->list_entry_timeout_us) {
2320 			latest_aged_out_entry = cur_entry;
2321 			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
2322 		}
2323 	}
2324 
2325 	if (latest_aged_out_entry) {
2326 		qdf_list_for_each(&reo_list->list, cur_entry, node) {
2327 			if (cur_entry == latest_aged_out_entry)
2328 				break;
2329 			cur_entry->status |= MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
2330 		}
2331 	}
2332 
2333 	qdf_spin_unlock_bh(&reo_list->list_lock);
2334 
2335 	if (latest_aged_out_entry) {
2336 		status = mgmt_rx_reo_list_release_entries(reo_context);
2337 		if (QDF_IS_STATUS_ERROR(status)) {
2338 			mgmt_rx_reo_err("Failed to release entries, ret = %d",
2339 					status);
2340 			return;
2341 		}
2342 	}
2343 }
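
/*
 * Illustrative example (hypothetical ages): with a list entry timeout of
 * 10 ms and entries (in global time stamp order) that have waited 12 ms,
 * 11 ms and 2 ms, the first two entries get MGMT_RX_REO_STATUS_AGED_OUT and
 * the second one becomes latest_aged_out_entry. Every entry ahead of it in
 * the list is additionally marked
 * MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME so that
 * mgmt_rx_reo_list_release_entries() can flush them in order.
 */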
2344 
2345 /**
2346  * mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler() - Timer handler
2347  * for global management Rx inactivity timer
2348  * @arg: Argument to timer handler
2349  *
2350  * This is the timer handler for tracking management Rx inactivity across
2351  * links.
2352  *
2353  * Return: void
2354  */
2355 static void
2356 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler(void *arg)
2357 {
2358 	struct mgmt_rx_reo_list *reo_list = arg;
2359 	struct mgmt_rx_reo_context *reo_context;
2360 	struct mgmt_rx_reo_global_ts_info *ts_last_released_frame;
2361 
2362 	qdf_assert_always(reo_list);
2363 	ts_last_released_frame = &reo_list->ts_last_released_frame;
2364 
2365 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2366 	qdf_assert_always(reo_context);
2367 
2368 	qdf_spin_lock(&reo_context->frame_release_lock);
2369 	qdf_spin_lock_bh(&reo_list->list_lock);
2370 
2371 	qdf_mem_zero(ts_last_released_frame, sizeof(*ts_last_released_frame));
2372 
2373 	qdf_spin_unlock_bh(&reo_list->list_lock);
2374 	qdf_spin_unlock(&reo_context->frame_release_lock);
2375 }
2376 
2377 /**
2378  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
2379  * frame received.
2380  * @frame_desc: Pointer to the frame descriptor
2381  * @entry: Pointer to the list entry
2382  *
2383  * This API prepares the reorder list entry corresponding to a management frame
2384  * to be consumed by host. This entry would be inserted at the appropriate
2385  * position in the reorder list.
2386  *
2387  * Return: QDF_STATUS
2388  */
2389 static QDF_STATUS
2390 mgmt_rx_reo_prepare_list_entry(
2391 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
2392 		struct mgmt_rx_reo_list_entry **entry)
2393 {
2394 	struct mgmt_rx_reo_list_entry *list_entry;
2395 	struct wlan_objmgr_pdev *pdev;
2396 	uint8_t link_id;
2397 	uint8_t ml_grp_id;
2398 
2399 	if (!frame_desc) {
2400 		mgmt_rx_reo_err("frame descriptor is null");
2401 		return QDF_STATUS_E_NULL_VALUE;
2402 	}
2403 
2404 	if (!entry) {
2405 		mgmt_rx_reo_err("Pointer to list entry is null");
2406 		return QDF_STATUS_E_NULL_VALUE;
2407 	}
2408 
2409 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2410 	ml_grp_id = mgmt_rx_reo_get_mlo_grp_id(frame_desc->rx_params);
2411 
2412 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, ml_grp_id,
2413 					      WLAN_MGMT_RX_REO_ID);
2414 	if (!pdev) {
2415 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
2416 				link_id);
2417 		return QDF_STATUS_E_NULL_VALUE;
2418 	}
2419 
2420 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
2421 	if (!list_entry) {
2422 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2423 		mgmt_rx_reo_err("List entry allocation failed");
2424 		return QDF_STATUS_E_NOMEM;
2425 	}
2426 
2427 	list_entry->pdev = pdev;
2428 	list_entry->nbuf = frame_desc->nbuf;
2429 	list_entry->rx_params = frame_desc->rx_params;
2430 	list_entry->wait_count = frame_desc->wait_count;
2431 	list_entry->initial_wait_count = frame_desc->wait_count;
2432 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
2433 		     qdf_min(sizeof(list_entry->shared_snapshots),
2434 			     sizeof(frame_desc->shared_snapshots)));
2435 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
2436 		     qdf_min(sizeof(list_entry->host_snapshot),
2437 			     sizeof(frame_desc->host_snapshot)));
2438 	list_entry->status = 0;
2439 	if (list_entry->wait_count.total_count)
2440 		list_entry->status |=
2441 			MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2442 
2443 	*entry = list_entry;
2444 
2445 	return QDF_STATUS_SUCCESS;
2446 }
2447 
2448 /**
2449  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
2450  * on the wait count of a frame received after that on air.
2451  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
2452  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
2453  *
2454  * This API optimizes the wait count of a frame based on the wait count of
2455  * a frame received after that on air. Old frame refers to the frame received
2456  * first on the air and new frame refers to the frame received after that.
2457  * We use the following fundamental idea. Wait counts for old frames can't be
2458  * more than wait counts for the new frame. Use this to optimize the wait count
2459  * for the old frames. Per link wait count of an old frame is minimum of the
2460  * per link wait count of the old frame and new frame.
2461  *
2462  * Return: QDF_STATUS
2463  */
2464 static QDF_STATUS
2465 mgmt_rx_reo_update_wait_count(
2466 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
2467 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
2468 {
2469 	uint8_t link_id;
2470 
2471 	qdf_assert_always(wait_count_old_frame);
2472 	qdf_assert_always(wait_count_new_frame);
2473 
2474 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2475 		if (wait_count_old_frame->per_link_count[link_id]) {
2476 			uint32_t temp_wait_count;
2477 			uint32_t wait_count_diff;
2478 
2479 			temp_wait_count =
2480 				wait_count_old_frame->per_link_count[link_id];
2481 			wait_count_old_frame->per_link_count[link_id] =
2482 				qdf_min(wait_count_old_frame->
2483 					per_link_count[link_id],
2484 					wait_count_new_frame->
2485 					per_link_count[link_id]);
2486 			wait_count_diff = temp_wait_count -
2487 				wait_count_old_frame->per_link_count[link_id];
2488 
2489 			wait_count_old_frame->total_count -= wait_count_diff;
2490 		}
2491 	}
2492 
2493 	return QDF_STATUS_SUCCESS;
2494 }
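
/*
 * Worked example (hypothetical wait counts): if an old frame is waiting for
 * {2, 0, 3} more frames on links 0/1/2 (total_count = 5) and a frame
 * received later on air reports waits of {1, 0, 1} (total_count = 2), the
 * old frame's per link waits are clipped to {1, 0, 1} and its total_count
 * drops by 3 to 2. Links where the old frame already waits for nothing are
 * left untouched.
 */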
2495 
2496 /**
2497  * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
2498  * @reo_list: Pointer to reorder list
2499  * @frame_desc: Pointer to frame descriptor
2500  * @is_queued: Pointer to flag indicating whether the frame got queued
2501  *
2502  * API to update the reorder list on every management frame reception.
2503  * This API does the following things.
2504  *   a) Update the wait counts for all the frames in the reorder list with
2505  *      global time stamp <= current frame's global time stamp. We use the
2506  *      following principle for updating the wait count in this case.
2507  *      Let A and B be two management frames with global time stamp of A <=
2508  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
2509  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
2510  *      min(WAi, WBi).
2511  *   b) If the current frame is to be consumed by host, insert it in the
2512  *      reorder list such that the list is always sorted in the increasing order
2513  *      of global time stamp. Update the wait count of the current frame based
2514  *      on the frame next to it in the reorder list (if any).
2515  *   c) Update the wait count of the frames in the reorder list with global
2516  *      time stamp > current frame's global time stamp. Let the current frame
2517  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
2518  *      all the frames in the reorder list with global time stamp > current
2519  *      frame's global time stamp.
2520  *
2521  * Return: QDF_STATUS
2522  */
2523 static QDF_STATUS
2524 mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
2525 			struct mgmt_rx_reo_frame_descriptor *frame_desc,
2526 			bool *is_queued)
2527 {
2528 	struct mgmt_rx_reo_list_entry *cur_entry;
2529 	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
2530 	bool least_greater_entry_found = false;
2531 	QDF_STATUS status;
2532 	uint32_t new_frame_global_ts;
2533 	struct mgmt_rx_reo_list_entry *new_entry = NULL;
2534 	uint16_t list_insertion_pos = 0;
2535 
2536 	if (!is_queued)
2537 		return QDF_STATUS_E_NULL_VALUE;
2538 	*is_queued = false;
2539 
2540 	if (!reo_list) {
2541 		mgmt_rx_reo_err("Mgmt Rx reo list is null");
2542 		return QDF_STATUS_E_NULL_VALUE;
2543 	}
2544 
2545 	if (!frame_desc) {
2546 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
2547 		return QDF_STATUS_E_NULL_VALUE;
2548 	}
2549 
2550 	new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
2551 
2552 	/* Prepare the list entry before acquiring lock */
2553 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2554 	    frame_desc->reo_required) {
2555 		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
2556 		if (QDF_IS_STATUS_ERROR(status)) {
2557 			mgmt_rx_reo_err("Failed to prepare list entry");
2558 			return QDF_STATUS_E_FAILURE;
2559 		}
2560 	}
2561 
2562 	qdf_spin_lock_bh(&reo_list->list_lock);
2563 
2564 	frame_desc->list_size_rx = qdf_list_size(&reo_list->list);
2565 
2566 	status = mgmt_rx_reo_is_stale_frame(&reo_list->ts_last_released_frame,
2567 					    frame_desc);
2568 	if (QDF_IS_STATUS_ERROR(status))
2569 		goto exit_free_entry;
2570 
2571 	if (frame_desc->is_stale) {
2572 		status = mgmt_rx_reo_handle_stale_frame(reo_list, frame_desc);
2573 		if (QDF_IS_STATUS_ERROR(status))
2574 			goto exit_free_entry;
2575 	}
2576 
2577 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2578 		uint32_t cur_entry_global_ts;
2579 
2580 		cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
2581 					cur_entry->rx_params);
2582 
2583 		if (!mgmt_rx_reo_compare_global_timestamps_gte(
2584 		    new_frame_global_ts, cur_entry_global_ts)) {
2585 			least_greater_entry = cur_entry;
2586 			least_greater_entry_found = true;
2587 			break;
2588 		}
2589 
2590 		qdf_assert_always(!frame_desc->is_stale ||
2591 				  cur_entry->is_parallel_rx);
2592 
2593 		list_insertion_pos++;
2594 
2595 		status = mgmt_rx_reo_update_wait_count(
2596 					&cur_entry->wait_count,
2597 					&frame_desc->wait_count);
2598 		if (QDF_IS_STATUS_ERROR(status))
2599 			goto exit_free_entry;
2600 
2601 		if (cur_entry->wait_count.total_count == 0)
2602 			cur_entry->status &=
2603 			      ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2604 	}
2605 
2606 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2607 	    !frame_desc->is_stale && frame_desc->reo_required) {
2608 		if (least_greater_entry_found) {
2609 			status = mgmt_rx_reo_update_wait_count(
2610 					&new_entry->wait_count,
2611 					&least_greater_entry->wait_count);
2612 
2613 			if (QDF_IS_STATUS_ERROR(status))
2614 				goto exit_free_entry;
2615 
2616 			frame_desc->wait_count = new_entry->wait_count;
2617 
2618 			if (new_entry->wait_count.total_count == 0)
2619 				new_entry->status &=
2620 					~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2621 		}
2622 
2623 		new_entry->insertion_ts = qdf_get_log_timestamp();
2624 		new_entry->ingress_timestamp = frame_desc->ingress_timestamp;
2625 		new_entry->is_parallel_rx = frame_desc->is_parallel_rx;
2626 		frame_desc->list_insertion_pos = list_insertion_pos;
2627 
2628 		if (least_greater_entry_found)
2629 			status = qdf_list_insert_before(
2630 					&reo_list->list, &new_entry->node,
2631 					&least_greater_entry->node);
2632 		else
2633 			status = qdf_list_insert_back(
2634 					&reo_list->list, &new_entry->node);
2635 
2636 		if (QDF_IS_STATUS_ERROR(status))
2637 			goto exit_free_entry;
2638 
2639 		*is_queued = true;
2640 
2641 		if (new_entry->wait_count.total_count == 0)
2642 			frame_desc->zero_wait_count_rx = true;
2643 
2644 		if (frame_desc->zero_wait_count_rx &&
2645 		    qdf_list_first_entry_or_null(&reo_list->list,
2646 						 struct mgmt_rx_reo_list_entry,
2647 						 node) == new_entry)
2648 			frame_desc->immediate_delivery = true;
2649 	}
2650 
2651 	if (least_greater_entry_found) {
2652 		cur_entry = least_greater_entry;
2653 
2654 		qdf_list_for_each_from(&reo_list->list, cur_entry, node) {
2655 			uint8_t frame_link_id;
2656 			struct mgmt_rx_reo_wait_count *wait_count;
2657 
2658 			frame_link_id =
2659 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2660 			wait_count = &cur_entry->wait_count;
2661 			if (wait_count->per_link_count[frame_link_id]) {
2662 				uint32_t old_wait_count;
2663 				uint32_t new_wait_count;
2664 				uint32_t wait_count_diff;
2665 				uint16_t pkt_ctr_delta;
2666 
2667 				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
2668 				old_wait_count =
2669 				      wait_count->per_link_count[frame_link_id];
2670 
2671 				if (old_wait_count >= pkt_ctr_delta)
2672 					new_wait_count = old_wait_count -
2673 							 pkt_ctr_delta;
2674 				else
2675 					new_wait_count = 0;
2676 
2677 				wait_count_diff = old_wait_count -
2678 						  new_wait_count;
2679 
2680 				wait_count->per_link_count[frame_link_id] =
2681 								new_wait_count;
2682 				wait_count->total_count -= wait_count_diff;
2683 
2684 				if (wait_count->total_count == 0)
2685 					cur_entry->status &=
2686 						~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2687 			}
2688 		}
2689 	}
2690 
2691 	status = QDF_STATUS_SUCCESS;
2692 
2693 exit_free_entry:
2694 	/* Cleanup the entry if it is not queued */
2695 	if (new_entry && !*is_queued) {
2696 		/**
2697 		 * New entry created is not inserted to reorder list, free
2698 		 * the entry and release the reference
2699 		 */
2700 		wlan_objmgr_pdev_release_ref(new_entry->pdev,
2701 					     WLAN_MGMT_RX_REO_ID);
2702 		qdf_mem_free(new_entry);
2703 	}
2704 
2705 	qdf_spin_unlock_bh(&reo_list->list_lock);
2706 
2707 	/*
2708 	 * Same status is returned whether or not the frame got queued
2709 	 */
2710 	return status;
2711 }
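
/*
 * Illustrative trace (hypothetical frames): assume the list holds host
 * consumed entries with global time stamps 10 and 30 and a frame with time
 * stamp 20 arrives on link 1 with pkt_ctr_delta = 1. Step (a) clips the
 * wait counts of the entry with time stamp 10 against the new frame's wait
 * counts, step (b) inserts the new entry between 10 and 30
 * (list_insertion_pos = 1) after deriving its wait counts from the entry
 * with time stamp 30, and step (c) decrements the link 1 wait count of the
 * entry with time stamp 30 by pkt_ctr_delta.
 */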
2712 
2713 /**
2714  * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
2715  * @reo_list: Pointer to reorder list
2716  *
2717  * API to initialize the management rx-reorder list.
2718  *
2719  * Return: QDF_STATUS
2720  */
2721 static QDF_STATUS
2722 mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list)
2723 {
2724 	QDF_STATUS status;
2725 
2726 	reo_list->max_list_size = MGMT_RX_REO_LIST_MAX_SIZE;
2727 	reo_list->list_entry_timeout_us = MGMT_RX_REO_LIST_TIMEOUT_US;
2728 
2729 	qdf_list_create(&reo_list->list, reo_list->max_list_size);
2730 	qdf_spinlock_create(&reo_list->list_lock);
2731 
2732 	status = qdf_timer_init(NULL, &reo_list->ageout_timer,
2733 				mgmt_rx_reo_list_ageout_timer_handler, reo_list,
2734 				QDF_TIMER_TYPE_WAKE_APPS);
2735 	if (QDF_IS_STATUS_ERROR(status)) {
2736 		mgmt_rx_reo_err("Failed to initialize reo list ageout timer");
2737 		return status;
2738 	}
2739 
2740 	reo_list->ts_last_released_frame.valid = false;
2741 
2742 	status = qdf_timer_init
2743 			(NULL, &reo_list->global_mgmt_rx_inactivity_timer,
2744 			 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler,
2745 			 reo_list, QDF_TIMER_TYPE_WAKE_APPS);
2746 	if (QDF_IS_STATUS_ERROR(status)) {
2747 		mgmt_rx_reo_err("Failed to init glb mgmt rx inactivity timer");
2748 		return status;
2749 	}
2750 
2751 	return QDF_STATUS_SUCCESS;
2752 }
2753 
2754 /**
2755  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
2756  * Rx REO parameters.
2757  * @pdev: pdev extracted from the WMI event
2758  * @desc: pointer to frame descriptor
2759  *
2760  * Return: QDF_STATUS of operation
2761  */
2762 static QDF_STATUS
2763 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
2764 				      struct mgmt_rx_reo_frame_descriptor *desc)
2765 {
2766 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
2767 	struct mgmt_rx_reo_snapshot_params *host_ss;
2768 	struct mgmt_rx_reo_params *reo_params;
2769 	int pkt_ctr_delta;
2770 	struct wlan_objmgr_psoc *psoc;
2771 	uint16_t pkt_ctr_delta_thresh;
2772 
2773 	if (!desc) {
2774 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
2775 		return QDF_STATUS_E_NULL_VALUE;
2776 	}
2777 
2778 	if (!desc->rx_params) {
2779 		mgmt_rx_reo_err("Mgmt Rx params null");
2780 		return QDF_STATUS_E_NULL_VALUE;
2781 	}
2782 
2783 	reo_params = desc->rx_params->reo_params;
2784 	if (!reo_params) {
2785 		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
2786 		return QDF_STATUS_E_NULL_VALUE;
2787 	}
2788 
2789 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
2790 	if (!rx_reo_pdev_ctx) {
2791 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
2792 		return QDF_STATUS_E_FAILURE;
2793 	}
2794 
2795 	psoc = wlan_pdev_get_psoc(pdev);
2796 
2797 	/* FW should send valid REO parameters */
2798 	if (!reo_params->valid) {
2799 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
2800 		return QDF_STATUS_E_FAILURE;
2801 	}
2802 
2803 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
2804 
2805 	if (!host_ss->valid) {
2806 		desc->pkt_ctr_delta = 1;
2807 		goto update_host_ss;
2808 	}
2809 
2810 	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
2811 					     reo_params->mgmt_pkt_ctr)) {
2812 		mgmt_rx_reo_err("Cur frame ctr <= last frame ctr for link = %u",
2813 				reo_params->link_id);
2814 		goto failure_debug;
2815 	}
2816 
2817 	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
2818 						      host_ss->mgmt_pkt_ctr);
2819 	qdf_assert_always(pkt_ctr_delta > 0);
2820 	desc->pkt_ctr_delta = pkt_ctr_delta;
2821 
2822 	if (pkt_ctr_delta == 1)
2823 		goto update_host_ss;
2824 
2825 	/*
2826 	 * Under back pressure scenarios, FW may drop management Rx frame
2827 	 * WMI events. So holes in the management packet counter are expected.
2828 	 * Add a debug print and optional assert to track the holes.
2829 	 */
2830 	mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta);
2831 	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2832 			  reo_params->valid, reo_params->mgmt_pkt_ctr,
2833 			  reo_params->global_timestamp);
2834 	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u",
2835 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
2836 			  host_ss->global_timestamp);
2837 
2838 	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);
2839 
2840 	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
2841 		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
2842 				pkt_ctr_delta, pkt_ctr_delta_thresh,
2843 				reo_params->link_id);
2844 		goto failure_debug;
2845 	}
2846 
2847 update_host_ss:
2848 	host_ss->valid = true;
2849 	host_ss->global_timestamp = reo_params->global_timestamp;
2850 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
2851 
2852 	return QDF_STATUS_SUCCESS;
2853 
2854 failure_debug:
2855 	mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2856 			reo_params->valid, reo_params->mgmt_pkt_ctr,
2857 			reo_params->global_timestamp);
2858 	mgmt_rx_reo_err("Last frame valid = %u, pkt_ctr = %u, ts =%u",
2859 			host_ss->valid, host_ss->mgmt_pkt_ctr,
2860 			host_ss->global_timestamp);
2861 	qdf_assert_always(0);
2862 
2863 	return QDF_STATUS_E_FAILURE;
2864 }
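
/*
 * Example (hypothetical counter values): the management packet counter is a
 * 16-bit value, so the delta computation is expected to survive wrap around.
 * If the host snapshot recorded mgmt_pkt_ctr = 0xFFFE and the current frame
 * carries mgmt_pkt_ctr = 0x0001, pkt_ctr_delta works out to 3. Deltas larger
 * than 1 point at dropped management Rx WMI events and are treated as fatal
 * only when a non-zero pkt_ctr_delta_thresh is configured and exceeded.
 */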
2865 
2866 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2867 /**
2868  * mgmt_rx_reo_ingress_frame_debug_info_enabled() - API to check whether ingress
2869  * frame info debug feature is enabled
2870  * @ingress_frame_debug_info: Pointer to ingress frame debug info object
2871  *
2872  * Return: true or false
2873  */
2874 static bool
2875 mgmt_rx_reo_ingress_frame_debug_info_enabled
2876 		(struct reo_ingress_debug_info *ingress_frame_debug_info)
2877 {
2878 	return ingress_frame_debug_info->frame_list_size;
2879 }
2880 
2881 /**
2882  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
2883  * related to frames going into the reorder module
2884  * @reo_ctx: Pointer to reorder context
2885  *
2886  * API to print the stats related to frames going into the management
2887  * Rx reorder module.
2888  *
2889  * Return: QDF_STATUS
2890  */
2891 static QDF_STATUS
2892 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
2893 {
2894 	struct reo_ingress_frame_stats *stats;
2895 	uint8_t link_id;
2896 	uint8_t desc_type;
2897 	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
2898 	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2899 	uint64_t total_ingress_count = 0;
2900 	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
2901 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2902 	uint64_t total_stale_count = 0;
2903 	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
2904 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2905 	uint64_t total_error_count = 0;
2906 	uint64_t total_queued_count = 0;
2907 	uint64_t total_zero_wait_count_rx_count = 0;
2908 	uint64_t total_immediate_delivery_count = 0;
2909 
2910 	if (!reo_ctx)
2911 		return QDF_STATUS_E_NULL_VALUE;
2912 
2913 	stats = &reo_ctx->ingress_frame_debug_info.stats;
2914 
2915 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2916 		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2917 		     desc_type++) {
2918 			ingress_count_per_link[link_id] +=
2919 				stats->ingress_count[link_id][desc_type];
2920 			stale_count_per_link[link_id] +=
2921 					stats->stale_count[link_id][desc_type];
2922 			error_count_per_link[link_id] +=
2923 					stats->error_count[link_id][desc_type];
2924 		}
2925 
2926 		total_ingress_count += ingress_count_per_link[link_id];
2927 		total_stale_count += stale_count_per_link[link_id];
2928 		total_error_count += error_count_per_link[link_id];
2929 	}
2930 
2931 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2932 	     desc_type++) {
2933 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2934 			ingress_count_per_desc_type[desc_type] +=
2935 				stats->ingress_count[link_id][desc_type];
2936 			stale_count_per_desc_type[desc_type] +=
2937 					stats->stale_count[link_id][desc_type];
2938 			error_count_per_desc_type[desc_type] +=
2939 					stats->error_count[link_id][desc_type];
2940 		}
2941 	}
2942 
2943 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2944 		total_queued_count += stats->queued_count[link_id];
2945 		total_zero_wait_count_rx_count +=
2946 				stats->zero_wait_count_rx_count[link_id];
2947 		total_immediate_delivery_count +=
2948 				stats->immediate_delivery_count[link_id];
2949 	}
2950 
2951 	mgmt_rx_reo_alert("Ingress Frame Stats:");
2952 	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
2953 	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
2954 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
2955 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
2956 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
2957 	mgmt_rx_reo_alert("\t------------------------------------");
2958 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2959 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2960 	mgmt_rx_reo_alert("\t-------------------------------------------");
2961 
2962 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2963 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2964 				  stats->ingress_count[link_id][0],
2965 				  stats->ingress_count[link_id][1],
2966 				  stats->ingress_count[link_id][2],
2967 				  ingress_count_per_link[link_id]);
2968 		mgmt_rx_reo_alert("\t-------------------------------------------");
2969 	}
2970 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2971 			  ingress_count_per_desc_type[0],
2972 			  ingress_count_per_desc_type[1],
2973 			  ingress_count_per_desc_type[2],
2974 			  total_ingress_count);
2975 
2976 	mgmt_rx_reo_alert("\t2) Stale Frame Count:");
2977 	mgmt_rx_reo_alert("\t------------------------------------");
2978 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2979 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2980 	mgmt_rx_reo_alert("\t-------------------------------------------");
2981 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2982 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2983 				  stats->stale_count[link_id][0],
2984 				  stats->stale_count[link_id][1],
2985 				  stats->stale_count[link_id][2],
2986 				  stale_count_per_link[link_id]);
2987 		mgmt_rx_reo_alert("\t-------------------------------------------");
2988 	}
2989 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2990 			  stale_count_per_desc_type[0],
2991 			  stale_count_per_desc_type[1],
2992 			  stale_count_per_desc_type[2],
2993 			  total_stale_count);
2994 
2995 	mgmt_rx_reo_alert("\t3) Error Frame Count:");
2996 	mgmt_rx_reo_alert("\t------------------------------------");
2997 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2998 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2999 	mgmt_rx_reo_alert("\t-------------------------------------------");
3000 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3001 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
3002 				  stats->error_count[link_id][0],
3003 				  stats->error_count[link_id][1],
3004 				  stats->error_count[link_id][2],
3005 				  error_count_per_link[link_id]);
3006 		mgmt_rx_reo_alert("\t-------------------------------------------");
3007 	}
3008 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
3009 			  error_count_per_desc_type[0],
3010 			  error_count_per_desc_type[1],
3011 			  error_count_per_desc_type[2],
3012 			  total_error_count);
3013 
3014 	mgmt_rx_reo_alert("\t4) Host consumed frames related stats:");
3015 	mgmt_rx_reo_alert("\t------------------------------------------------");
3016 	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
3017 	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
3018 	mgmt_rx_reo_alert("\t------------------------------------------------");
3019 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3020 		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
3021 				  stats->queued_count[link_id],
3022 				  stats->zero_wait_count_rx_count[link_id],
3023 				  stats->immediate_delivery_count[link_id]);
3024 		mgmt_rx_reo_alert("\t------------------------------------------------");
3025 	}
3026 	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
3027 			  total_queued_count,
3028 			  total_zero_wait_count_rx_count,
3029 			  total_immediate_delivery_count);
3030 
3031 	return QDF_STATUS_SUCCESS;
3032 }
3033 
3034 /**
3035  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
3036  * the reorder algorithm.
3037  * @reo_ctx: management rx reorder context
3038  * @desc: Pointer to frame descriptor
3039  * @is_queued: Indicates whether this frame is queued to reorder list
3040  * @is_error: Indicates whether any error occurred during processing this frame
3041  *
3042  * Return: QDF_STATUS of operation
3043  */
3044 static QDF_STATUS
3045 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
3046 			      struct mgmt_rx_reo_frame_descriptor *desc,
3047 			      bool is_queued, bool is_error)
3048 {
3049 	struct reo_ingress_debug_info *ingress_frame_debug_info;
3050 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
3051 	struct reo_ingress_frame_stats *stats;
3052 	uint8_t link_id;
3053 
3054 	if (!reo_ctx || !desc)
3055 		return QDF_STATUS_E_NULL_VALUE;
3056 
3057 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
3058 
3059 	stats = &ingress_frame_debug_info->stats;
3060 	link_id = mgmt_rx_reo_get_link_id(desc->rx_params);
3061 	stats->ingress_count[link_id][desc->type]++;
3062 	if (is_queued)
3063 		stats->queued_count[link_id]++;
3064 	if (desc->zero_wait_count_rx)
3065 		stats->zero_wait_count_rx_count[link_id]++;
3066 	if (desc->immediate_delivery)
3067 		stats->immediate_delivery_count[link_id]++;
3068 	if (is_error)
3069 		stats->error_count[link_id][desc->type]++;
3070 	if (desc->is_stale)
3071 		stats->stale_count[link_id][desc->type]++;
3072 
3073 	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
3074 						(ingress_frame_debug_info))
3075 		return QDF_STATUS_SUCCESS;
3076 
3077 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
3078 			[ingress_frame_debug_info->next_index];
3079 
3080 	cur_frame_debug_info->link_id = link_id;
3081 	cur_frame_debug_info->mgmt_pkt_ctr =
3082 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
3083 	cur_frame_debug_info->global_timestamp =
3084 				mgmt_rx_reo_get_global_ts(desc->rx_params);
3085 	cur_frame_debug_info->start_timestamp =
3086 				mgmt_rx_reo_get_start_ts(desc->rx_params);
3087 	cur_frame_debug_info->end_timestamp =
3088 				mgmt_rx_reo_get_end_ts(desc->rx_params);
3089 	cur_frame_debug_info->duration_us =
3090 				mgmt_rx_reo_get_duration_us(desc->rx_params);
3091 	cur_frame_debug_info->desc_type = desc->type;
3092 	cur_frame_debug_info->frame_type = desc->frame_type;
3093 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
3094 	cur_frame_debug_info->wait_count = desc->wait_count;
3095 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
3096 		     desc->shared_snapshots,
3097 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
3098 			     sizeof(desc->shared_snapshots)));
3099 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
3100 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
3101 			     sizeof(desc->host_snapshot)));
3102 	cur_frame_debug_info->is_queued = is_queued;
3103 	cur_frame_debug_info->is_stale = desc->is_stale;
3104 	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
3105 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
3106 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
3107 	cur_frame_debug_info->is_error = is_error;
3108 	cur_frame_debug_info->ts_last_released_frame =
3109 				reo_ctx->reo_list.ts_last_released_frame;
3110 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
3111 	cur_frame_debug_info->ingress_duration =
3112 			qdf_get_log_timestamp() - desc->ingress_timestamp;
3113 	cur_frame_debug_info->list_size_rx = desc->list_size_rx;
3114 	cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
3115 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
3116 	cur_frame_debug_info->reo_required = desc->reo_required;
3117 
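	/*
	 * Debug entries are kept in a circular buffer of frame_list_size
	 * slots. Advance the write index and note when it wraps around so
	 * that readers know the entire buffer holds valid entries.
	 */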
3118 	ingress_frame_debug_info->next_index++;
3119 	ingress_frame_debug_info->next_index %=
3120 				ingress_frame_debug_info->frame_list_size;
3121 	if (ingress_frame_debug_info->next_index == 0)
3122 		ingress_frame_debug_info->wrap_aroud = true;
3123 
3124 	return QDF_STATUS_SUCCESS;
3125 }
3126 
3127 /**
3128  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
 * about the latest frames that entered the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be printed.
3134  *
3135  * Return: QDF_STATUS of operation
3136  */
3137 static QDF_STATUS
3138 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
3139 					   uint16_t num_frames)
3140 {
3141 	struct reo_ingress_debug_info *ingress_frame_debug_info;
3142 	int start_index;
3143 	uint16_t index;
3144 	uint16_t entry;
3145 	uint16_t num_valid_entries;
3146 	uint16_t num_entries_to_print;
3147 	char *boarder;
3148 
3149 	if (!reo_ctx)
3150 		return QDF_STATUS_E_NULL_VALUE;
3151 
3152 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
3153 
3154 	if (ingress_frame_debug_info->wrap_aroud)
3155 		num_valid_entries = ingress_frame_debug_info->frame_list_size;
3156 	else
3157 		num_valid_entries = ingress_frame_debug_info->next_index;
3158 
3159 	if (num_frames == 0) {
3160 		num_entries_to_print = num_valid_entries;
3161 
3162 		if (ingress_frame_debug_info->wrap_aroud)
3163 			start_index = ingress_frame_debug_info->next_index;
3164 		else
3165 			start_index = 0;
3166 	} else {
3167 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
3168 
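		/*
		 * Walk back num_entries_to_print slots from the next write
		 * index, wrapping within the circular buffer, so that only
		 * the most recent entries are printed.
		 */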
3169 		start_index = (ingress_frame_debug_info->next_index -
3170 			       num_entries_to_print +
3171 			       ingress_frame_debug_info->frame_list_size)
3172 			      % ingress_frame_debug_info->frame_list_size;
3173 
3174 		qdf_assert_always(start_index >= 0 &&
3175 				  start_index < ingress_frame_debug_info->frame_list_size);
3176 	}
3177 
3178 	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
3179 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
3180 				num_frames,
3181 				ingress_frame_debug_info->wrap_aroud,
3182 				ingress_frame_debug_info->next_index);
3183 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
3184 				start_index, num_entries_to_print);
3185 
3186 	if (!num_entries_to_print)
3187 		return QDF_STATUS_SUCCESS;
3188 
3189 	boarder = ingress_frame_debug_info->boarder;
3190 
3191 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3192 	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
3193 				"Index", "CPU", "D.type", "F.type", "F.subtype",
3194 				"Link", "SeqNo", "Global ts",
3195 				"Start ts", "End ts", "Dur", "Last ts",
3196 				"Ingress ts", "Flags", "Ingress Dur", "Size",
3197 				"Pos", "Wait Count", "Snapshot : link 0",
3198 				"Snapshot : link 1", "Snapshot : link 2",
3199 				"Snapshot : link 3", "Snapshot : link 4",
3200 				"Snapshot : link 5");
3201 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3202 
3203 	index = start_index;
3204 	for (entry = 0; entry < num_entries_to_print; entry++) {
3205 		struct reo_ingress_debug_frame_info *info;
3206 		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
3207 		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
3208 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
3209 		char flag_queued = ' ';
3210 		char flag_stale = ' ';
3211 		char flag_parallel_rx = ' ';
3212 		char flag_error = ' ';
3213 		char flag_zero_wait_count_rx = ' ';
3214 		char flag_immediate_delivery = ' ';
3215 		char flag_reo_required = ' ';
3216 		int64_t ts_last_released_frame = -1;
3217 		uint8_t link;
3218 
3219 		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];
3220 
3221 		if (info->ts_last_released_frame.valid)
3222 			ts_last_released_frame =
3223 					info->ts_last_released_frame.global_ts;
3224 
3225 		if (info->is_queued)
3226 			flag_queued = 'Q';
3227 
3228 		if (info->is_stale)
3229 			flag_stale = 'S';
3230 
3231 		if (info->is_parallel_rx)
3232 			flag_parallel_rx = 'P';
3233 
3234 		if (info->is_error)
3235 			flag_error = 'E';
3236 
3237 		if (info->zero_wait_count_rx)
3238 			flag_zero_wait_count_rx = 'Z';
3239 
3240 		if (info->immediate_delivery)
3241 			flag_immediate_delivery = 'I';
3242 
3243 		if (!info->reo_required)
3244 			flag_reo_required = 'N';
3245 
3246 		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c", flag_error,
3247 			 flag_stale, flag_parallel_rx, flag_queued,
3248 			 flag_zero_wait_count_rx, flag_immediate_delivery,
3249 			 flag_reo_required);
3250 		snprintf(wait_count, sizeof(wait_count),
3251 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
3252 			 info->wait_count.total_count,
3253 			 info->wait_count.per_link_count[0],
3254 			 info->wait_count.per_link_count[1],
3255 			 info->wait_count.per_link_count[2],
3256 			 info->wait_count.per_link_count[3],
3257 			 info->wait_count.per_link_count[4],
3258 			 info->wait_count.per_link_count[5]);
3259 
3260 		for (link = 0; link < MAX_MLO_LINKS; link++) {
3261 			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3262 			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3263 			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3264 			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3265 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
3266 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
3267 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
3268 			struct mgmt_rx_reo_snapshot_params *host_ss;
3269 
3270 			mac_hw_ss = &info->shared_snapshots
3271 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
3272 			fw_consumed_ss = &info->shared_snapshots
3273 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
3274 			fw_forwarded_ss = &info->shared_snapshots
3275 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
3276 			host_ss = &info->host_snapshot[link];
3277 
3278 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
3279 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
3280 				 mac_hw_ss->global_timestamp);
3281 			snprintf(fw_consumed, sizeof(fw_consumed),
3282 				 "(%1u, %5u, %10u)",
3283 				 fw_consumed_ss->valid,
3284 				 fw_consumed_ss->mgmt_pkt_ctr,
3285 				 fw_consumed_ss->global_timestamp);
3286 			snprintf(fw_forwarded, sizeof(fw_forwarded),
3287 				 "(%1u, %5u, %10u)",
3288 				 fw_forwarded_ss->valid,
3289 				 fw_forwarded_ss->mgmt_pkt_ctr,
3290 				 fw_forwarded_ss->global_timestamp);
3291 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
3292 				 host_ss->valid,
3293 				 host_ss->mgmt_pkt_ctr,
3294 				 host_ss->global_timestamp);
3295 			snprintf(snapshots[link], sizeof(snapshots[link]),
3296 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
3297 				 fw_forwarded, host);
3298 		}
3299 
3300 		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%13s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
3301 					entry, info->cpu_id, info->desc_type,
3302 					info->frame_type, info->frame_subtype,
3303 					info->link_id,
3304 					info->mgmt_pkt_ctr,
3305 					info->global_timestamp,
3306 					info->start_timestamp,
3307 					info->end_timestamp,
3308 					info->duration_us,
3309 					ts_last_released_frame,
3310 					info->ingress_timestamp, flags,
3311 					info->ingress_duration,
3312 					info->list_size_rx,
3313 					info->list_insertion_pos, wait_count,
3314 					snapshots[0], snapshots[1],
3315 					snapshots[2], snapshots[3],
3316 					snapshots[4], snapshots[5]);
3317 		mgmt_rx_reo_alert_no_fl("%s", boarder);
3318 
3319 		index++;
3320 		index %= ingress_frame_debug_info->frame_list_size;
3321 	}
3322 
3323 	return QDF_STATUS_SUCCESS;
3324 }
3325 #else
3326 /**
3327  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
3328  * related to frames going into the reorder module
3329  * @reo_ctx: Pointer to reorder context
3330  *
3331  * API to print the stats related to frames going into the management
3332  * Rx reorder module.
3333  *
3334  * Return: QDF_STATUS
3335  */
3336 static QDF_STATUS
3337 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
3338 {
3339 	return QDF_STATUS_SUCCESS;
3340 }
3341 
3342 /**
3343  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
3344  * the reorder algorithm.
3345  * @reo_ctx: management rx reorder context
3346  * @desc: Pointer to frame descriptor
3347  * @is_queued: Indicates whether this frame is queued to reorder list
3348  * @is_error: Indicates whether any error occurred during processing this frame
3349  *
3350  * Return: QDF_STATUS of operation
3351  */
3352 static QDF_STATUS
3353 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
3354 			      struct mgmt_rx_reo_frame_descriptor *desc,
3355 			      bool is_queued, bool is_error)
3356 {
3357 	return QDF_STATUS_SUCCESS;
3358 }
3359 
3360 /**
3361  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
3362  * the latest frames entering the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					   uint16_t num_frames)
3369 {
3370 	return QDF_STATUS_SUCCESS;
3371 }
3372 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
3373 
3374 QDF_STATUS
3375 wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
3376 			    struct mgmt_rx_reo_frame_descriptor *desc,
3377 			    bool *is_queued)
3378 {
3379 	struct mgmt_rx_reo_context *reo_ctx;
3380 	QDF_STATUS ret;
3381 
3382 	if (!is_queued)
3383 		return QDF_STATUS_E_NULL_VALUE;
3384 
3385 	*is_queued = false;
3386 
3387 	if (!desc || !desc->rx_params) {
3388 		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
3389 		return QDF_STATUS_E_NULL_VALUE;
3390 	}
3391 
3392 	reo_ctx = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
3393 	if (!reo_ctx) {
3394 		mgmt_rx_reo_err("REO context is NULL");
3395 		return QDF_STATUS_E_NULL_VALUE;
3396 	}
3397 
3398 	/**
3399 	 * Critical Section = Host snapshot update + Calculation of wait
3400 	 * counts + Update reorder list. Following section describes the
3401 	 * motivation for making this a critical section.
3402 	 * Lets take an example of 2 links (Link A & B) and each has received
	 * Let's take an example of 2 links (Link A & B) where each has
	 * received a management frame (A1 and B1) such that the MLO global
	 * time stamp of A1 < MLO global time stamp of B1. The host is
	 * concurrently executing "wlan_mgmt_rx_reo_algo_entry" for A1 and B1
	 * on 2 different CPUs.
	 *
	 * A lockless version of this API ("wlan_mgmt_rx_reo_algo_entry_v1")
	 * is as follows.
3410 	 * wlan_mgmt_rx_reo_algo_entry()
3411 	 * {
3412 	 *     Host snapshot update
3413 	 *     Calculation of wait counts
3414 	 *     Update reorder list
3415 	 *     Release to upper layer
3416 	 * }
3417 	 *
3418 	 * We may run into race conditions under the following sequence of
3419 	 * operations.
3420 	 *
3421 	 * 1. Host snapshot update for link A in context of frame A1
3422 	 * 2. Host snapshot update for link B in context of frame B1
3423 	 * 3. Calculation of wait count for frame B1
3424 	 *        link A wait count =  0
3425 	 *        link B wait count =  0
3426 	 * 4. Update reorder list with frame B1
3427 	 * 5. Release B1 to upper layer
3428 	 * 6. Calculation of wait count for frame A1
3429 	 *        link A wait count =  0
3430 	 *        link B wait count =  0
3431 	 * 7. Update reorder list with frame A1
3432 	 * 8. Release A1 to upper layer
3433 	 *
3434 	 * This leads to incorrect behaviour as B1 goes to upper layer before
3435 	 * A1.
3436 	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count a critical section by adding locks. The updated version of
	 * the API ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
3440 	 *
3441 	 * wlan_mgmt_rx_reo_algo_entry()
3442 	 * {
3443 	 *     LOCK
3444 	 *         Host snapshot update
3445 	 *         Calculation of wait counts
3446 	 *     UNLOCK
3447 	 *     Update reorder list
3448 	 *     Release to upper layer
3449 	 * }
3450 	 *
	 * Even with this API, we may run into race conditions under the
	 * following sequence of operations.
3453 	 *
3454 	 * 1. Host snapshot update for link A in context of frame A1 +
3455 	 *    Calculation of wait count for frame A1
3456 	 *        link A wait count =  0
3457 	 *        link B wait count =  0
3458 	 * 2. Host snapshot update for link B in context of frame B1 +
3459 	 *    Calculation of wait count for frame B1
3460 	 *        link A wait count =  0
3461 	 *        link B wait count =  0
3462 	 * 4. Update reorder list with frame B1
3463 	 * 5. Release B1 to upper layer
3464 	 * 7. Update reorder list with frame A1
3465 	 * 8. Release A1 to upper layer
3466 	 *
3467 	 * This also leads to incorrect behaviour as B1 goes to upper layer
3468 	 * before A1.
3469 	 *
3470 	 * To prevent this, let's make Host snapshot update + Calculate wait
3471 	 * count + Update reorder list a critical section by adding locks.
3472 	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
3473 	 * is as follows.
3474 	 *
3475 	 * wlan_mgmt_rx_reo_algo_entry()
3476 	 * {
3477 	 *     LOCK
3478 	 *         Host snapshot update
3479 	 *         Calculation of wait counts
3480 	 *         Update reorder list
3481 	 *     UNLOCK
3482 	 *     Release to upper layer
3483 	 * }
3484 	 */
3485 	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);
3486 
3487 	qdf_assert_always(desc->rx_params->reo_params->valid);
3488 	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);
3489 
3490 	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
3491 	    desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME)
3492 		qdf_assert_always(desc->rx_params->reo_params->duration_us);
3493 
3494 	/* Update the Host snapshot */
3495 	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
3496 	if (QDF_IS_STATUS_ERROR(ret))
3497 		goto failure;
3498 
3499 	/* Compute wait count for this frame/event */
3500 	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
3501 	if (QDF_IS_STATUS_ERROR(ret))
3502 		goto failure;
3503 
3504 	/* Update the REO list */
3505 	ret = mgmt_rx_reo_update_list(&reo_ctx->reo_list, desc, is_queued);
3506 	if (QDF_IS_STATUS_ERROR(ret))
3507 		goto failure;
3508 
3509 	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
3510 					    *is_queued, false);
3511 	if (QDF_IS_STATUS_ERROR(ret)) {
3512 		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3513 		return ret;
3514 	}
3515 
3516 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3517 
3518 	/* Finally, release the entries for which pending frame is received */
3519 	return mgmt_rx_reo_list_release_entries(reo_ctx);
3520 
3521 failure:
3522 	/**
3523 	 * Ignore the return value of this function call, return
3524 	 * the actual reason for failure.
3525 	 */
3526 	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);
3527 
3528 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3529 
3530 	return ret;
3531 }
3532 
3533 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
3534 /**
3535  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
3536  * context.
3537  * @reo_context: Pointer to reo context
3538  * @ml_grp_id: MLO group id which it belongs to
3539  *
3540  * Return: QDF_STATUS of operation
3541  */
3542 static inline QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context,
		     uint8_t ml_grp_id)
3544 {
3545 	return QDF_STATUS_SUCCESS;
3546 }
3547 
3548 /**
 * mgmt_rx_reo_sim_deinit() - De-initialize management rx reorder simulation
3550  * context.
3551  * @reo_context: Pointer to reo context
3552  *
3553  * Return: QDF_STATUS of operation
3554  */
3555 static inline QDF_STATUS
3556 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
3557 {
3558 	return QDF_STATUS_SUCCESS;
3559 }
3560 
3561 QDF_STATUS
3562 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
3563 {
3564 	return QDF_STATUS_SUCCESS;
3565 }
3566 
3567 QDF_STATUS
3568 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
3569 {
3570 	return QDF_STATUS_SUCCESS;
3571 }
3572 #else
3573 /**
3574  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
3575  * master management frame list
3576  * @master_frame_list: pointer to master management frame list
3577  * @frame: pointer to management frame parameters
3578  *
3579  * This API removes frames from the master management frame list. This API is
3580  * used in case of FW consumed management frames or management frames which
3581  * are dropped at host due to any error.
3582  *
3583  * Return: QDF_STATUS of operation
3584  */
3585 static QDF_STATUS
3586 mgmt_rx_reo_sim_remove_frame_from_master_list(
3587 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3588 		const struct mgmt_rx_frame_params *frame)
3589 {
3590 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
3591 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
3592 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
3593 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
3594 	QDF_STATUS status;
3595 
3596 	if (!master_frame_list) {
3597 		mgmt_rx_reo_err("Mgmt master frame list is null");
3598 		return QDF_STATUS_E_NULL_VALUE;
3599 	}
3600 
3601 	if (!frame) {
3602 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3603 		return QDF_STATUS_E_NULL_VALUE;
3604 	}
3605 
3606 	qdf_spin_lock(&master_frame_list->lock);
3607 
3608 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
3609 			  node) {
3610 		if (pending_entry->params.link_id == frame->link_id &&
3611 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3612 		    pending_entry->params.global_timestamp ==
3613 		    frame->global_timestamp) {
3614 			matching_pend_entry = pending_entry;
3615 			break;
3616 		}
3617 	}
3618 
3619 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
3620 		if (stale_entry->params.link_id == frame->link_id &&
3621 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3622 		    stale_entry->params.global_timestamp ==
3623 		    frame->global_timestamp) {
3624 			matching_stale_entry = stale_entry;
3625 			break;
3626 		}
3627 	}
3628 
	/* Frame found in both pending and stale lists; assert on duplicates */
3630 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
3631 
3632 	if (!matching_pend_entry && !matching_stale_entry) {
3633 		qdf_spin_unlock(&master_frame_list->lock);
3634 		mgmt_rx_reo_err("No matching frame in pend/stale list");
3635 		return QDF_STATUS_E_FAILURE;
3636 	}
3637 
3638 	if (matching_pend_entry) {
3639 		status = qdf_list_remove_node(&master_frame_list->pending_list,
3640 					      &matching_pend_entry->node);
3641 		if (QDF_IS_STATUS_ERROR(status)) {
3642 			qdf_spin_unlock(&master_frame_list->lock);
3643 			mgmt_rx_reo_err("Failed to remove the matching entry");
3644 			return status;
3645 		}
3646 
3647 		qdf_mem_free(matching_pend_entry);
3648 	}
3649 
3650 	if (matching_stale_entry) {
3651 		status = qdf_list_remove_node(&master_frame_list->stale_list,
3652 					      &matching_stale_entry->node);
3653 		if (QDF_IS_STATUS_ERROR(status)) {
3654 			qdf_spin_unlock(&master_frame_list->lock);
3655 			mgmt_rx_reo_err("Failed to remove the matching entry");
3656 			return status;
3657 		}
3658 
3659 		qdf_mem_free(matching_stale_entry);
3660 	}
3661 
3662 	qdf_spin_unlock(&master_frame_list->lock);
3663 
3664 	return QDF_STATUS_SUCCESS;
3665 }
3666 
3667 /**
3668  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
3669  * pending management frame list
3670  * @master_frame_list: pointer to master management frame list
3671  * @frame: pointer to management frame parameters
3672  *
3673  * This API removes frames from the pending management frame list. This API is
3674  * used in case of FW consumed management frames or management frames which
3675  * are dropped at host due to any error.
3676  *
3677  * Return: QDF_STATUS of operation
3678  */
3679 static QDF_STATUS
3680 mgmt_rx_reo_sim_remove_frame_from_pending_list(
3681 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3682 		const struct mgmt_rx_frame_params *frame)
3683 {
3684 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
3685 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
3686 	QDF_STATUS status;
3687 
3688 	if (!master_frame_list) {
3689 		mgmt_rx_reo_err("Mgmt master frame list is null");
3690 		return QDF_STATUS_E_NULL_VALUE;
3691 	}
3692 
3693 	if (!frame) {
3694 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3695 		return QDF_STATUS_E_NULL_VALUE;
3696 	}
3697 
3698 	qdf_spin_lock(&master_frame_list->lock);
3699 
3700 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
3701 		if (cur_entry->params.link_id == frame->link_id &&
3702 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3703 		    cur_entry->params.global_timestamp ==
3704 		    frame->global_timestamp) {
3705 			matching_entry = cur_entry;
3706 			break;
3707 		}
3708 	}
3709 
3710 	if (!matching_entry) {
3711 		qdf_spin_unlock(&master_frame_list->lock);
3712 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
3713 		return QDF_STATUS_E_FAILURE;
3714 	}
3715 
3716 	status = qdf_list_remove_node(&master_frame_list->pending_list,
3717 				      &matching_entry->node);
3718 	if (QDF_IS_STATUS_ERROR(status)) {
3719 		qdf_spin_unlock(&master_frame_list->lock);
3720 		mgmt_rx_reo_err("Failed to remove the matching entry");
3721 		return status;
3722 	}
3723 
3724 	qdf_mem_free(matching_entry);
3725 
3726 	qdf_spin_unlock(&master_frame_list->lock);
3727 
3729 	return QDF_STATUS_SUCCESS;
3730 }
3731 
3732 /**
3733  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
3734  * pending management frame list
3735  * @master_frame_list: pointer to master management frame list
3736  * @frame: pointer to management frame parameters
3737  *
3738  * This API inserts frames to the pending management frame list. This API is
3739  * used to insert frames generated by the MAC HW to the pending frame list.
3740  *
3741  * Return: QDF_STATUS of operation
3742  */
3743 static QDF_STATUS
3744 mgmt_rx_reo_sim_add_frame_to_pending_list(
3745 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3746 		const struct mgmt_rx_frame_params *frame)
3747 {
3748 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
3749 	QDF_STATUS status;
3750 
3751 	if (!master_frame_list) {
3752 		mgmt_rx_reo_err("Mgmt master frame list is null");
3753 		return QDF_STATUS_E_NULL_VALUE;
3754 	}
3755 
3756 	if (!frame) {
		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3758 		return QDF_STATUS_E_NULL_VALUE;
3759 	}
3760 
3761 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
3762 	if (!new_entry) {
3763 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
3764 		return QDF_STATUS_E_NOMEM;
3765 	}
3766 
3767 	new_entry->params = *frame;
3768 
3769 	qdf_spin_lock(&master_frame_list->lock);
3770 
3771 	status = qdf_list_insert_back(&master_frame_list->pending_list,
3772 				      &new_entry->node);
3773 
3774 	qdf_spin_unlock(&master_frame_list->lock);
3775 
3776 	if (QDF_IS_STATUS_ERROR(status)) {
3777 		mgmt_rx_reo_err("Failed to add frame to pending list");
3778 		qdf_mem_free(new_entry);
3779 		return status;
3780 	}
3781 
3782 	return QDF_STATUS_SUCCESS;
3783 }
3784 
3785 QDF_STATUS
3786 mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
3787 				 struct mgmt_rx_event_params *mgmt_rx_params)
3788 {
3789 	struct mgmt_rx_reo_context *reo_context;
3790 	struct mgmt_rx_reo_sim_context *sim_context;
3791 	QDF_STATUS status;
3792 	struct mgmt_rx_reo_params *reo_params;
3793 
3794 	if (!mgmt_rx_params) {
3795 		mgmt_rx_reo_err("Mgmt rx params null");
3796 		return QDF_STATUS_E_NULL_VALUE;
3797 	}
3798 
3799 	reo_params = mgmt_rx_params->reo_params;
3800 
3801 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
3802 	if (!reo_context) {
3803 		mgmt_rx_reo_err("Mgmt reo context is null");
3804 		return QDF_STATUS_E_NULL_VALUE;
3805 	}
3806 
3807 	sim_context = &reo_context->sim_context;
3808 
3809 	qdf_spin_lock(&sim_context->master_frame_list.lock);
3810 
3811 	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
3812 		qdf_spin_unlock(&sim_context->master_frame_list.lock);
3813 		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
3814 		qdf_assert_always(0);
3815 	} else {
3816 		struct mgmt_rx_frame_params *cur_entry_params;
3817 		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
3818 		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
3819 
3820 		/**
3821 		 * Make sure the frames delivered to upper layer are in the
3822 		 * increasing order of global time stamp. For that the frame
3823 		 * which is being delivered should be present at the head of the
3824 		 * pending frame list. There could be multiple frames with the
3825 		 * same global time stamp in the pending frame list. Search
3826 		 * among all the frames at the head of the list which has the
3827 		 * same global time stamp as the frame which is being delivered.
3828 		 * To find matching frame, check whether packet counter,
3829 		 * global time stamp and link id are same.
3830 		 */
3831 		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
3832 				  cur_entry, node) {
3833 			cur_entry_params = &cur_entry->params;
3834 
3835 			if (cur_entry_params->global_timestamp !=
3836 			    reo_params->global_timestamp)
3837 				break;
3838 
3839 			if (cur_entry_params->link_id == reo_params->link_id &&
3840 			    cur_entry_params->mgmt_pkt_ctr ==
3841 			    reo_params->mgmt_pkt_ctr) {
3842 				matching_entry = cur_entry;
3843 				break;
3844 			}
3845 		}
3846 
3847 		if (!matching_entry) {
3848 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
3849 			mgmt_rx_reo_err("reo sim failure: mismatch");
3850 			qdf_assert_always(0);
3851 		}
3852 
3853 		status = qdf_list_remove_node(
3854 				&sim_context->master_frame_list.pending_list,
3855 				&matching_entry->node);
3856 		qdf_mem_free(matching_entry);
3857 
3858 		if (QDF_IS_STATUS_ERROR(status)) {
3859 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
3860 			mgmt_rx_reo_err("Failed to remove matching entry");
3861 			return status;
3862 		}
3863 	}
3864 
3865 	qdf_spin_unlock(&sim_context->master_frame_list.lock);
3866 
3867 	mgmt_rx_reo_debug("Successfully processed mgmt frame");
3868 	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
3869 			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
3870 			  reo_params->global_timestamp);
3871 
3872 	return QDF_STATUS_SUCCESS;
3873 }
3874 
3875 /**
3876  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
3877  * @percentage_true: probability (in percentage) of true
3878  *
3879  * API to generate true with probability @percentage_true % and false with
3880  * probability (100 - @percentage_true) %.
3881  *
3882  * Return: true with probability @percentage_true % and false with probability
3883  * (100 - @percentage_true) %
3884  */
3885 static bool
3886 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
3887 {
3888 	uint32_t rand;
3889 
3890 	if (percentage_true > 100) {
3891 		mgmt_rx_reo_err("Invalid probability value for true, %u",
3892 				percentage_true);
		return false;
3894 	}
3895 
3896 	get_random_bytes(&rand, sizeof(rand));
3897 
3898 	return ((rand % 100) < percentage_true);
3899 }
3900 
3901 /**
3902  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
3903  * value in the range [0, max)
3904  * @max: upper limit for the output
3905  *
3906  * API to generate random unsigned integer value in the range [0, max).
3907  *
3908  * Return: unsigned integer value in the range [0, max)
3909  */
3910 static uint32_t
3911 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
3912 {
3913 	uint32_t rand;
3914 
3915 	get_random_bytes(&rand, sizeof(rand));
3916 
3917 	return (rand % max);
3918 }
3919 
3920 /**
3921  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
3922  * @sleeptime_us: Sleep time in micro seconds
3923  *
3924  * This API uses msleep() internally. So the granularity is limited to
3925  * milliseconds.
3926  *
3927  * Return: none
3928  */
3929 static void
3930 mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
3931 {
3932 	msleep(sleeptime_us / USEC_PER_MSEC);
3933 }
3934 
3935 /**
3936  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
3937  * layer
3938  * @arg: Argument
3939  *
3940  * This API handles the management frame at the host layer. This is applicable
3941  * for simulation alone.
3942  *
3943  * Return: none
3944  */
3945 static void
3946 mgmt_rx_reo_sim_frame_handler_host(void *arg)
3947 {
3948 	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
3949 	uint32_t fw_to_host_delay_us;
3950 	bool is_error_frame = false;
3951 	int8_t link_id = -1;
3952 	struct mgmt_rx_event_params *rx_params;
3953 	QDF_STATUS status;
3954 	struct mgmt_rx_reo_sim_context *sim_context;
3955 	struct wlan_objmgr_pdev *pdev;
3956 	uint8_t ml_grp_id;
3957 
3958 	if (!frame_fw) {
3959 		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
3960 				link_id);
3961 		goto error_print;
3962 	}
3963 
3964 	link_id = frame_fw->params.link_id;
3965 
3966 	sim_context = frame_fw->sim_context;
3967 	if (!sim_context) {
3968 		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
3969 				link_id);
3970 		goto error_free_fw_frame;
3971 	}
3972 
3973 	ml_grp_id = sim_context->mlo_grp_id;
3974 
3975 	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
3976 			      mgmt_rx_reo_sim_get_random_unsigned_int(
3977 			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);
3978 
3979 	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);
3980 
3981 	if (!frame_fw->is_consumed_by_fw) {
3982 		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
3983 				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);
3984 
3985 		/**
3986 		 * This frame should be present in pending/stale list of the
3987 		 * master frame list. Error frames need not be reordered
		 * by the reorder algorithm; they are tracked only for
		 * bookkeeping. Hence remove the frame from the master list.
3990 		 */
3991 		if (is_error_frame) {
3992 			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
3993 					&sim_context->master_frame_list,
3994 					&frame_fw->params);
3995 
3996 			if (QDF_IS_STATUS_ERROR(status)) {
3997 				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
3998 						link_id);
3999 				qdf_assert_always(0);
4000 			}
4001 		}
4002 	}
4003 
4004 	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
4005 			  link_id, frame_fw->params.global_timestamp,
4006 			  frame_fw->params.mgmt_pkt_ctr,
4007 			  frame_fw->is_consumed_by_fw, is_error_frame);
4008 
4009 	rx_params = alloc_mgmt_rx_event_params();
4010 	if (!rx_params) {
4011 		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
4012 				link_id);
4013 		goto error_free_fw_frame;
4014 	}
4015 
4016 	rx_params->reo_params->link_id = frame_fw->params.link_id;
4017 	rx_params->reo_params->global_timestamp =
4018 					frame_fw->params.global_timestamp;
4019 	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
4020 	rx_params->reo_params->valid = true;
4021 
4022 	pdev = wlan_get_pdev_from_mlo_link_id(
4023 			link_id, ml_grp_id, WLAN_MGMT_RX_REO_SIM_ID);
4024 	if (!pdev) {
4025 		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
4026 		goto error_free_mgmt_rx_event_params;
4027 	}
4028 
4029 	if (is_error_frame) {
4030 		status = tgt_mgmt_rx_reo_host_drop_handler(
4031 						pdev, rx_params->reo_params);
4032 		free_mgmt_rx_event_params(rx_params);
4033 	} else if (frame_fw->is_consumed_by_fw) {
4034 		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
4035 						pdev, rx_params->reo_params);
4036 		free_mgmt_rx_event_params(rx_params);
4037 	} else {
4038 		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
4039 	}
4040 
4041 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
4042 
4043 	if (QDF_IS_STATUS_ERROR(status)) {
4044 		mgmt_rx_reo_err("Failed to execute reo algorithm");
4045 		goto error_free_fw_frame;
4046 	}
4047 
4048 	qdf_mem_free(frame_fw);
4049 
4050 	return;
4051 
4052 error_free_mgmt_rx_event_params:
4053 	free_mgmt_rx_event_params(rx_params);
4054 error_free_fw_frame:
4055 	qdf_mem_free(frame_fw);
4056 error_print:
4057 	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
4058 			link_id);
4059 }
4060 
4061 /**
4062  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
4063  * frame reordering
 * @link_id: link id
 * @ml_grp_id: MLO group id to which the link belongs
 * @id: snapshot id
 * @value: snapshot value
4068  *
4069  * This API writes the snapshots used for management frame reordering. MAC HW
4070  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
4071  * snapshots.
4072  *
4073  * Return: QDF_STATUS
4074  */
4075 static QDF_STATUS
4076 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id, uint8_t ml_grp_id,
4077 			       enum mgmt_rx_reo_shared_snapshot_id id,
4078 			       struct mgmt_rx_reo_shared_snapshot value)
4079 {
4080 	struct wlan_objmgr_pdev *pdev;
4081 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
4082 	QDF_STATUS status;
4083 
4084 	pdev = wlan_get_pdev_from_mlo_link_id(
4085 			link_id, ml_grp_id,
4086 			WLAN_MGMT_RX_REO_SIM_ID);
4087 
4088 	if (!pdev) {
4089 		mgmt_rx_reo_err("pdev is null");
4090 		return QDF_STATUS_E_NULL_VALUE;
4091 	}
4092 
4093 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
4094 						      &snapshot_address);
4095 
4096 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
4097 
4098 	if (QDF_IS_STATUS_ERROR(status)) {
4099 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
4100 				id, pdev);
4101 		return QDF_STATUS_E_FAILURE;
4102 	}
4103 
4104 	snapshot_address->mgmt_rx_reo_snapshot_low =
4105 						value.mgmt_rx_reo_snapshot_low;
4106 	snapshot_address->mgmt_rx_reo_snapshot_high =
4107 						value.mgmt_rx_reo_snapshot_high;
4108 
4109 	return QDF_STATUS_SUCCESS;
4110 }
4111 
4112 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
4113 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
4114 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
4115 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
4116 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
4117 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)
4118 
4119 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
4120 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
4121 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
4122 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
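
/*
 * Resulting layout of the simulated snapshot, derived from the definitions
 * above:
 *   snapshot_low  bit  0      : valid
 *   snapshot_low  bits 1..16  : mgmt_pkt_ctr (16 bits)
 *   snapshot_low  bits 17..31 : global_timestamp[14:0]
 *   snapshot_high bits 0..16  : global_timestamp[31:15]
 *   snapshot_high bits 17..31 : mgmt_pkt_ctr[14:0] (redundant copy)
 */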
4123 
4124 /**
4125  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
4126  * management frame
4127  * @global_timestamp: global time stamp
4128  * @mgmt_pkt_ctr: management packet counter
4129  *
4130  * This API gets the snapshot value for a frame with time stamp
4131  * @global_timestamp and sequence number @mgmt_pkt_ctr.
4132  *
4133  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
4134  */
4135 static struct mgmt_rx_reo_shared_snapshot
4136 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
4137 				   uint16_t mgmt_pkt_ctr)
4138 {
4139 	struct mgmt_rx_reo_shared_snapshot snapshot = {0};
4140 
4141 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4142 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
4143 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
4144 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4145 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
4146 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
4147 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4148 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
4149 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
4150 		     global_timestamp);
4151 
4152 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4153 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
4154 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
4155 		     global_timestamp >> 15);
4156 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4157 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
4158 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
4159 		     mgmt_pkt_ctr);
4160 
4161 	return snapshot;
4162 }
4163 
4164 /**
4165  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
4166  * @arg: Argument
4167  *
4168  * This API handles the management frame at the fw layer. This is applicable
4169  * for simulation alone.
4170  *
4171  * Return: none
4172  */
4173 static void
4174 mgmt_rx_reo_sim_frame_handler_fw(void *arg)
4175 {
4176 	struct mgmt_rx_frame_mac_hw *frame_hw =
4177 					(struct mgmt_rx_frame_mac_hw *)arg;
4178 	uint32_t mac_hw_to_fw_delay_us;
4179 	bool is_consumed_by_fw;
4180 	struct  mgmt_rx_frame_fw *frame_fw;
4181 	int8_t link_id = -1;
4182 	QDF_STATUS status;
4183 	struct mgmt_rx_reo_sim_context *sim_context;
4184 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4185 	struct mgmt_rx_reo_shared_snapshot snapshot_value;
4186 	bool ret;
4187 	uint8_t ml_grp_id;
4188 
4189 	if (!frame_hw) {
4190 		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
4191 				link_id);
4192 		qdf_assert_always(0);
4193 	}
4194 
4195 	link_id = frame_hw->params.link_id;
4196 
4197 	sim_context = frame_hw->sim_context;
4198 	if (!sim_context) {
4199 		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
4200 				link_id);
4201 		goto error_free_mac_hw_frame;
4202 	}
4203 
4204 	ml_grp_id = sim_context->mlo_grp_id;
4205 
4206 	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
4207 			mgmt_rx_reo_sim_get_random_unsigned_int(
4208 			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
4209 	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);
4210 
4211 	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
4212 			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);
4213 
4214 	if (is_consumed_by_fw) {
4215 		/**
4216 		 * This frame should be present in pending/stale list of the
4217 		 * master frame list. FW consumed frames need not be reordered
		 * by the reorder algorithm; they are tracked only for
		 * bookkeeping. Hence remove the frame from the master list.
4220 		 */
4221 		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
4222 					&sim_context->master_frame_list,
4223 					&frame_hw->params);
4224 
4225 		if (QDF_IS_STATUS_ERROR(status)) {
4226 			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
4227 					link_id);
4228 			qdf_assert_always(0);
4229 		}
4230 	}
4231 
4232 	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
4233 			  link_id, frame_hw->params.global_timestamp,
4234 			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);
4235 
4236 	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
4237 	if (!frame_fw) {
4238 		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
4239 				link_id);
4240 		goto error_free_mac_hw_frame;
4241 	}
4242 
4243 	frame_fw->params = frame_hw->params;
4244 	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
4245 	frame_fw->sim_context = frame_hw->sim_context;
4246 
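	/*
	 * FW consumed frames update the FW_CONSUMED snapshot, whereas frames
	 * forwarded to the host update the FW_FORWARDED snapshot.
	 */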
4247 	snapshot_id = is_consumed_by_fw ?
4248 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
4249 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED;
4250 
4251 	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
4252 					frame_hw->params.global_timestamp,
4253 					frame_hw->params.mgmt_pkt_ctr);
4254 
4255 	status = mgmt_rx_reo_sim_write_snapshot(
4256 			link_id, ml_grp_id,
4257 			snapshot_id, snapshot_value);
4258 
4259 	if (QDF_IS_STATUS_ERROR(status)) {
4260 		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
4261 				link_id, snapshot_id);
4262 		goto error_free_fw_frame;
4263 	}
4264 
4265 	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
4266 				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
4267 	if (QDF_IS_STATUS_ERROR(status)) {
4268 		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
4269 		goto error_free_fw_frame;
4270 	}
4271 
4272 	ret = qdf_queue_work(
4273 			NULL, sim_context->host_mgmt_frame_handler[link_id],
4274 			&frame_fw->frame_handler_host);
4275 	if (!ret) {
4276 		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
4277 				link_id);
4278 		goto error_free_fw_frame;
4279 	}
4280 
4281 	qdf_mem_free(frame_hw);
4282 
4283 	return;
4284 
4285 error_free_fw_frame:
4286 	qdf_mem_free(frame_fw);
4287 error_free_mac_hw_frame:
4288 	qdf_mem_free(frame_hw);
4289 
4290 	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
4291 			link_id);
4292 }
4293 
4294 /**
4295  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
4296  * from the index to the valid link list
4297  * @valid_link_list_index: Index to list of valid links
4298  *
4299  * Return: link id
4300  */
4301 static int8_t
4302 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
4303 {
4304 	struct mgmt_rx_reo_sim_context *sim_context;
4305 
4306 	if (valid_link_list_index >= MAX_MLO_LINKS) {
4307 		mgmt_rx_reo_err("Invalid index %u to valid link list",
4308 				valid_link_list_index);
4309 		return MGMT_RX_REO_INVALID_LINK_ID;
4310 	}
4311 
4312 	sim_context = mgmt_rx_reo_sim_get_context();
4313 	if (!sim_context) {
4314 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4315 		return MGMT_RX_REO_INVALID_LINK_ID;
4316 	}
4317 
4318 	return sim_context->link_id_to_pdev_map.valid_link_list
4319 						[valid_link_list_index];
4320 }
4321 
4322 /**
4323  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
4324  * the air
4325  * @mac_hw: pointer to structure representing MAC HW
4326  * @num_mlo_links: number of MLO HW links
4327  * @frame: pointer to management frame parameters
4328  *
4329  * This API simulates the management frame reception from air.
4330  *
4331  * Return: QDF_STATUS
4332  */
4333 static QDF_STATUS
4334 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4335 				 uint8_t num_mlo_links,
4336 				 struct mgmt_rx_frame_params *frame)
4337 {
4338 	uint8_t valid_link_list_index;
4339 	int8_t link_id;
4340 
4341 	if (!mac_hw) {
4342 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4343 		return QDF_STATUS_E_NULL_VALUE;
4344 	}
4345 
4346 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
4347 		mgmt_rx_reo_err("Invalid number of MLO links %u",
4348 				num_mlo_links);
4349 		return QDF_STATUS_E_INVAL;
4350 	}
4351 
4352 	if (!frame) {
4353 		mgmt_rx_reo_err("pointer to frame parameters is null");
4354 		return QDF_STATUS_E_NULL_VALUE;
4355 	}
4356 
4357 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
4358 							num_mlo_links);
4359 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
4360 	qdf_assert_always(link_id >= 0);
4361 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4362 
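	/*
	 * Use the current monotonic time in microseconds as the simulated
	 * MLO global time stamp and a per-link running counter as the
	 * management packet counter.
	 */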
4363 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
4364 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
4365 	frame->link_id = link_id;
4366 
4367 	return QDF_STATUS_SUCCESS;
4368 }
4369 
4370 /**
4371  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
4372  * HW in case of any Rx error.
4373  * @mac_hw: pointer to structure representing MAC HW
4374  * @frame: pointer to management frame parameters
4375  *
4376  * Return: QDF_STATUS
4377  */
4378 static QDF_STATUS
4379 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4380 				      struct mgmt_rx_frame_params *frame)
4381 {
4382 	if (!mac_hw) {
4383 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4384 		return QDF_STATUS_E_NULL_VALUE;
4385 	}
4386 
4387 	if (!frame) {
4388 		mgmt_rx_reo_err("pointer to frame parameters is null");
4389 		return QDF_STATUS_E_NULL_VALUE;
4390 	}
4391 
4392 	if (frame->link_id >= MAX_MLO_LINKS) {
4393 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
4394 		return QDF_STATUS_E_INVAL;
4395 	}
4396 
4397 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
4398 
4399 	return QDF_STATUS_SUCCESS;
4400 }
4401 
4402 /**
4403  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
4404  * @data: pointer to data input
4405  *
4406  * kthread handler to simulate MAC HW.
4407  *
4408  * Return: 0 for success, else failure
4409  */
4410 static int
4411 mgmt_rx_reo_sim_mac_hw_thread(void *data)
4412 {
4413 	struct mgmt_rx_reo_sim_context *sim_context = data;
4414 	struct mgmt_rx_reo_sim_mac_hw *mac_hw;
4415 
4416 	if (!sim_context) {
4417 		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
4418 		return -EINVAL;
4419 	}
4420 
4421 	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;
4422 
4423 	while (!qdf_thread_should_stop()) {
4424 		uint32_t inter_frame_delay_us;
4425 		struct mgmt_rx_frame_params frame;
4426 		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
4427 		int8_t link_id = -1;
4428 		QDF_STATUS status;
4429 		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4430 		struct mgmt_rx_reo_shared_snapshot snapshot_value;
4431 		int8_t num_mlo_links;
4432 		bool ret;
4433 		uint8_t ml_grp_id;
4434 
4435 		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
4436 		if (num_mlo_links < 0 ||
4437 		    num_mlo_links > MAX_MLO_LINKS) {
4438 			mgmt_rx_reo_err("Invalid number of MLO links %d",
4439 					num_mlo_links);
4440 			qdf_assert_always(0);
4441 		}
4442 
4443 		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
4444 							  &frame);
4445 		if (QDF_IS_STATUS_ERROR(status)) {
4446 			mgmt_rx_reo_err("Receive from the air failed");
4447 			/**
4448 			 * Frame reception failed and we are not sure about the
4449 			 * link id. Without link id there is no way to restore
4450 			 * the mac hw state. Hence assert unconditionally.
4451 			 */
4452 			qdf_assert_always(0);
4453 		}
4454 		link_id = frame.link_id;
4455 
4456 		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
4457 				  link_id, frame.global_timestamp,
4458 				  frame.mgmt_pkt_ctr);
4459 
4460 		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
4461 		if (!frame_mac_hw) {
4462 			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
4463 					link_id);
4464 
4465 			/* Cleanup */
4466 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4467 								mac_hw, &frame);
4468 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4469 
4470 			continue;
4471 		}
4472 
4473 		frame_mac_hw->params = frame;
4474 		frame_mac_hw->sim_context = sim_context;
		ml_grp_id = sim_context->mlo_grp_id;
4476 
4477 		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
4478 				&sim_context->master_frame_list, &frame);
4479 		if (QDF_IS_STATUS_ERROR(status)) {
4480 			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
4481 					link_id);
4482 
4483 			/* Cleanup */
4484 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4485 								mac_hw, &frame);
4486 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4487 
4488 			qdf_mem_free(frame_mac_hw);
4489 
4490 			continue;
4491 		}
4492 
4493 		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
4494 		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
4495 						frame.global_timestamp,
4496 						frame.mgmt_pkt_ctr);
4497 
4498 		status = mgmt_rx_reo_sim_write_snapshot(
				link_id, ml_grp_id,
4500 				snapshot_id, snapshot_value);
4501 		if (QDF_IS_STATUS_ERROR(status)) {
4502 			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
4503 					link_id, snapshot_id);
4504 
4505 			/* Cleanup */
4506 			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
4507 				&sim_context->master_frame_list, &frame);
4508 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4509 
4510 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4511 								mac_hw, &frame);
4512 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4513 
4514 			qdf_mem_free(frame_mac_hw);
4515 
4516 			continue;
4517 		}
4518 
4519 		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
4520 					 mgmt_rx_reo_sim_frame_handler_fw,
4521 					 frame_mac_hw);
4522 		if (QDF_IS_STATUS_ERROR(status)) {
4523 			mgmt_rx_reo_err("HW-%d : Failed to create work",
4524 					link_id);
4525 			qdf_assert_always(0);
4526 		}
4527 
4528 		ret = qdf_queue_work(
4529 			NULL, sim_context->fw_mgmt_frame_handler[link_id],
4530 			&frame_mac_hw->frame_handler_fw);
4531 		if (!ret) {
4532 			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
4533 					link_id);
4534 			qdf_assert_always(0);
4535 		}
4536 
4537 		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
4538 			mgmt_rx_reo_sim_get_random_unsigned_int(
4539 			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);
4540 
4541 		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
4542 	}
4543 
4544 	return 0;
4545 }
4546 
4547 /**
4548  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
4549  * management frame list
 * @master_frame_list: Pointer to master frame list
4551  *
4552  * This API initializes the master management frame list
4553  *
4554  * Return: QDF_STATUS
4555  */
4556 static QDF_STATUS
4557 mgmt_rx_reo_sim_init_master_frame_list(
4558 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4559 {
4560 	qdf_spinlock_create(&master_frame_list->lock);
4561 
4562 	qdf_list_create(&master_frame_list->pending_list,
4563 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
4564 	qdf_list_create(&master_frame_list->stale_list,
4565 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
4566 
4567 	return QDF_STATUS_SUCCESS;
4568 }
4569 
4570 /**
 * mgmt_rx_reo_sim_deinit_master_frame_list() - De-initializes the master
4572  * management frame list
4573  * @master_frame_list: Pointer to master frame list
4574  *
 * This API de-initializes the master management frame list
4576  *
4577  * Return: QDF_STATUS
4578  */
4579 static QDF_STATUS
4580 mgmt_rx_reo_sim_deinit_master_frame_list(
4581 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4582 {
4583 	qdf_spin_lock(&master_frame_list->lock);
4584 	qdf_list_destroy(&master_frame_list->stale_list);
4585 	qdf_list_destroy(&master_frame_list->pending_list);
4586 	qdf_spin_unlock(&master_frame_list->lock);
4587 
4588 	qdf_spinlock_destroy(&master_frame_list->lock);
4589 
4590 	return QDF_STATUS_SUCCESS;
4591 }
4592 
4593 /**
4594  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
4595  * unique link id values
4596  * @link_id_to_pdev_map: pointer to link id to pdev map
4597  * @link_id: Pointer to unique link id
4598  *
4599  * This API generates unique link id values for each pdev. This API should be
4600  * called after acquiring the spin lock protecting link id to pdev map.
4601  *
4602  * Return: QDF_STATUS
4603  */
4604 static QDF_STATUS
4605 mgmt_rx_reo_sim_generate_unique_link_id(
4606 		struct wlan_objmgr_pdev **link_id_to_pdev_map, uint8_t *link_id)
4607 {
4608 	uint8_t random_link_id;
4609 	uint8_t link;
4610 
4611 	if (!link_id_to_pdev_map || !link_id)
4612 		return QDF_STATUS_E_NULL_VALUE;
4613 
4614 	for (link = 0; link < MAX_MLO_LINKS; link++)
4615 		if (!link_id_to_pdev_map[link])
4616 			break;
4617 
4618 	if (link == MAX_MLO_LINKS) {
4619 		mgmt_rx_reo_err("All link ids are already allocated");
4620 		return QDF_STATUS_E_FAILURE;
4621 	}
4622 
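	/*
	 * Probe random link ids until a free slot is found. The loop is
	 * guaranteed to terminate because at least one unused slot was
	 * found in the check above.
	 */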
4623 	while (1) {
4624 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
4625 							MAX_MLO_LINKS);
4626 
4627 		if (!link_id_to_pdev_map[random_link_id])
4628 			break;
4629 	}
4630 
4631 	*link_id = random_link_id;
4632 
4633 	return QDF_STATUS_SUCCESS;
4634 }
4635 
4636 /**
4637  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
4638  * to pdev map
4639  * @link_id_to_pdev_map: pointer to link id to pdev map
4640  * @pdev: pointer to pdev object
4641  *
4642  * This API incrementally builds the MLO HW link id to pdev map. This API is
4643  * used only for simulation.
4644  *
4645  * Return: QDF_STATUS
4646  */
4647 static QDF_STATUS
4648 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4649 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4650 		struct wlan_objmgr_pdev *pdev)
4651 {
4652 	uint8_t link_id;
4653 	QDF_STATUS status;
4654 
4655 	if (!link_id_to_pdev_map) {
4656 		mgmt_rx_reo_err("Link id to pdev map is null");
4657 		return QDF_STATUS_E_NULL_VALUE;
4658 	}
4659 
4660 	if (!pdev) {
4661 		mgmt_rx_reo_err("pdev is null");
4662 		return QDF_STATUS_E_NULL_VALUE;
4663 	}
4664 
4665 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4666 
4667 	status = mgmt_rx_reo_sim_generate_unique_link_id(
4668 					link_id_to_pdev_map->map, &link_id);
4669 	if (QDF_IS_STATUS_ERROR(status)) {
4670 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
4671 		return QDF_STATUS_E_FAILURE;
4672 	}
4673 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4674 
4675 	link_id_to_pdev_map->map[link_id] = pdev;
4676 	link_id_to_pdev_map->valid_link_list
4677 			[link_id_to_pdev_map->num_mlo_links] = link_id;
4678 	link_id_to_pdev_map->num_mlo_links++;
4679 
4680 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4681 
4682 	return QDF_STATUS_SUCCESS;
4683 }
4684 
4685 /**
4686  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
4687  * id to pdev map
4688  * @link_id_to_pdev_map: pointer to link id to pdev map
4689  * @pdev: pointer to pdev object
4690  *
4691  * This API incrementally destroys the MLO HW link id to pdev map. This API is
4692  * used only for simulation.
4693  *
4694  * Return: QDF_STATUS
4695  */
4696 static QDF_STATUS
4697 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4698 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4699 		struct wlan_objmgr_pdev *pdev)
4700 {
4701 	uint8_t link_id;
4702 
4703 	if (!link_id_to_pdev_map) {
4704 		mgmt_rx_reo_err("Link id to pdev map is null");
4705 		return QDF_STATUS_E_NULL_VALUE;
4706 	}
4707 
4708 	if (!pdev) {
4709 		mgmt_rx_reo_err("pdev is null");
4710 		return QDF_STATUS_E_NULL_VALUE;
4711 	}
4712 
4713 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4714 
4715 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4716 		if (link_id_to_pdev_map->map[link_id] == pdev) {
4717 			link_id_to_pdev_map->map[link_id] = NULL;
4718 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
4719 
4720 			return QDF_STATUS_SUCCESS;
4721 		}
4722 	}
4723 
4724 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4725 
4726 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
4727 
4728 	return QDF_STATUS_E_FAILURE;
4729 }
4730 
4731 QDF_STATUS
4732 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
4733 {
4734 	struct mgmt_rx_reo_sim_context *sim_context;
4735 	QDF_STATUS status;
4736 
4737 	sim_context = mgmt_rx_reo_sim_get_context();
4738 	if (!sim_context) {
4739 		mgmt_rx_reo_err("Mgmt simulation context is null");
4740 		return QDF_STATUS_E_NULL_VALUE;
4741 	}
4742 
4743 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4744 				&sim_context->link_id_to_pdev_map, pdev);
4745 
4746 	if (QDF_IS_STATUS_ERROR(status)) {
4747 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
4748 		return status;
4749 	}
4750 
4751 	return QDF_STATUS_SUCCESS;
4752 }
4753 
4754 QDF_STATUS
4755 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
4756 {
4757 	struct mgmt_rx_reo_sim_context *sim_context;
4758 	QDF_STATUS status;
4759 
4760 	sim_context = mgmt_rx_reo_sim_get_context();
4761 	if (!sim_context) {
4762 		mgmt_rx_reo_err("Mgmt simulation context is null");
4763 		return QDF_STATUS_E_NULL_VALUE;
4764 	}
4765 
4766 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4767 				&sim_context->link_id_to_pdev_map, pdev);
4768 
4769 	if (QDF_IS_STATUS_ERROR(status)) {
4770 		mgmt_rx_reo_err("Failed to remove pdev from the map");
4771 		return status;
4772 	}
4773 
4774 	return QDF_STATUS_SUCCESS;
4775 }
4776 
4777 QDF_STATUS
4778 mgmt_rx_reo_sim_start(uint8_t ml_grp_id)
4779 {
4780 	struct mgmt_rx_reo_context *reo_context;
4781 	struct mgmt_rx_reo_sim_context *sim_context;
4782 	qdf_thread_t *mac_hw_thread;
4783 	uint8_t link_id;
4784 	uint8_t id;
4785 	QDF_STATUS status;
4786 
4787 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
4788 	if (!reo_context) {
4789 		mgmt_rx_reo_err("reo context is null");
4790 		return QDF_STATUS_E_NULL_VALUE;
4791 	}
4792 
4793 	reo_context->simulation_in_progress = true;
4794 
4795 	sim_context = &reo_context->sim_context;
4796 
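	/*
	 * Create one ordered work queue per link for the simulated FW and
	 * one for the simulated host so that the frames of a given link are
	 * consumed strictly in the order in which they are queued.
	 */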
4797 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4798 		struct workqueue_struct *wq;
4799 
4800 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
4801 					     link_id);
4802 		if (!wq) {
4803 			mgmt_rx_reo_err("Host workqueue creation failed");
4804 			status = QDF_STATUS_E_FAILURE;
4805 			goto error_destroy_fw_and_host_work_queues_till_last_link;
4806 		}
4807 		sim_context->host_mgmt_frame_handler[link_id] = wq;
4808 
4809 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
4810 					     link_id);
4811 		if (!wq) {
4812 			mgmt_rx_reo_err("FW workqueue creation failed");
4813 			status = QDF_STATUS_E_FAILURE;
4814 			goto error_destroy_host_work_queue_of_last_link;
4815 		}
4816 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
4817 	}
4818 
4819 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
4820 					  sim_context, "MAC_HW_thread");
4821 	if (!mac_hw_thread) {
4822 		mgmt_rx_reo_err("MAC HW thread creation failed");
4823 		status = QDF_STATUS_E_FAILURE;
4824 		goto error_destroy_fw_and_host_work_queues_till_last_link;
4825 	}
4826 
4827 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
4828 
4829 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
4830 
4831 	return QDF_STATUS_SUCCESS;
4832 
4837 error_destroy_host_work_queue_of_last_link:
4838 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4839 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4840 
4841 error_destroy_fw_and_host_work_queues_till_last_link:
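	/* Drain and destroy the work queues of all links created so far */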
4842 	for (id = 0; id < link_id; id++) {
4843 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4844 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4845 
4846 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
4847 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
4848 	}
4849 
4850 	return status;
4851 }
4852 
4853 QDF_STATUS
4854 mgmt_rx_reo_sim_stop(uint8_t ml_grp_id)
4855 {
4856 	struct mgmt_rx_reo_context *reo_context;
4857 	struct mgmt_rx_reo_sim_context *sim_context;
4858 	struct mgmt_rx_reo_master_frame_list *master_frame_list;
4859 	uint8_t link_id;
4860 	QDF_STATUS status;
4861 
4862 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
4863 	if (!reo_context) {
4864 		mgmt_rx_reo_err("reo context is null");
4865 		return QDF_STATUS_E_NULL_VALUE;
4866 	}
4867 
4868 	sim_context = &reo_context->sim_context;
4869 
4870 	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
4871 	if (QDF_IS_STATUS_ERROR(status)) {
4872 		mgmt_rx_reo_err("Failed to stop the thread");
4873 		return status;
4874 	}
4875 
4876 	sim_context->mac_hw_sim.mac_hw_thread = NULL;
4877 
4878 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4879 		/* Wait for all the pending frames to be processed by FW */
4880 		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4881 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4882 
4883 		/* Wait for all the pending frames to be processed by host */
4884 		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4885 		destroy_workqueue(
4886 				sim_context->host_mgmt_frame_handler[link_id]);
4887 	}
4888 
4889 	status = mgmt_rx_reo_print_ingress_frame_info(ml_grp_id,
4890 			MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
4891 	if (QDF_IS_STATUS_ERROR(status)) {
4892 		mgmt_rx_reo_err("Failed to print ingress frame debug info");
4893 		return status;
4894 	}
4895 
4896 	status = mgmt_rx_reo_print_egress_frame_info(ml_grp_id,
4897 			MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
4898 	if (QDF_IS_STATUS_ERROR(status)) {
4899 		mgmt_rx_reo_err("Failed to print egress frame debug info");
4900 		return status;
4901 	}
4902 
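	/*
	 * All the frames generated by the simulated MAC HW should have been
	 * reordered and consumed by now. Any frame left in the pending or
	 * stale list indicates a reordering failure.
	 */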
4903 	master_frame_list = &sim_context->master_frame_list;
4904 	if (!qdf_list_empty(&master_frame_list->pending_list) ||
4905 	    !qdf_list_empty(&master_frame_list->stale_list)) {
4906 		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");
4907 
4908 		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
4909 		if (QDF_IS_STATUS_ERROR(status)) {
4910 			mgmt_rx_reo_err("Failed to print reorder list");
4911 			return status;
4912 		}
4913 
4914 		qdf_assert_always(0);
4915 	} else {
4916 		mgmt_rx_reo_err("reo sim passed");
4917 	}
4918 
4919 	reo_context->simulation_in_progress = false;
4920 
4921 	return QDF_STATUS_SUCCESS;
4922 }
4923 
4924 /**
4925  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
4926  * context.
4927  * @reo_context: Pointer to reo context
4929  *
4930  * Return: QDF_STATUS of operation
4931  */
4932 static QDF_STATUS
4933 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
4934 {
4935 	QDF_STATUS status;
4936 	struct mgmt_rx_reo_sim_context *sim_context;
4937 	uint8_t link_id;
4938 
4939 	if (!reo_context) {
4940 		mgmt_rx_reo_err("reo context is null");
4941 		return QDF_STATUS_E_NULL_VALUE;
4942 	}
4943 
4944 	sim_context = &reo_context->sim_context;
4945 
4946 	qdf_mem_zero(sim_context, sizeof(*sim_context));
4947 	sim_context->mlo_grp_id = reo_context->mlo_grp_id;
4948 
4949 	status = mgmt_rx_reo_sim_init_master_frame_list(
4950 					&sim_context->master_frame_list);
4951 	if (QDF_IS_STATUS_ERROR(status)) {
4952 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
4953 		return status;
4954 	}
4955 
4956 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
4957 
4958 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
4959 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
4960 					MGMT_RX_REO_INVALID_LINK_ID;
4961 
4962 	return QDF_STATUS_SUCCESS;
4963 }
4964 
4965 /**
4966  * mgmt_rx_reo_sim_deinit() - De-initialize management rx reorder simulation
4967  * context.
4968  * @reo_context: Pointer to reo context
4969  *
4970  * Return: QDF_STATUS of operation
4971  */
4972 static QDF_STATUS
4973 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
4974 {
4975 	QDF_STATUS status;
4976 	struct mgmt_rx_reo_sim_context *sim_context;
4977 
4978 	if (!reo_context) {
4979 		mgmt_rx_reo_err("reo context is null");
4980 		return QDF_STATUS_E_NULL_VALUE;
4981 	}
4982 
4983 	sim_context = &reo_context->sim_context;
4984 
4985 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
4986 
4987 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
4988 					&sim_context->master_frame_list);
4989 	if (QDF_IS_STATUS_ERROR(status)) {
4990 		mgmt_rx_reo_err("Failed to destroy master frame list");
4991 		return status;
4992 	}
4993 
4994 	return QDF_STATUS_SUCCESS;
4995 }
4996 
4997 QDF_STATUS
4998 mgmt_rx_reo_sim_get_snapshot_address(
4999 			struct wlan_objmgr_pdev *pdev,
5000 			enum mgmt_rx_reo_shared_snapshot_id id,
5001 			struct mgmt_rx_reo_shared_snapshot **address)
5002 {
5003 	int8_t link_id;
5004 	struct mgmt_rx_reo_sim_context *sim_context;
5005 
5006 	sim_context = mgmt_rx_reo_sim_get_context();
5007 	if (!sim_context) {
5008 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
5009 		return QDF_STATUS_E_NULL_VALUE;
5010 	}
5011 
5012 	if (!pdev) {
5013 		mgmt_rx_reo_err("pdev is NULL");
5014 		return QDF_STATUS_E_NULL_VALUE;
5015 	}
5016 
5017 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5018 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
5019 		return QDF_STATUS_E_INVAL;
5020 	}
5021 
5022 	if (!address) {
5023 		mgmt_rx_reo_err("Pointer to snapshot address is null");
5024 		return QDF_STATUS_E_NULL_VALUE;
5025 	}
5026 
5027 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
5028 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
5029 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
5030 				pdev);
5031 		return QDF_STATUS_E_INVAL;
5032 	}
5033 
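	/*
	 * In simulation, the shared snapshots live in host memory inside
	 * the simulation context instead of a memory region shared with
	 * the target.
	 */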
5034 	*address = &sim_context->snapshot[link_id][id];
5035 
5036 	return QDF_STATUS_SUCCESS;
5037 }
5038 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
5039 
5040 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
5041 /**
5042  * mgmt_rx_reo_ingress_debug_info_init() - Initialize the management rx-reorder
5043  * ingress frame debug info
5044  * @psoc: Pointer to psoc
5045  * @ingress_debug_info_init_count: Initialization count
5046  * @ingress_frame_debug_info: Ingress frame debug info object
5047  *
5048  * API to initialize the management rx-reorder ingress frame debug info.
5049  *
5050  * Return: QDF_STATUS
5051  */
5052 static QDF_STATUS
5053 mgmt_rx_reo_ingress_debug_info_init
5054 		(struct wlan_objmgr_psoc *psoc,
5055 		 qdf_atomic_t *ingress_debug_info_init_count,
5056 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
5057 {
5058 	if (!psoc) {
5059 		mgmt_rx_reo_err("psoc is null");
5060 		return QDF_STATUS_E_NULL_VALUE;
5061 	}
5062 
5063 	if (!ingress_frame_debug_info) {
5064 		mgmt_rx_reo_err("Ingress frame debug info is null");
5065 		return QDF_STATUS_E_NULL_VALUE;
5066 	}
5067 
5068 	/* We need to initialize only for the first invocation */
5069 	if (qdf_atomic_read(ingress_debug_info_init_count))
5070 		goto success;
5071 
5072 	ingress_frame_debug_info->frame_list_size =
5073 		wlan_mgmt_rx_reo_get_ingress_frame_debug_list_size(psoc);
5074 
5075 	if (ingress_frame_debug_info->frame_list_size) {
5076 		ingress_frame_debug_info->frame_list = qdf_mem_malloc
5077 			(ingress_frame_debug_info->frame_list_size *
5078 			 sizeof(*ingress_frame_debug_info->frame_list));
5079 
5080 		if (!ingress_frame_debug_info->frame_list) {
5081 			mgmt_rx_reo_err("Failed to allocate debug info");
5082 			return QDF_STATUS_E_NOMEM;
5083 		}
5084 	}
5085 
5086 	/* Initialize the string used as the debug info table border */
5087 	qdf_mem_set(ingress_frame_debug_info->boarder,
5088 		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5089 
5090 success:
5091 	qdf_atomic_inc(ingress_debug_info_init_count);
5092 	return QDF_STATUS_SUCCESS;
5093 }
5094 
5095 /**
5096  * mgmt_rx_reo_egress_debug_info_init() - Initialize the management rx-reorder
5097  * egress frame debug info
5098  * @psoc: Pointer to psoc
5099  * @egress_debug_info_init_count: Initialization count
5100  * @egress_frame_debug_info: Egress frame debug info object
5101  *
5102  * API to initialize the management rx-reorder egress frame debug info.
5103  *
5104  * Return: QDF_STATUS
5105  */
5106 static QDF_STATUS
5107 mgmt_rx_reo_egress_debug_info_init
5108 		(struct wlan_objmgr_psoc *psoc,
5109 		 qdf_atomic_t *egress_debug_info_init_count,
5110 		 struct reo_egress_debug_info *egress_frame_debug_info)
5111 {
5112 	if (!psoc) {
5113 		mgmt_rx_reo_err("psoc is null");
5114 		return QDF_STATUS_E_NULL_VALUE;
5115 	}
5116 
5117 	if (!egress_frame_debug_info) {
5118 		mgmt_rx_reo_err("Egress frame debug info is null");
5119 		return QDF_STATUS_E_NULL_VALUE;
5120 	}
5121 
5122 	/* We need to initialize only for the first invocation */
5123 	if (qdf_atomic_read(egress_debug_info_init_count))
5124 		goto success;
5125 
5126 	egress_frame_debug_info->frame_list_size =
5127 		wlan_mgmt_rx_reo_get_egress_frame_debug_list_size(psoc);
5128 
5129 	if (egress_frame_debug_info->frame_list_size) {
5130 		egress_frame_debug_info->frame_list = qdf_mem_malloc
5131 				(egress_frame_debug_info->frame_list_size *
5132 				 sizeof(*egress_frame_debug_info->frame_list));
5133 
5134 		if (!egress_frame_debug_info->frame_list) {
5135 			mgmt_rx_reo_err("Failed to allocate debug info");
5136 			return QDF_STATUS_E_NOMEM;
5137 		}
5138 	}
5139 
5140 	/* Initialize the string used as the debug info table border */
5141 	qdf_mem_set(egress_frame_debug_info->boarder,
5142 		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5143 
5144 success:
5145 	qdf_atomic_inc(egress_debug_info_init_count);
5146 	return QDF_STATUS_SUCCESS;
5147 }
5148 
5149 /**
5150  * mgmt_rx_reo_debug_info_init() - Initialize the management rx-reorder debug
5151  * info
5152  * @pdev: pointer to pdev object
5153  *
5154  * API to initialize the management rx-reorder debug info.
5155  *
5156  * Return: QDF_STATUS
5157  */
5158 static QDF_STATUS
5159 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
5160 {
5161 	struct mgmt_rx_reo_context *reo_context;
5162 	QDF_STATUS status;
5163 	struct wlan_objmgr_psoc *psoc;
5164 
5165 	psoc = wlan_pdev_get_psoc(pdev);
5166 
5167 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
5168 		return QDF_STATUS_SUCCESS;
5169 
5170 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
5171 	if (!reo_context) {
5172 		mgmt_rx_reo_err("reo context is null");
5173 		return QDF_STATUS_E_NULL_VALUE;
5174 	}
5175 
5176 	status = mgmt_rx_reo_ingress_debug_info_init
5177 			(psoc, &reo_context->ingress_debug_info_init_count,
5178 			 &reo_context->ingress_frame_debug_info);
5179 	if (QDF_IS_STATUS_ERROR(status)) {
5180 		mgmt_rx_reo_err("Failed to initialize ingress debug info");
5181 		return QDF_STATUS_E_FAILURE;
5182 	}
5183 
5184 	status = mgmt_rx_reo_egress_debug_info_init
5185 			(psoc, &reo_context->egress_debug_info_init_count,
5186 			 &reo_context->egress_frame_debug_info);
5187 	if (QDF_IS_STATUS_ERROR(status)) {
5188 		mgmt_rx_reo_err("Failed to initialize egress debug info");
5189 		return QDF_STATUS_E_FAILURE;
5190 	}
5191 
5192 	return QDF_STATUS_SUCCESS;
5193 }
5194 
5195 /**
5196  * mgmt_rx_reo_ingress_debug_info_deinit() - De-initialize the management
5197  * rx-reorder ingress frame debug info
5198  * @psoc: Pointer to psoc
5199  * @ingress_debug_info_init_count: Initialization count
5200  * @ingress_frame_debug_info: Ingress frame debug info object
5201  *
5202  * API to de-initialize the management rx-reorder ingress frame debug info.
5203  *
5204  * Return: QDF_STATUS
5205  */
5206 static QDF_STATUS
5207 mgmt_rx_reo_ingress_debug_info_deinit
5208 		(struct wlan_objmgr_psoc *psoc,
5209 		 qdf_atomic_t *ingress_debug_info_init_count,
5210 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
5211 {
5212 	if (!psoc) {
5213 		mgmt_rx_reo_err("psoc is null");
5214 		return QDF_STATUS_E_NULL_VALUE;
5215 	}
5216 
5217 	if (!ingress_frame_debug_info) {
5218 		mgmt_rx_reo_err("Ingress frame debug info is null");
5219 		return QDF_STATUS_E_NULL_VALUE;
5220 	}
5221 
5222 	if (!qdf_atomic_read(ingress_debug_info_init_count)) {
5223 		mgmt_rx_reo_err("Ingress debug info ref cnt is 0");
5224 		return QDF_STATUS_E_FAILURE;
5225 	}
5226 
5227 	/* We need to de-initialize only for the last invocation */
5228 	if (!qdf_atomic_dec_and_test(ingress_debug_info_init_count))
5229 		goto success;
5230 
5231 	if (ingress_frame_debug_info->frame_list) {
5232 		qdf_mem_free(ingress_frame_debug_info->frame_list);
5233 		ingress_frame_debug_info->frame_list = NULL;
5234 	}
5235 	ingress_frame_debug_info->frame_list_size = 0;
5236 
5237 	qdf_mem_zero(ingress_frame_debug_info->boarder,
5238 		     MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
5239 
5240 success:
5241 	return QDF_STATUS_SUCCESS;
5242 }
5243 
5244 /**
5245  * mgmt_rx_reo_egress_debug_info_deinit() - De-initialize the management
5246  * rx-reorder egress frame debug info
5247  * @psoc: Pointer to psoc
5248  * @egress_debug_info_init_count: Initialization count
5249  * @egress_frame_debug_info: Egress frame debug info object
5250  *
5251  * API to de-initialize the management rx-reorder egress frame debug info.
5252  *
5253  * Return: QDF_STATUS
5254  */
5255 static QDF_STATUS
5256 mgmt_rx_reo_egress_debug_info_deinit
5257 		(struct wlan_objmgr_psoc *psoc,
5258 		 qdf_atomic_t *egress_debug_info_init_count,
5259 		 struct reo_egress_debug_info *egress_frame_debug_info)
5260 {
5261 	if (!psoc) {
5262 		mgmt_rx_reo_err("psoc is null");
5263 		return QDF_STATUS_E_NULL_VALUE;
5264 	}
5265 
5266 	if (!egress_frame_debug_info) {
5267 		mgmt_rx_reo_err("Egress frame debug info is null");
5268 		return QDF_STATUS_E_NULL_VALUE;
5269 	}
5270 
5271 	if (!qdf_atomic_read(egress_debug_info_init_count)) {
5272 		mgmt_rx_reo_err("Egress debug info ref cnt is 0");
5273 		return QDF_STATUS_E_FAILURE;
5274 	}
5275 
5276 	/* We need to de-initialize only for the last invocation */
5277 	if (!qdf_atomic_dec_and_test(egress_debug_info_init_count))
5278 		goto success;
5279 
5280 	if (egress_frame_debug_info->frame_list) {
5281 		qdf_mem_free(egress_frame_debug_info->frame_list);
5282 		egress_frame_debug_info->frame_list = NULL;
5283 	}
5284 	egress_frame_debug_info->frame_list_size = 0;
5285 
5286 	qdf_mem_zero(egress_frame_debug_info->boarder,
5287 		     MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
5288 
5289 success:
5290 	return QDF_STATUS_SUCCESS;
5291 }
5292 
5293 /**
5294  * mgmt_rx_reo_debug_info_deinit() - De-initialize the management rx-reorder
5295  * debug info
5296  * @pdev: Pointer to pdev object
5297  *
5298  * API to de-initialize the management rx-reorder debug info.
5299  *
5300  * Return: QDF_STATUS
5301  */
5302 static QDF_STATUS
5303 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
5304 {
5305 	struct mgmt_rx_reo_context *reo_context;
5306 	QDF_STATUS status;
5307 	struct wlan_objmgr_psoc *psoc;
5308 
5309 	psoc = wlan_pdev_get_psoc(pdev);
5310 
5311 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
5312 		return QDF_STATUS_SUCCESS;
5313 
5314 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
5315 	if (!reo_context) {
5316 		mgmt_rx_reo_err("reo context is null");
5317 		return QDF_STATUS_E_NULL_VALUE;
5318 	}
5319 
5320 	status = mgmt_rx_reo_ingress_debug_info_deinit
5321 			(psoc, &reo_context->ingress_debug_info_init_count,
5322 			 &reo_context->ingress_frame_debug_info);
5323 	if (QDF_IS_STATUS_ERROR(status)) {
5324 		mgmt_rx_reo_err("Failed to deinitialize ingress debug info");
5325 		return QDF_STATUS_E_FAILURE;
5326 	}
5327 
5328 	status = mgmt_rx_reo_egress_debug_info_deinit
5329 			(psoc, &reo_context->egress_debug_info_init_count,
5330 			 &reo_context->egress_frame_debug_info);
5331 	if (QDF_IS_STATUS_ERROR(status)) {
5332 		mgmt_rx_reo_err("Failed to deinitialize egress debug info");
5333 		return QDF_STATUS_E_FAILURE;
5334 	}
5335 
5336 	return QDF_STATUS_SUCCESS;
5337 }
5338 #else
5339 static QDF_STATUS
5340 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
5341 {
5342 	return QDF_STATUS_SUCCESS;
5343 }
5344 
5345 static QDF_STATUS
5346 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
5347 {
5348 	return QDF_STATUS_SUCCESS;
5349 }
5350 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5351 
5352 /**
5353  * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
5354  * @reo_list: Pointer to reorder list
5355  *
5356  * API to flush all the entries of the reorder list. This API would acquire
5357  * the lock protecting the list.
5358  *
5359  * Return: QDF_STATUS
5360  */
5361 static QDF_STATUS
5362 mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
5363 {
5364 	struct mgmt_rx_reo_list_entry *cur_entry;
5365 	struct mgmt_rx_reo_list_entry *temp;
5366 
5367 	if (!reo_list) {
5368 		mgmt_rx_reo_err("reorder list is null");
5369 		return QDF_STATUS_E_NULL_VALUE;
5370 	}
5371 
5372 	qdf_spin_lock_bh(&reo_list->list_lock);
5373 
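	/*
	 * Drop every entry without delivering it further; this path runs
	 * only while the reorder list itself is being de-initialized.
	 */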
5374 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
5375 		free_mgmt_rx_event_params(cur_entry->rx_params);
5376 
5377 		/*
5378 		 * Release the reference taken when the entry is inserted into
5379 		 * the reorder list.
5380 		 */
5381 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
5382 					     WLAN_MGMT_RX_REO_ID);
5383 
5384 		qdf_mem_free(cur_entry);
5385 	}
5386 
5387 	qdf_spin_unlock_bh(&reo_list->list_lock);
5388 
5389 	return QDF_STATUS_SUCCESS;
5390 }
5391 
5392 /**
5393  * mgmt_rx_reo_list_deinit() - De-initialize the management rx-reorder list
5394  * @reo_list: Pointer to reorder list
5395  *
5396  * API to de-initialize the management rx-reorder list.
5397  *
5398  * Return: QDF_STATUS
5399  */
5400 static QDF_STATUS
5401 mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list)
5402 {
5403 	QDF_STATUS status;
5404 
5405 	qdf_timer_free(&reo_list->global_mgmt_rx_inactivity_timer);
5406 	qdf_timer_free(&reo_list->ageout_timer);
5407 
5408 	status = mgmt_rx_reo_flush_reorder_list(reo_list);
5409 	if (QDF_IS_STATUS_ERROR(status)) {
5410 		mgmt_rx_reo_err("Failed to flush the reorder list");
5411 		return QDF_STATUS_E_FAILURE;
5412 	}
5413 	qdf_spinlock_destroy(&reo_list->list_lock);
5414 	qdf_list_destroy(&reo_list->list);
5415 
5416 	return QDF_STATUS_SUCCESS;
5417 }
5418 
5419 QDF_STATUS
5420 mgmt_rx_reo_deinit_context(uint8_t ml_grp_id)
5421 {
5422 	QDF_STATUS status;
5423 	struct mgmt_rx_reo_context *reo_context;
5424 
5425 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5426 	if (!reo_context) {
5427 		mgmt_rx_reo_err("reo context is null");
5428 		return QDF_STATUS_E_NULL_VALUE;
5429 	}
5430 
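	/*
	 * Cancel the timers synchronously so that no timer handler touches
	 * the reorder list while it is being de-initialized below.
	 */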
5431 	qdf_timer_sync_cancel(
5432 			&reo_context->reo_list.global_mgmt_rx_inactivity_timer);
5433 	qdf_timer_sync_cancel(&reo_context->reo_list.ageout_timer);
5434 
5435 	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);
5436 
5437 	status = mgmt_rx_reo_sim_deinit(reo_context);
5438 	if (QDF_IS_STATUS_ERROR(status)) {
5439 		mgmt_rx_reo_err("Failed to de initialize reo sim context");
5440 		qdf_mem_free(reo_context);
5441 		return QDF_STATUS_E_FAILURE;
5442 	}
5443 
5444 	status = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
5445 	if (QDF_IS_STATUS_ERROR(status)) {
5446 		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
5447 		qdf_mem_free(reo_context);
5448 		return status;
5449 	}
5450 
5451 	qdf_mem_free(reo_context);
5452 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
5453 
5454 	return QDF_STATUS_SUCCESS;
5455 }
5456 
5457 QDF_STATUS
5458 mgmt_rx_reo_init_context(uint8_t ml_grp_id)
5459 {
5460 	QDF_STATUS status;
5461 	QDF_STATUS temp;
5462 	struct mgmt_rx_reo_context *reo_context;
5463 
5464 	reo_context = qdf_mem_malloc(sizeof(struct mgmt_rx_reo_context));
5465 	if (!reo_context) {
5466 		mgmt_rx_reo_err("Failed to allocate reo context");
5467 		return QDF_STATUS_E_NOMEM;
5468 	}
5469 
5470 	mgmt_rx_reo_set_context(ml_grp_id, reo_context);
5471 
5472 	reo_context->mlo_grp_id = ml_grp_id;
5473 
5474 	status = mgmt_rx_reo_list_init(&reo_context->reo_list);
5475 	if (QDF_IS_STATUS_ERROR(status)) {
5476 		mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list");
5477 		return status;
5478 	}
5479 
5480 	status = mgmt_rx_reo_sim_init(reo_context);
5481 	if (QDF_IS_STATUS_ERROR(status)) {
5482 		mgmt_rx_reo_err("Failed to initialize reo simulation context");
5483 		goto error_reo_list_deinit;
5484 	}
5485 
5486 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
5487 
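	/* Arm the periodic ageout timer of the reorder list */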
5488 	qdf_timer_mod(&reo_context->reo_list.ageout_timer,
5489 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
5490 
5491 	return QDF_STATUS_SUCCESS;
5492 
5493 error_reo_list_deinit:
5494 	temp = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
5495 	if (QDF_IS_STATUS_ERROR(temp)) {
5496 		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
5497 		return temp;
5498 	}
5499 
5500 	return status;
5501 }
5502 
5503 /**
5504  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
5505  * params object
5506  * @snapshot_params: Pointer to snapshot params object
5507  *
5508  * Return: void
5509  */
5510 static void
5511 wlan_mgmt_rx_reo_initialize_snapshot_params(
5512 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
5513 {
5514 	snapshot_params->valid = false;
5515 	snapshot_params->mgmt_pkt_ctr = 0;
5516 	snapshot_params->global_timestamp = 0;
5517 }
5518 
5519 /**
5520  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
5521  * snapshot addresses for a given pdev
5522  * @pdev: pointer to pdev object
5523  *
5524  * Return: QDF_STATUS
5525  */
5526 static QDF_STATUS
5527 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
5528 {
5529 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5530 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5531 	QDF_STATUS status;
5532 
5533 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5534 	if (!mgmt_rx_reo_pdev_ctx) {
5535 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5536 		return QDF_STATUS_E_NULL_VALUE;
5537 	}
5538 
5539 	snapshot_id = 0;
5540 
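	/*
	 * Fetch the info of every host-target shared snapshot of this pdev
	 * and cache it in the Rx REO pdev private object.
	 */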
5541 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5542 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
5543 
5544 		snapshot_info =
5545 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
5546 			[snapshot_id];
5547 		status = wlan_mgmt_rx_reo_get_snapshot_info
5548 					(pdev, snapshot_id, snapshot_info);
5549 		if (QDF_IS_STATUS_ERROR(status)) {
5550 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
5551 					snapshot_id);
5552 			return status;
5553 		}
5554 
5555 		snapshot_id++;
5556 	}
5557 
5558 	return QDF_STATUS_SUCCESS;
5559 }
5560 
5561 /**
5562  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
5563  * snapshot values for a given pdev
5564  * @pdev: pointer to pdev object
5565  *
5566  * Return: QDF_STATUS
5567  */
5568 static QDF_STATUS
5569 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
5570 {
5571 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5572 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5573 
5574 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5575 	if (!mgmt_rx_reo_pdev_ctx) {
5576 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5577 		return QDF_STATUS_E_NULL_VALUE;
5578 	}
5579 
5580 	snapshot_id = 0;
5581 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5582 		wlan_mgmt_rx_reo_initialize_snapshot_params
5583 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
5584 			 [snapshot_id]);
5585 		snapshot_id++;
5586 	}
5587 
5588 	/* Initialize Host snapshot params */
5589 	wlan_mgmt_rx_reo_initialize_snapshot_params
5590 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
5591 
5592 	return QDF_STATUS_SUCCESS;
5593 }
5594 
5595 /**
5596  * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
5597  * for management Rx REO pdev component private object
5598  * @pdev: pointer to pdev object
5599  *
5600  * Return: QDF_STATUS
5601  */
5602 static QDF_STATUS
5603 mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
5604 {
5605 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5606 
5607 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5608 	if (!mgmt_rx_reo_pdev_ctx) {
5609 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5610 		return QDF_STATUS_E_NULL_VALUE;
5611 	}
5612 
5613 	mgmt_rx_reo_pdev_ctx->init_complete = true;
5614 
5615 	return QDF_STATUS_SUCCESS;
5616 }
5617 
5618 /**
5619  * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
5620  * for management Rx REO pdev component private object
5621  * @pdev: pointer to pdev object
5622  *
5623  * Return: QDF_STATUS
5624  */
5625 static QDF_STATUS
5626 mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
5627 {
5628 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5629 
5630 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5631 	if (!mgmt_rx_reo_pdev_ctx) {
5632 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5633 		return QDF_STATUS_E_NULL_VALUE;
5634 	}
5635 
5636 	mgmt_rx_reo_pdev_ctx->init_complete = false;
5637 
5638 	return QDF_STATUS_SUCCESS;
5639 }
5640 
5641 /**
5642  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
5643  * snapshot related data structures for a given pdev
5644  * @pdev: pointer to pdev object
5645  *
5646  * Return: QDF_STATUS
5647  */
5648 static QDF_STATUS
5649 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
5650 {
5651 	QDF_STATUS status;
5652 
5653 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5654 	if (QDF_IS_STATUS_ERROR(status)) {
5655 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5656 		return status;
5657 	}
5658 
5659 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
5660 	if (QDF_IS_STATUS_ERROR(status)) {
5661 		mgmt_rx_reo_err("Failed to initialize snapshot address");
5662 		return status;
5663 	}
5664 
5665 	return QDF_STATUS_SUCCESS;
5666 }
5667 
5668 /**
5669  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
5670  * data structures for a given pdev
5671  * @pdev: pointer to pdev object
5672  *
5673  * Return: QDF_STATUS
5674  */
5675 static QDF_STATUS
5676 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
5677 {
5678 	QDF_STATUS status;
5679 
5680 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5681 	if (QDF_IS_STATUS_ERROR(status)) {
5682 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5683 		return status;
5684 	}
5685 
5686 	return QDF_STATUS_SUCCESS;
5687 }
5688 
5689 QDF_STATUS
5690 mgmt_rx_reo_attach(struct wlan_objmgr_pdev *pdev)
5691 {
5692 	QDF_STATUS status;
5693 
5694 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5695 		return QDF_STATUS_SUCCESS;
5696 
5697 	status = mgmt_rx_reo_initialize_snapshots(pdev);
5698 	if (QDF_IS_STATUS_ERROR(status)) {
5699 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
5700 		return status;
5701 	}
5702 
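	/* Flag the pdev as initialized only after the snapshots are set up */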
5703 	status = mgmt_rx_reo_set_initialization_complete(pdev);
5704 	if (QDF_IS_STATUS_ERROR(status)) {
5705 		mgmt_rx_reo_err("Failed to set initialization complete");
5706 		return status;
5707 	}
5708 
5709 	return QDF_STATUS_SUCCESS;
5710 }
5711 
5712 QDF_STATUS
5713 mgmt_rx_reo_detach(struct wlan_objmgr_pdev *pdev)
5714 {
5715 	QDF_STATUS status;
5716 
5717 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5718 		return QDF_STATUS_SUCCESS;
5719 
5720 	status = mgmt_rx_reo_clear_initialization_complete(pdev);
5721 	if (QDF_IS_STATUS_ERROR(status)) {
5722 		mgmt_rx_reo_err("Failed to clear initialization complete");
5723 		return status;
5724 	}
5725 
5726 	status = mgmt_rx_reo_clear_snapshots(pdev);
5727 	if (QDF_IS_STATUS_ERROR(status)) {
5728 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
5729 		return status;
5730 	}
5731 
5732 	return QDF_STATUS_SUCCESS;
5733 }
5734 
5735 QDF_STATUS
5736 mgmt_rx_reo_pdev_obj_create_notification(
5737 	struct wlan_objmgr_pdev *pdev,
5738 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
5739 {
5740 	QDF_STATUS status;
5741 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
5742 
5743 	if (!pdev) {
5744 		mgmt_rx_reo_err("pdev is null");
5745 		status = QDF_STATUS_E_NULL_VALUE;
5746 		goto failure;
5747 	}
5748 
5749 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
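		/*
		 * Not an error: when the feature is disabled for this pdev,
		 * take the common exit path with a success status and leave
		 * the REO pdev context as NULL.
		 */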
5750 		status = QDF_STATUS_SUCCESS;
5751 		goto failure;
5752 	}
5753 
5754 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
5755 	if (QDF_IS_STATUS_ERROR(status)) {
5756 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
5757 		goto failure;
5758 	}
5759 
5760 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
5761 	if (!mgmt_rx_reo_pdev_ctx) {
5762 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
5763 		status = QDF_STATUS_E_NOMEM;
5764 		goto failure;
5765 	}
5766 
5767 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
5768 
5769 	status = mgmt_rx_reo_debug_info_init(pdev);
5770 	if (QDF_IS_STATUS_ERROR(status)) {
5771 		mgmt_rx_reo_err("Failed to initialize debug info");
5773 		goto failure;
5774 	}
5775 
5776 	return QDF_STATUS_SUCCESS;
5777 
5778 failure:
5779 	if (mgmt_rx_reo_pdev_ctx)
5780 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
5781 
5782 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
5783 
5784 	return status;
5785 }
5786 
5787 QDF_STATUS
5788 mgmt_rx_reo_pdev_obj_destroy_notification(
5789 	struct wlan_objmgr_pdev *pdev,
5790 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
5791 {
5792 	QDF_STATUS status;
5793 
5794 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5795 		return QDF_STATUS_SUCCESS;
5796 
5797 	status = mgmt_rx_reo_debug_info_deinit(pdev);
5798 	if (QDF_IS_STATUS_ERROR(status)) {
5799 		mgmt_rx_reo_err("Failed to de-initialize debug info");
5800 		return status;
5801 	}
5802 
5803 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
5804 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
5805 
5806 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
5807 	if (QDF_IS_STATUS_ERROR(status)) {
5808 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
5809 		return status;
5810 	}
5811 
5812 	return QDF_STATUS_SUCCESS;
5813 }
5814 
5815 QDF_STATUS
5816 mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc)
5817 {
5818 	return QDF_STATUS_SUCCESS;
5819 }
5820 
5821 QDF_STATUS
5822 mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc)
5823 {
5824 	return QDF_STATUS_SUCCESS;
5825 }
5826 
5827 bool
5828 mgmt_rx_reo_is_simulation_in_progress(uint8_t ml_grp_id)
5829 {
5830 	struct mgmt_rx_reo_context *reo_context;
5831 
5832 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5833 	if (!reo_context) {
5834 		mgmt_rx_reo_err("reo context is null");
5835 		return false;
5836 	}
5837 
5838 	return reo_context->simulation_in_progress;
5839 }
5840 
5841 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
5842 QDF_STATUS
5843 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
5844 {
5845 	struct mgmt_rx_reo_context *reo_context;
5846 	QDF_STATUS status;
5847 
5848 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5849 	if (!reo_context) {
5850 		mgmt_rx_reo_err("reo context is null");
5851 		return QDF_STATUS_E_NULL_VALUE;
5852 	}
5853 
5854 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
5855 	if (QDF_IS_STATUS_ERROR(status)) {
5856 		mgmt_rx_reo_err("Failed to print ingress frame stats");
5857 		return status;
5858 	}
5859 
5860 	return QDF_STATUS_SUCCESS;
5861 }
5862 
5863 QDF_STATUS
5864 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
5865 {
5866 	struct mgmt_rx_reo_context *reo_context;
5867 	QDF_STATUS status;
5868 
5869 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5870 	if (!reo_context) {
5871 		mgmt_rx_reo_err("reo context is null");
5872 		return QDF_STATUS_E_NULL_VALUE;
5873 	}
5874 
5875 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
5876 							    num_frames);
5877 	if (QDF_IS_STATUS_ERROR(status)) {
5878 		mgmt_rx_reo_err("Failed to print ingress frame info");
5879 		return status;
5880 	}
5881 
5882 	return QDF_STATUS_SUCCESS;
5883 }
5884 
5885 QDF_STATUS
5886 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
5887 {
5888 	struct mgmt_rx_reo_context *reo_context;
5889 	QDF_STATUS status;
5890 
5891 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5892 	if (!reo_context) {
5893 		mgmt_rx_reo_err("reo context is null");
5894 		return QDF_STATUS_E_NULL_VALUE;
5895 	}
5896 
5897 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
5898 	if (QDF_IS_STATUS_ERROR(status)) {
5899 		mgmt_rx_reo_err("Failed to print egress frame stats");
5900 		return status;
5901 	}
5902 
5903 	return QDF_STATUS_SUCCESS;
5904 }
5905 
5906 QDF_STATUS
5907 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
5908 {
5909 	struct mgmt_rx_reo_context *reo_context;
5910 	QDF_STATUS status;
5911 
5912 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5913 	if (!reo_context) {
5914 		mgmt_rx_reo_err("reo context is null");
5915 		return QDF_STATUS_E_NULL_VALUE;
5916 	}
5917 
5918 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
5919 							   num_frames);
5920 	if (QDF_IS_STATUS_ERROR(status)) {
5921 		mgmt_rx_reo_err("Failed to print egress frame info");
5922 		return status;
5923 	}
5924 
5925 	return QDF_STATUS_SUCCESS;
5926 }
5927 #else
5928 QDF_STATUS
5929 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
5930 {
5931 	return QDF_STATUS_SUCCESS;
5932 }
5933 
5934 QDF_STATUS
5935 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
5936 {
5937 	return QDF_STATUS_SUCCESS;
5938 }
5939 
5940 QDF_STATUS
5941 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
5942 {
5943 	return QDF_STATUS_SUCCESS;
5944 }
5945 
5946 QDF_STATUS
5947 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
5948 {
5949 	return QDF_STATUS_SUCCESS;
5950 }
5951 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5952