xref: /wlan-dirver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_cfr_utils_api.h>
28 #include <wlan_mlo_mgr_cmn.h>
29 
/* Singleton management Rx reorder context shared by the whole driver */
static struct mgmt_rx_reo_context g_rx_reo_ctx;

/* Accessor for the global management Rx reorder context */
#define mgmt_rx_reo_get_context()        (&g_rx_reo_ctx)
33 
#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)

/**
 * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
 * @ctr1: Management packet counter1
 * @ctr2: Management packet counter2
 *
 * Plain relational operators are not usable here because the counters wrap
 * around. The design guarantees that two live counters can never differ by
 * more than half of the counter range, so the wrapped forward distance from
 * @ctr2 to @ctr1 tells us which one is logically ahead.
 *
 * Return: true if @ctr1 is greater than or equal to @ctr2, else false
 */
static inline bool
mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
{
	uint16_t forward_gap = ctr1 - ctr2;

	if (forward_gap > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
		return false;

	return true;
}

/**
 * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
 * @ctr1: Management packet counter1
 * @ctr2: Management packet counter2
 *
 * Plain subtraction is not usable here because the counters wrap around.
 * The design guarantees that two live counters can never differ by more than
 * half of the counter range; a wrapped difference beyond that limit therefore
 * means @ctr1 is actually behind @ctr2 and the result must be negative.
 *
 * Return: Difference between @ctr1 and @ctr2
 */
static inline int
mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
{
	uint16_t forward_gap = ctr1 - ctr2;

	/*
	 * A wrapped difference beyond half the range means @ctr1 is the
	 * smaller counter; shifting the result down by the full range yields
	 * the correct negative value.
	 */
	if (forward_gap > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
		return forward_gap - MGMT_RX_REO_PKT_CTR_FULL_RANGE;

	return forward_gap;
}
85 
#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global
 * timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * Plain relational operators are not usable here because the timestamps wrap
 * around. The design guarantees that two live timestamps can never differ by
 * more than half of the timestamp range, so the wrapped forward distance from
 * @ts2 to @ts1 tells us which one is logically later.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	uint32_t forward_gap = ts1 - ts2;

	return !(forward_gap > MGMT_RX_REO_GLOBAL_TS_HALF_RANGE);
}
106 
107 /**
108  * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame
109  * is stale
110  * @ts_last_released_frame: pointer to global time stamp of the last frame
111  * removed from the reorder list
112  * @frame_desc: pointer to frame descriptor
113  *
114  * This API checks whether the current management frame under processing is
115  * stale. Any frame older than the last frame delivered to upper layer is a
116  * stale frame. This could happen when we have to deliver frames out of order
117  * due to time out or list size limit. The frames which arrive late at host and
118  * with time stamp lesser than the last delivered frame are stale frames and
119  * they need to be handled differently.
120  *
121  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
122  * @frame_desc will be filled with proper values.
123  */
124 static QDF_STATUS
125 mgmt_rx_reo_is_stale_frame(
126 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame,
127 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
128 {
129 	uint32_t cur_frame_start_ts;
130 	uint32_t cur_frame_end_ts;
131 
132 	if (!ts_last_released_frame) {
133 		mgmt_rx_reo_err("Last released frame time stamp info is null");
134 		return QDF_STATUS_E_NULL_VALUE;
135 	}
136 
137 	if (!frame_desc) {
138 		mgmt_rx_reo_err("Frame descriptor is null");
139 		return QDF_STATUS_E_NULL_VALUE;
140 	}
141 
142 	frame_desc->is_stale = false;
143 	frame_desc->is_parallel_rx = false;
144 
145 	if (!ts_last_released_frame->valid)
146 		return QDF_STATUS_SUCCESS;
147 
148 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
149 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
150 
151 	frame_desc->is_stale =
152 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
153 					ts_last_released_frame->start_ts);
154 
155 	if (mgmt_rx_reo_compare_global_timestamps_gte
156 		(ts_last_released_frame->start_ts, cur_frame_start_ts) &&
157 	    mgmt_rx_reo_compare_global_timestamps_gte
158 		(cur_frame_end_ts, ts_last_released_frame->end_ts)) {
159 		frame_desc->is_parallel_rx = true;
160 		frame_desc->is_stale = false;
161 	}
162 
163 	return QDF_STATUS_SUCCESS;
164 }
165 
166 QDF_STATUS
167 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
168 {
169 	uint16_t valid_link_bitmap_shmem;
170 	uint16_t valid_link_bitmap;
171 	int8_t num_active_links_shmem;
172 	int8_t num_active_links;
173 	QDF_STATUS status;
174 
175 	if (!psoc) {
176 		mgmt_rx_reo_err("psoc is null");
177 		return QDF_STATUS_E_NULL_VALUE;
178 	}
179 
180 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
181 		return QDF_STATUS_SUCCESS;
182 
183 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
184 							 &num_active_links_shmem);
185 	if (QDF_IS_STATUS_ERROR(status)) {
186 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
187 		return QDF_STATUS_E_FAILURE;
188 	}
189 	qdf_assert_always(num_active_links_shmem > 0);
190 
191 	num_active_links = wlan_mlo_get_num_active_links();
192 	qdf_assert_always(num_active_links > 0);
193 
194 	qdf_assert_always(num_active_links_shmem == num_active_links);
195 
196 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
197 							  &valid_link_bitmap_shmem);
198 	if (QDF_IS_STATUS_ERROR(status)) {
199 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
200 		return QDF_STATUS_E_INVAL;
201 	}
202 	qdf_assert_always(valid_link_bitmap_shmem != 0);
203 
204 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap();
205 	qdf_assert_always(valid_link_bitmap_shmem != 0);
206 
207 	qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap);
208 
209 	return QDF_STATUS_SUCCESS;
210 }
211 
212 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
213 /**
214  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
215  *
216  * Return: true if @link_id is a valid link else false
217  */
218 static bool
219 mgmt_rx_reo_is_valid_link(uint8_t link_id)
220 {
221 	uint16_t valid_hw_link_bitmap;
222 
223 	if (link_id >= MAX_MLO_LINKS) {
224 		mgmt_rx_reo_err("Invalid link id %u", link_id);
225 		return false;
226 	}
227 
228 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap();
229 	qdf_assert_always(valid_hw_link_bitmap);
230 
231 	return (valid_hw_link_bitmap & (1 << link_id));
232 }
233 
234 /**
235  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
236  * system
237  * @reo_context: Pointer to reo context object
238  *
239  * Return: On success returns number of active MLO HW links. On failure
240  * returns WLAN_MLO_INVALID_NUM_LINKS.
241  */
242 static int8_t
243 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
244 	if (!reo_context) {
245 		mgmt_rx_reo_err("Mgmt reo context is null");
246 		return WLAN_MLO_INVALID_NUM_LINKS;
247 	}
248 
249 	return wlan_mlo_get_num_active_links();
250 }
251 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Handle potential
 * premature delivery of a management frame
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * No-op in the non-simulation build; premature delivery bookkeeping is
 * needed only by the reorder simulation.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	return QDF_STATUS_SUCCESS;
}
259 
/**
 * mgmt_rx_reo_handle_stale_frame() - Handle a stale management frame
 * @reo_list: Pointer to reorder list
 * @desc: Pointer to frame descriptor
 *
 * No-op in the non-simulation build; stale frame bookkeeping is needed only
 * by the reorder simulation.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
266 #else
267 /**
268  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
269  *
270  * Return: true if @link_id is a valid link, else false
271  */
272 static bool
273 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
274 			      uint8_t link_id)
275 {
276 	bool is_valid_link = false;
277 
278 	if (!sim_context) {
279 		mgmt_rx_reo_err("Mgmt reo sim context is null");
280 		return false;
281 	}
282 
283 	if (link_id >= MAX_MLO_LINKS) {
284 		mgmt_rx_reo_err("Invalid link id %u", link_id);
285 		return false;
286 	}
287 
288 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
289 
290 	if (sim_context->link_id_to_pdev_map.map[link_id])
291 		is_valid_link = true;
292 
293 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
294 
295 	return is_valid_link;
296 }
297 
298 /**
299  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
300  *
301  * Return: true if @link_id is a valid link else false
302  */
303 static bool
304 mgmt_rx_reo_is_valid_link(uint8_t link_id)
305 {
306 	struct mgmt_rx_reo_context *reo_context;
307 
308 	reo_context = mgmt_rx_reo_get_context();
309 
310 	if (!reo_context) {
311 		mgmt_rx_reo_err("Mgmt reo context is null");
312 		return false;
313 	}
314 
315 	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
316 					     link_id);
317 }
318 
319 /**
320  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
321  * simulation context object
322  * @sim_context: Pointer to reo simulation context object
323  *
324  * Number of MLO links will be equal to number of pdevs in the
325  * system. In case of simulation all the pdevs are assumed
326  * to have MLO capability.
327  *
328  * Return: On success returns number of MLO HW links. On failure
329  * returns WLAN_MLO_INVALID_NUM_LINKS.
330  */
331 static int8_t
332 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
333 {
334 	uint8_t num_mlo_links;
335 
336 	if (!sim_context) {
337 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
338 		return WLAN_MLO_INVALID_NUM_LINKS;
339 	}
340 
341 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
342 
343 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
344 
345 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
346 
347 	return num_mlo_links;
348 }
349 
350 /**
351  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
352  * context object
353  * @reo_context: Pointer to reo context object
354  *
355  * Return: On success returns number of MLO HW links. On failure
356  * returns WLAN_MLO_INVALID_NUM_LINKS.
357  */
358 static int8_t
359 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
360 	if (!reo_context) {
361 		mgmt_rx_reo_err("Mgmt reo context is null");
362 		return WLAN_MLO_INVALID_NUM_LINKS;
363 	}
364 
365 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
366 }
367 
368 /**
369  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
370  * rx reorder simulation context
371  *
372  * Return: On success returns the pointer to management rx reorder
373  * simulation context. On failure returns NULL.
374  */
375 static struct mgmt_rx_reo_sim_context *
376 mgmt_rx_reo_sim_get_context(void)
377 {
378 	struct mgmt_rx_reo_context *reo_context;
379 
380 	reo_context = mgmt_rx_reo_get_context();
381 	if (!reo_context) {
382 		mgmt_rx_reo_err("Mgmt reo context is null");
383 		return NULL;
384 	}
385 
386 	return &reo_context->sim_context;
387 }
388 
389 int8_t
390 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
391 {
392 	struct mgmt_rx_reo_sim_context *sim_context;
393 	int8_t link_id;
394 
395 	sim_context = mgmt_rx_reo_sim_get_context();
396 	if (!sim_context) {
397 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
398 		return MGMT_RX_REO_INVALID_LINK_ID;
399 	}
400 
401 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
402 
403 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
404 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
405 			break;
406 
407 	/* pdev is not found in map */
408 	if (link_id == MAX_MLO_LINKS)
409 		link_id = MGMT_RX_REO_INVALID_LINK_ID;
410 
411 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
412 
413 	return link_id;
414 }
415 
416 struct wlan_objmgr_pdev *
417 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
418 					  wlan_objmgr_ref_dbgid refdbgid)
419 {
420 	struct mgmt_rx_reo_sim_context *sim_context;
421 	struct wlan_objmgr_pdev *pdev;
422 	QDF_STATUS status;
423 
424 	sim_context = mgmt_rx_reo_sim_get_context();
425 	if (!sim_context) {
426 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
427 		return NULL;
428 	}
429 
430 	if (mlo_link_id >= MAX_MLO_LINKS) {
431 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
432 		return NULL;
433 	}
434 
435 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
436 
437 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
438 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
439 	if (QDF_IS_STATUS_ERROR(status)) {
440 		mgmt_rx_reo_err("Failed to get pdev reference");
441 		return NULL;
442 	}
443 
444 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
445 
446 	return pdev;
447 }
448 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery - Helper API to handle
 * premature delivery.
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Sometimes we have to deliver a management frame to the upper layers even
 * before its wait count reaching zero. This is called premature delivery.
 * Premature delivery could happen due to time out or reorder list overflow.
 * In the simulation, all pending entries older than @global_timestamp become
 * stale and are moved to the stale list for later verification.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	qdf_list_t stale_frame_list_temp;
	QDF_STATUS status;
	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;

	if (!reo_context)
		return QDF_STATUS_E_NULL_VALUE;

	sim_context = &reo_context->sim_context;
	master_frame_list = &sim_context->master_frame_list;

	qdf_spin_lock(&master_frame_list->lock);

	/*
	 * The pending list is time ordered; every entry ahead of the one
	 * matching @global_timestamp is now stale. Remember the newest of
	 * them so the whole stale prefix can be split off in one shot.
	 */
	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
		if (cur_entry->params.global_timestamp == global_timestamp)
			break;

		latest_stale_frame = cur_entry;
	}

	if (latest_stale_frame) {
		qdf_list_create(&stale_frame_list_temp,
				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);

		/* Detach the stale prefix and append it to the stale list */
		status = qdf_list_split(&stale_frame_list_temp,
					&master_frame_list->pending_list,
					&latest_stale_frame->node);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;

		status = qdf_list_join(&master_frame_list->stale_list,
				       &stale_frame_list_temp);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;
	}

	status = QDF_STATUS_SUCCESS;

exit_unlock_master_frame_list:
	qdf_spin_unlock(&master_frame_list->lock);

	return status;
}
511 
512 /**
513  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
514  * stale management frame list
515  * @master_frame_list: pointer to master management frame list
516  * @reo_params: pointer to reo params
517  *
518  * This API removes frames from the stale management frame list.
519  *
520  * Return: QDF_STATUS of operation
521  */
522 static QDF_STATUS
523 mgmt_rx_reo_sim_remove_frame_from_stale_list(
524 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
525 		const struct mgmt_rx_reo_params *reo_params)
526 {
527 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
528 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
529 	QDF_STATUS status;
530 
531 	if (!master_frame_list || !reo_params)
532 		return QDF_STATUS_E_NULL_VALUE;
533 
534 	qdf_spin_lock(&master_frame_list->lock);
535 
536 	/**
537 	 * Stale frames can come in any order at host. Do a linear search and
538 	 * remove the matching entry.
539 	 */
540 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
541 		if (cur_entry->params.link_id == reo_params->link_id &&
542 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
543 		    cur_entry->params.global_timestamp ==
544 		    reo_params->global_timestamp) {
545 			matching_entry = cur_entry;
546 			break;
547 		}
548 	}
549 
550 	if (!matching_entry) {
551 		qdf_spin_unlock(&master_frame_list->lock);
552 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
553 		qdf_assert_always(0);
554 	}
555 
556 	status = qdf_list_remove_node(&master_frame_list->stale_list,
557 				      &matching_entry->node);
558 
559 	if (QDF_IS_STATUS_ERROR(status)) {
560 		qdf_spin_unlock(&master_frame_list->lock);
561 		return status;
562 	}
563 
564 	qdf_mem_free(matching_entry);
565 
566 	qdf_spin_unlock(&master_frame_list->lock);
567 
568 	return QDF_STATUS_SUCCESS;
569 }
570 
571 /**
572  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
573  * @reo_list: Pointer to reorder list
574  * @desc: Pointer to frame descriptor
575  *
576  * Return: QDF_STATUS of operation
577  */
578 static QDF_STATUS
579 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
580 			       struct mgmt_rx_reo_frame_descriptor *desc)
581 {
582 	QDF_STATUS status;
583 	struct mgmt_rx_reo_context *reo_context;
584 	struct mgmt_rx_reo_sim_context *sim_context;
585 	struct mgmt_rx_reo_params *reo_params;
586 
587 	if (!reo_list || !desc)
588 		return QDF_STATUS_E_NULL_VALUE;
589 
590 	/* FW consumed/Error frames are already removed */
591 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
592 		return QDF_STATUS_SUCCESS;
593 
594 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
595 	if (!reo_context)
596 		return QDF_STATUS_E_NULL_VALUE;
597 
598 	sim_context = &reo_context->sim_context;
599 
600 	reo_params = desc->rx_params->reo_params;
601 	if (!reo_params)
602 		return QDF_STATUS_E_NULL_VALUE;
603 
604 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
605 				&sim_context->master_frame_list, reo_params);
606 
607 	return status;
608 }
609 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
610 
611 /**
612  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
613  * whether the current frame getting delivered to upper layer is a premature
614  * delivery
615  * @release_reason: release reason
616  *
617  * Return: true for a premature delivery
618  */
619 static bool
620 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
621 {
622 	return !(release_reason &
623 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
624 }
625 
626 /**
627  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
628  * MGMT Rx REO module
629  * @pdev: pointer to pdev object
630  *
631  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
632  * else NULL
633  */
634 static struct mgmt_rx_reo_pdev_info *
635 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
636 {
637 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
638 
639 	if (!pdev) {
640 		mgmt_rx_reo_err("pdev is null");
641 		return NULL;
642 	}
643 
644 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
645 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
646 						      WLAN_UMAC_COMP_MGMT_TXRX);
647 
648 	if (!mgmt_txrx_pdev_ctx) {
649 		mgmt_rx_reo_err("mgmt txrx context is NULL");
650 		return NULL;
651 	}
652 
653 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
654 }
655 
/**
 * mgmt_rx_reo_print_snapshots() - Print all snapshots related
 * to management Rx reorder module
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Dumps the valid flag, packet counter and global time stamp of all four
 * snapshots at debug log level. Purely informational; no state is changed.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_print_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
690 
691 /**
692  * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
693  * Rx REO snapshots
694  * @mac_hw_ss: MAC HW snapshot
695  * @fw_forwarded_ss: FW forwarded snapshot
696  * @fw_consumed_ss: FW consumed snapshot
697  * @host_ss: Host snapshot
698  * @link: link ID
699  *
700  * return: QDF_STATUS
701  */
702 static QDF_STATUS
703 mgmt_rx_reo_invalidate_stale_snapshots
704 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
705 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
706 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
707 			 struct mgmt_rx_reo_snapshot_params *host_ss,
708 			 uint8_t link)
709 {
710 	if (!mac_hw_ss->valid)
711 		return QDF_STATUS_SUCCESS;
712 
713 	if (fw_forwarded_ss->valid) {
714 		if (!mgmt_rx_reo_compare_global_timestamps_gte
715 					(mac_hw_ss->global_timestamp,
716 					 fw_forwarded_ss->global_timestamp) ||
717 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
718 					(mac_hw_ss->mgmt_pkt_ctr,
719 					 fw_forwarded_ss->mgmt_pkt_ctr)) {
720 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
721 						    fw_consumed_ss, host_ss);
722 			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
723 					  link);
724 			fw_forwarded_ss->valid = false;
725 		}
726 	}
727 
728 	if (fw_consumed_ss->valid) {
729 		if (!mgmt_rx_reo_compare_global_timestamps_gte
730 					(mac_hw_ss->global_timestamp,
731 					 fw_consumed_ss->global_timestamp) ||
732 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
733 					(mac_hw_ss->mgmt_pkt_ctr,
734 					 fw_consumed_ss->mgmt_pkt_ctr)) {
735 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
736 						    fw_consumed_ss, host_ss);
737 			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
738 					  link);
739 			fw_consumed_ss->valid = false;
740 		}
741 	}
742 
743 	if (host_ss->valid) {
744 		if (!mgmt_rx_reo_compare_global_timestamps_gte
745 					(mac_hw_ss->global_timestamp,
746 					 host_ss->global_timestamp) ||
747 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
748 					(mac_hw_ss->mgmt_pkt_ctr,
749 					 host_ss->mgmt_pkt_ctr)) {
750 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
751 						    fw_consumed_ss, host_ss);
752 			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
753 					  link);
754 			host_ss->valid = false;
755 		}
756 	}
757 
758 	return QDF_STATUS_SUCCESS;
759 }
760 
/**
 * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Verifies the expected ordering between the four snapshots: MAC HW must be
 * at or ahead of FW forwarded/FW consumed, which in turn must be at or ahead
 * of the host snapshot, in both global time stamp and packet counter. A
 * downstream snapshot may only be valid if its upstream source is valid.
 * All mismatches are logged along with a full snapshot dump.
 *
 * return: QDF_STATUS_SUCCESS when the snapshots are consistent, else
 * QDF_STATUS_E_INVAL
 */
static QDF_STATUS
mgmt_rx_reo_snapshots_check_sanity
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	QDF_STATUS status;

	/* No snapshot downstream of MAC HW may be valid when HW's is not */
	if (!mac_hw_ss->valid) {
		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
		    host_ss->valid) {
			mgmt_rx_reo_err("MAC HW SS is invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* Host snapshot requires at least one valid FW snapshot upstream */
	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
		if (host_ss->valid) {
			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* MAC HW must be at or ahead of the FW forwarded snapshot */
	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* MAC HW must be at or ahead of the FW consumed snapshot */
	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* Host must not be ahead of MAC HW or of any valid FW snapshot */
	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/*
		 * When both FW snapshots are valid, at least one of them
		 * must be at or ahead of the host snapshot.
		 */
		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp) &&
			    !mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr) &&
			    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	/* Dump all four snapshots to aid debugging of the mismatch */
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return status;
}
935 
936 /**
937  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
938  * frames an incoming frame should wait for before it gets delivered.
939  * @in_frame_pdev: pdev on which this frame is received
940  * @desc: frame Descriptor
941  *
942  * Each frame carrys a MGMT pkt number which is local to that link, and a
943  * timestamp which is global across all the links. MAC HW and FW also captures
944  * the same details of the last frame that they have seen. Host also maintains
945  * the details of the last frame it has seen. In total, there are 4 snapshots.
946  * 1. MAC HW snapshot - latest frame seen at MAC HW
947  * 2. FW forwarded snapshot- latest frame forwarded to the Host
948  * 3. FW consumed snapshot - latest frame consumed by the FW
949  * 4. Host/FW consumed snapshot - latest frame seen by the Host
950  * By using all these snapshots, this function tries to compute the wait count
951  * for a given incoming frame on all links.
952  *
953  * Return: QDF_STATUS of operation
954  */
955 static QDF_STATUS
956 wlan_mgmt_rx_reo_algo_calculate_wait_count(
957 		struct wlan_objmgr_pdev *in_frame_pdev,
958 		struct mgmt_rx_reo_frame_descriptor *desc)
959 {
960 	QDF_STATUS status;
961 	uint8_t link;
962 	int8_t in_frame_link;
963 	int frames_pending, delta_fwd_host;
964 	uint8_t snapshot_id;
965 	struct wlan_objmgr_pdev *pdev;
966 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
967 	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
968 	struct mgmt_rx_reo_snapshot_info *snapshot_info;
969 	struct mgmt_rx_reo_snapshot_params snapshot_params
970 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
971 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
972 					    *fw_consumed_ss, *host_ss;
973 	struct mgmt_rx_reo_params *in_frame_params;
974 	struct mgmt_rx_reo_wait_count *wait_count;
975 
976 	if (!in_frame_pdev) {
977 		mgmt_rx_reo_err("pdev is null");
978 		return QDF_STATUS_E_NULL_VALUE;
979 	}
980 
981 	if (!desc) {
982 		mgmt_rx_reo_err("Frame descriptor is null");
983 		return QDF_STATUS_E_NULL_VALUE;
984 	}
985 
986 	if (!desc->rx_params) {
987 		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
988 		return QDF_STATUS_E_NULL_VALUE;
989 	}
990 
991 	in_frame_params = desc->rx_params->reo_params;
992 	if (!in_frame_params) {
993 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
994 		return QDF_STATUS_E_NULL_VALUE;
995 	}
996 
997 	wait_count = &desc->wait_count;
998 
999 	/* Get the MLO link ID of incoming frame */
1000 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
1001 	qdf_assert_always(in_frame_link >= 0);
1002 	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
1003 	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link));
1004 
1005 	in_frame_rx_reo_pdev_ctx =
1006 			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
1007 	if (!in_frame_rx_reo_pdev_ctx) {
1008 		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
1009 		return QDF_STATUS_E_FAILURE;
1010 	}
1011 	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
1012 		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
1013 
1014 	/* Iterate over all the valid MLO links */
1015 	for (link = 0; link < MAX_MLO_LINKS; link++) {
1016 		/* No need wait for any frames on an invalid link */
1017 		if (!mgmt_rx_reo_is_valid_link(link)) {
1018 			frames_pending = 0;
1019 			goto update_pending_frames;
1020 		}
1021 
1022 		pdev = wlan_get_pdev_from_mlo_link_id(link,
1023 						      WLAN_MGMT_RX_REO_ID);
1024 
1025 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
1026 		if (!rx_reo_pdev_ctx) {
1027 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
1028 					pdev);
1029 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1030 			return QDF_STATUS_E_FAILURE;
1031 		}
1032 
1033 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
1034 		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;
1035 
1036 		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
1037 				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
1038 				 host_ss->global_timestamp);
1039 
1040 		snapshot_id = 0;
1041 		/* Read all the shared snapshots */
1042 		while (snapshot_id <
1043 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
1044 			snapshot_info = &rx_reo_pdev_ctx->
1045 				host_target_shared_snapshot_info[snapshot_id];
1046 
1047 			qdf_mem_zero(&snapshot_params[snapshot_id],
1048 				     sizeof(snapshot_params[snapshot_id]));
1049 
1050 			status = tgt_mgmt_rx_reo_read_snapshot(
1051 					pdev, snapshot_info, snapshot_id,
1052 					&snapshot_params[snapshot_id],
1053 					in_frame_rx_reo_pdev_ctx->raw_snapshots
1054 					[link][snapshot_id]);
1055 
1056 			/* Read operation shouldn't fail */
1057 			if (QDF_IS_STATUS_ERROR(status)) {
1058 				mgmt_rx_reo_err("snapshot(%d) read failed on"
1059 						"link (%d)", snapshot_id, link);
1060 				wlan_objmgr_pdev_release_ref(
1061 						pdev, WLAN_MGMT_RX_REO_ID);
1062 				return status;
1063 			}
1064 
1065 			/* If snapshot is valid, save it in the pdev context */
1066 			if (snapshot_params[snapshot_id].valid) {
1067 				rx_reo_pdev_ctx->
1068 				   last_valid_shared_snapshot[snapshot_id] =
1069 				   snapshot_params[snapshot_id];
1070 			}
1071 			desc->shared_snapshots[link][snapshot_id] =
1072 						snapshot_params[snapshot_id];
1073 
1074 			snapshot_id++;
1075 		}
1076 
1077 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1078 
1079 		mac_hw_ss = &snapshot_params
1080 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1081 		fw_forwarded_ss = &snapshot_params
1082 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
1083 		fw_consumed_ss = &snapshot_params
1084 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1085 
1086 		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
1087 								fw_forwarded_ss,
1088 								fw_consumed_ss,
1089 								host_ss, link);
1090 		if (QDF_IS_STATUS_ERROR(status)) {
1091 			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
1092 					link);
1093 			return status;
1094 		}
1095 
1096 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
1097 								*mac_hw_ss;
1098 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED] =
1099 								*fw_forwarded_ss;
1100 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
1101 								*fw_consumed_ss;
1102 		desc->host_snapshot[link] = *host_ss;
1103 
1104 		status = mgmt_rx_reo_snapshots_check_sanity
1105 			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
1106 		if (QDF_IS_STATUS_ERROR(status)) {
1107 			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
1108 					   link);
1109 			return status;
1110 		}
1111 
1112 		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
1113 				 link, mac_hw_ss->valid,
1114 				 mac_hw_ss->mgmt_pkt_ctr,
1115 				 mac_hw_ss->global_timestamp);
1116 		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
1117 				 link, fw_forwarded_ss->valid,
1118 				 fw_forwarded_ss->mgmt_pkt_ctr,
1119 				 fw_forwarded_ss->global_timestamp);
1120 		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
1121 				 link, fw_consumed_ss->valid,
1122 				 fw_consumed_ss->mgmt_pkt_ctr,
1123 				 fw_consumed_ss->global_timestamp);
1124 
1125 		/* No need wait for any frames on the same link */
1126 		if (link == in_frame_link) {
1127 			frames_pending = 0;
1128 			goto update_pending_frames;
1129 		}
1130 
1131 		/**
1132 		 * If MAC HW snapshot is invalid, the link has not started
1133 		 * receiving management frames. Set wait count to zero.
1134 		 */
1135 		if (!mac_hw_ss->valid) {
1136 			frames_pending = 0;
1137 			goto update_pending_frames;
1138 		}
1139 
1140 		/**
1141 		 * If host snapshot is invalid, wait for MAX number of frames.
1142 		 * When any frame in this link arrives at host, actual wait
1143 		 * counts will be updated.
1144 		 */
1145 		if (!host_ss->valid) {
1146 			wait_count->per_link_count[link] = UINT_MAX;
1147 			wait_count->total_count += UINT_MAX;
1148 			goto print_wait_count;
1149 		}
1150 
1151 		/**
1152 		 * If MAC HW snapshot sequence number and host snapshot
1153 		 * sequence number are same, all the frames received by
1154 		 * this link are processed by host. No need to wait for
1155 		 * any frames from this link.
1156 		 */
1157 		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
1158 						   host_ss->mgmt_pkt_ctr)) {
1159 			frames_pending = 0;
1160 			goto update_pending_frames;
1161 		}
1162 
1163 		/**
1164 		 * Ideally, the incoming frame has to wait for only those frames
1165 		 * (on other links) which meet all the below criterion.
1166 		 * 1. Frame's timestamp is less than incoming frame's
1167 		 * 2. Frame is supposed to be consumed by the Host
1168 		 * 3. Frame is not yet seen by the Host.
1169 		 * We may not be able to compute the exact optimal wait count
1170 		 * because HW/FW provides a limited assist.
1171 		 * This algorithm tries to get the best estimate of wait count
1172 		 * by not waiting for those frames where we have a conclusive
1173 		 * evidence that we don't have to wait for those frames.
1174 		 */
1175 
1176 		/**
1177 		 * If this link has already seen a frame whose timestamp is
1178 		 * greater than or equal to incoming frame's timestamp,
1179 		 * then no need to wait for any frames on this link.
1180 		 * If the total wait count becomes zero, then the policy on
1181 		 * whether to deliver such a frame to upper layers is handled
1182 		 * separately.
1183 		 */
1184 		if (mgmt_rx_reo_compare_global_timestamps_gte(
1185 				host_ss->global_timestamp,
1186 				in_frame_params->global_timestamp)) {
1187 			frames_pending = 0;
1188 			goto update_pending_frames;
1189 		}
1190 
1191 		/**
1192 		 * For starters, we only have to wait for the frames that are
1193 		 * seen by MAC HW but not yet seen by Host. The frames which
1194 		 * reach MAC HW later are guaranteed to have a timestamp
1195 		 * greater than incoming frame's timestamp.
1196 		 */
1197 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
1198 					mac_hw_ss->mgmt_pkt_ctr,
1199 					host_ss->mgmt_pkt_ctr);
1200 		qdf_assert_always(frames_pending >= 0);
1201 
1202 		if (frames_pending &&
1203 		    mgmt_rx_reo_compare_global_timestamps_gte
1204 					(mac_hw_ss->global_timestamp,
1205 					 in_frame_params->global_timestamp)) {
1206 			/**
1207 			 * Last frame seen at MAC HW has timestamp greater than
1208 			 * or equal to incoming frame's timestamp. So no need to
1209 			 * wait for that last frame, but we can't conclusively
1210 			 * say anything about timestamp of frames before the
1211 			 * last frame, so try to wait for all of those frames.
1212 			 */
1213 			frames_pending--;
1214 			qdf_assert_always(frames_pending >= 0);
1215 
1216 			if (fw_consumed_ss->valid &&
1217 			    mgmt_rx_reo_compare_global_timestamps_gte(
1218 				fw_consumed_ss->global_timestamp,
1219 				in_frame_params->global_timestamp)) {
1220 				/**
1221 				 * Last frame consumed by the FW has timestamp
1222 				 * greater than or equal to incoming frame's.
1223 				 * That means all the frames from
1224 				 * fw_consumed_ss->mgmt_pkt_ctr to
1225 				 * mac_hw->mgmt_pkt_ctr will have timestamp
1226 				 * greater than or equal to incoming frame's and
1227 				 * hence, no need to wait for those frames.
1228 				 * We just need to wait for frames from
1229 				 * host_ss->mgmt_pkt_ctr to
1230 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
1231 				 * better estimate over the above estimate,
1232 				 * so update frames_pending.
1233 				 */
1234 				frames_pending =
1235 				  mgmt_rx_reo_subtract_pkt_ctrs(
1236 				      fw_consumed_ss->mgmt_pkt_ctr,
1237 				      host_ss->mgmt_pkt_ctr) - 1;
1238 
1239 				qdf_assert_always(frames_pending >= 0);
1240 
1241 				/**
1242 				 * Last frame forwarded to Host has timestamp
1243 				 * less than incoming frame's. That means all
1244 				 * the frames starting from
1245 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
1246 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
1247 				 * the FW and hence, no need to wait for those
1248 				 * frames. We just need to wait for frames
1249 				 * from host_ss->mgmt_pkt_ctr to
1250 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
1251 				 * better estimate over the above estimate,
1252 				 * so update frames_pending.
1253 				 */
1254 				if (fw_forwarded_ss->valid &&
1255 				    !mgmt_rx_reo_compare_global_timestamps_gte(
1256 					fw_forwarded_ss->global_timestamp,
1257 					in_frame_params->global_timestamp)) {
1258 					frames_pending =
1259 					  mgmt_rx_reo_subtract_pkt_ctrs(
1260 					      fw_forwarded_ss->mgmt_pkt_ctr,
1261 					      host_ss->mgmt_pkt_ctr);
1262 
1263 					/**
1264 					 * frames_pending can be negative in
1265 					 * cases whene there are no frames
1266 					 * getting forwarded to the Host. No
1267 					 * need to wait for any frames in that
1268 					 * case.
1269 					 */
1270 					if (frames_pending < 0)
1271 						frames_pending = 0;
1272 				}
1273 			}
1274 
1275 			/**
1276 			 * Last frame forwarded to Host has timestamp greater
1277 			 * than or equal to incoming frame's. That means all the
1278 			 * frames from fw_forwarded->mgmt_pkt_ctr to
1279 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
1280 			 * or equal to incoming frame's and hence, no need to
1281 			 * wait for those frames. We may have to just wait for
1282 			 * frames from host_ss->mgmt_pkt_ctr to
1283 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
1284 			 */
1285 			if (fw_forwarded_ss->valid &&
1286 			    mgmt_rx_reo_compare_global_timestamps_gte(
1287 				fw_forwarded_ss->global_timestamp,
1288 				in_frame_params->global_timestamp)) {
1289 				delta_fwd_host =
1290 				  mgmt_rx_reo_subtract_pkt_ctrs(
1291 				    fw_forwarded_ss->mgmt_pkt_ctr,
1292 				    host_ss->mgmt_pkt_ctr) - 1;
1293 
1294 				qdf_assert_always(delta_fwd_host >= 0);
1295 
1296 				/**
1297 				 * This will be a better estimate over the one
1298 				 * we computed using mac_hw_ss but this may or
1299 				 * may not be a better estimate over the
1300 				 * one we computed using fw_consumed_ss.
1301 				 * When timestamps of both fw_consumed_ss and
1302 				 * fw_forwarded_ss are greater than incoming
1303 				 * frame's but timestamp of fw_consumed_ss is
1304 				 * smaller than fw_forwarded_ss, then
1305 				 * frames_pending will be smaller than
1306 				 * delta_fwd_host, the reverse will be true in
1307 				 * other cases. Instead of checking for all
1308 				 * those cases, just waiting for the minimum
1309 				 * among these two should be sufficient.
1310 				 */
1311 				frames_pending = qdf_min(frames_pending,
1312 							 delta_fwd_host);
1313 				qdf_assert_always(frames_pending >= 0);
1314 			}
1315 		}
1316 
1317 update_pending_frames:
1318 			qdf_assert_always(frames_pending >= 0);
1319 
1320 			wait_count->per_link_count[link] = frames_pending;
1321 			wait_count->total_count += frames_pending;
1322 
1323 print_wait_count:
1324 			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
1325 					 link, wait_count->per_link_count[link],
1326 					 wait_count->total_count);
1327 	}
1328 
1329 	return QDF_STATUS_SUCCESS;
1330 }
1331 
1332 /*
1333  * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
1334  * information about a reo list entry for debug purposes.
1335  * @link_id: link id
1336  * @mgmt_pkt_ctr: management packet counter
1337  * @global_timestamp: global time stamp
1338  * @wait_count: wait count values
1339  * @status: status of the entry in the list
1340  * @entry: pointer to reo list entry
1341  */
1342 struct mgmt_rx_reo_list_entry_debug_info {
1343 	uint8_t link_id;
1344 	uint16_t mgmt_pkt_ctr;
1345 	uint32_t global_timestamp;
1346 	struct mgmt_rx_reo_wait_count wait_count;
1347 	uint32_t status;
1348 	struct mgmt_rx_reo_list_entry *entry;
1349 };
1350 
1351 /**
1352  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1353  * @reo_list: Pointer to reorder list
1354  *
1355  * Return: QDF_STATUS
1356  */
1357 static QDF_STATUS
1358 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1359 {
1360 	uint32_t reo_list_size;
1361 	uint32_t index;
1362 	struct mgmt_rx_reo_list_entry *cur_entry;
1363 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1364 
1365 	if (!reo_list) {
1366 		mgmt_rx_reo_err("Pointer to reo list is null");
1367 		return QDF_STATUS_E_NULL_VALUE;
1368 	}
1369 
1370 	qdf_spin_lock_bh(&reo_list->list_lock);
1371 
1372 	reo_list_size = qdf_list_size(&reo_list->list);
1373 
1374 	if (reo_list_size == 0) {
1375 		qdf_spin_unlock_bh(&reo_list->list_lock);
1376 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1377 				  reo_list_size);
1378 		return QDF_STATUS_SUCCESS;
1379 	}
1380 
1381 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1382 	if (!debug_info) {
1383 		qdf_spin_unlock_bh(&reo_list->list_lock);
1384 		mgmt_rx_reo_err("Memory allocation failed");
1385 		return QDF_STATUS_E_NOMEM;
1386 	}
1387 
1388 	index = 0;
1389 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1390 		debug_info[index].link_id =
1391 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1392 		debug_info[index].mgmt_pkt_ctr =
1393 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1394 		debug_info[index].global_timestamp =
1395 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1396 		debug_info[index].wait_count = cur_entry->wait_count;
1397 		debug_info[index].status = cur_entry->status;
1398 		debug_info[index].entry = cur_entry;
1399 
1400 		++index;
1401 	}
1402 
1403 	qdf_spin_unlock_bh(&reo_list->list_lock);
1404 
1405 	mgmt_rx_reo_debug("Reorder list");
1406 	mgmt_rx_reo_debug("##################################################");
1407 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1408 			  reo_list_size);
1409 	for (index = 0; index < reo_list_size; index++) {
1410 		uint8_t link_id;
1411 
1412 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1413 				  index, debug_info[index].link_id,
1414 				  debug_info[index].global_timestamp,
1415 				  debug_info[index].mgmt_pkt_ctr,
1416 				  debug_info[index].status,
1417 				  debug_info[index].entry);
1418 
1419 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1420 				  debug_info[index].wait_count.total_count);
1421 
1422 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1423 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1424 					  link_id, debug_info[index].wait_count.
1425 					  per_link_count[link_id]);
1426 	}
1427 	mgmt_rx_reo_debug("##################################################");
1428 
1429 	qdf_mem_free(debug_info);
1430 
1431 	return QDF_STATUS_SUCCESS;
1432 }
1433 
1434 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1435 /**
1436  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1437  * related to frames going out of the reorder module
1438  * @reo_ctx: Pointer to reorder context
1439  *
1440  * API to print the stats related to frames going out of the management
1441  * Rx reorder module.
1442  *
1443  * Return: QDF_STATUS
1444  */
1445 static QDF_STATUS
1446 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1447 {
1448 	struct reo_egress_frame_stats *stats;
1449 	uint8_t link_id;
1450 	uint8_t reason;
1451 	uint64_t total_delivery_attempts_count = 0;
1452 	uint64_t total_delivery_success_count = 0;
1453 	uint64_t total_premature_delivery_count = 0;
1454 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
1455 	uint64_t delivery_count_per_reason[MGMT_RX_REO_RELEASE_REASON_MAX] = {0};
1456 	uint64_t total_delivery_count = 0;
1457 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
1458 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
1459 
1460 	if (!reo_ctx)
1461 		return QDF_STATUS_E_NULL_VALUE;
1462 
1463 	stats = &reo_ctx->egress_frame_debug_info.stats;
1464 
1465 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1466 		total_delivery_attempts_count +=
1467 				stats->delivery_attempts_count[link_id];
1468 		total_delivery_success_count +=
1469 				stats->delivery_success_count[link_id];
1470 		total_premature_delivery_count +=
1471 				stats->premature_delivery_count[link_id];
1472 	}
1473 
1474 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1475 		for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX;
1476 		     reason++)
1477 			delivery_count_per_link[link_id] +=
1478 				stats->delivery_count[link_id][reason];
1479 		total_delivery_count += delivery_count_per_link[link_id];
1480 	}
1481 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++)
1482 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1483 			delivery_count_per_reason[reason] +=
1484 				stats->delivery_count[link_id][reason];
1485 
1486 	mgmt_rx_reo_alert("Egress frame stats:");
1487 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
1488 	mgmt_rx_reo_alert("\t------------------------------------------");
1489 	mgmt_rx_reo_alert("\t|link id   |Attempts |Success |Premature |");
1490 	mgmt_rx_reo_alert("\t|          | count   | count  | count    |");
1491 	mgmt_rx_reo_alert("\t------------------------------------------");
1492 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1493 		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
1494 				  stats->delivery_attempts_count[link_id],
1495 				  stats->delivery_success_count[link_id],
1496 				  stats->premature_delivery_count[link_id]);
1497 	mgmt_rx_reo_alert("\t------------------------------------------");
1498 	}
1499 	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
1500 			  total_delivery_attempts_count,
1501 			  total_delivery_success_count,
1502 			  total_premature_delivery_count);
1503 
1504 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
1505 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
1506 	mgmt_rx_reo_alert("\tRELEASE_REASON_ZERO_WAIT_COUNT - 0x%lx",
1507 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
1508 	mgmt_rx_reo_alert("\tRELEASE_REASON_AGED_OUT - 0x%lx",
1509 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT);
1510 	mgmt_rx_reo_alert("\tRELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1511 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1512 	mgmt_rx_reo_alert("\tRELEASE_REASON_LIST_MAX_SIZE_EXCEEDED - 0x%lx",
1513 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED);
1514 
1515 	qdf_mem_set(delivery_reason_stats_boarder_a,
1516 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
1517 	qdf_mem_set(delivery_reason_stats_boarder_b,
1518 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');
1519 
1520 	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
1521 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
1522 			  "", "", "", "", "", "");
1523 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
1524 			  "0", "1", "2", "3", "4", "5");
1525 	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1526 
1527 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++) {
1528 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
1529 				  reason, stats->delivery_count[0][reason],
1530 				  stats->delivery_count[1][reason],
1531 				  stats->delivery_count[2][reason],
1532 				  stats->delivery_count[3][reason],
1533 				  stats->delivery_count[4][reason],
1534 				  stats->delivery_count[5][reason],
1535 				  delivery_count_per_reason[reason]);
1536 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1537 	}
1538 	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1539 			  "", delivery_count_per_link[0],
1540 			  delivery_count_per_link[1],
1541 			  delivery_count_per_link[2],
1542 			  delivery_count_per_link[3],
1543 			  delivery_count_per_link[4],
1544 			  delivery_count_per_link[5],
1545 			  total_delivery_count);
1546 
1547 	return QDF_STATUS_SUCCESS;
1548 }
1549 
1550 /**
1551  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1552  * frame exiting the reorder module. Logging is done before attempting the frame
1553  * delivery to upper layers.
1554  * @reo_ctx: management rx reorder context
1555  * @entry: Pointer to reorder list entry
1556  *
1557  * Return: QDF_STATUS of operation
1558  */
1559 static QDF_STATUS
1560 mgmt_rx_reo_log_egress_frame_before_delivery(
1561 					struct mgmt_rx_reo_context *reo_ctx,
1562 					struct mgmt_rx_reo_list_entry *entry)
1563 {
1564 	struct reo_egress_debug_info *egress_frame_debug_info;
1565 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1566 	struct reo_egress_frame_stats *stats;
1567 	uint8_t link_id;
1568 
1569 	if (!reo_ctx || !entry)
1570 		return QDF_STATUS_E_NULL_VALUE;
1571 
1572 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1573 
1574 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1575 			[egress_frame_debug_info->next_index];
1576 
1577 	cur_frame_debug_info->link_id =
1578 				mgmt_rx_reo_get_link_id(entry->rx_params);
1579 	cur_frame_debug_info->mgmt_pkt_ctr =
1580 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
1581 	cur_frame_debug_info->global_timestamp =
1582 				mgmt_rx_reo_get_global_ts(entry->rx_params);
1583 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
1584 	cur_frame_debug_info->final_wait_count = entry->wait_count;
1585 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
1586 		     entry->shared_snapshots,
1587 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
1588 			     sizeof(entry->shared_snapshots)));
1589 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
1590 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
1591 			     sizeof(entry->host_snapshot)));
1592 	cur_frame_debug_info->insertion_ts = entry->insertion_ts;
1593 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
1594 	cur_frame_debug_info->removal_ts =  entry->removal_ts;
1595 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
1596 	cur_frame_debug_info->release_reason = entry->release_reason;
1597 	cur_frame_debug_info->is_premature_delivery =
1598 						entry->is_premature_delivery;
1599 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
1600 
1601 	stats = &egress_frame_debug_info->stats;
1602 	link_id = cur_frame_debug_info->link_id;
1603 	stats->delivery_attempts_count[link_id]++;
1604 	if (entry->is_premature_delivery)
1605 		stats->premature_delivery_count[link_id]++;
1606 
1607 	return QDF_STATUS_SUCCESS;
1608 }
1609 
1610 /**
1611  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1612  * frame exiting the reorder module. Logging is done after attempting the frame
1613  * delivery to upper layer.
1614  * @reo_ctx: management rx reorder context
1615  * @entry: Pointer to reorder list entry
1616  *
1617  * Return: QDF_STATUS of operation
1618  */
1619 static QDF_STATUS
1620 mgmt_rx_reo_log_egress_frame_after_delivery(
1621 					struct mgmt_rx_reo_context *reo_ctx,
1622 					struct mgmt_rx_reo_list_entry *entry)
1623 {
1624 	struct reo_egress_debug_info *egress_frame_debug_info;
1625 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1626 	struct reo_egress_frame_stats *stats;
1627 
1628 	if (!reo_ctx)
1629 		return QDF_STATUS_E_NULL_VALUE;
1630 
1631 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1632 
1633 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1634 			[egress_frame_debug_info->next_index];
1635 
1636 	cur_frame_debug_info->is_delivered = entry->is_delivered;
1637 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
1638 					cur_frame_debug_info->egress_timestamp;
1639 
1640 	egress_frame_debug_info->next_index++;
1641 	egress_frame_debug_info->next_index %=
1642 				MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1643 	if (egress_frame_debug_info->next_index == 0)
1644 		egress_frame_debug_info->wrap_aroud = true;
1645 
1646 	stats = &egress_frame_debug_info->stats;
1647 	if (entry->is_delivered) {
1648 		uint8_t link_id = cur_frame_debug_info->link_id;
1649 		uint8_t release_reason = cur_frame_debug_info->release_reason;
1650 
1651 		stats->delivery_count[link_id][release_reason]++;
1652 		stats->delivery_success_count[link_id]++;
1653 	}
1654 
1655 	return QDF_STATUS_SUCCESS;
1656 }
1657 
1658 /**
1659  * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
1660  * about the latest frames leaving the reorder module
1661  * @reo_ctx: management rx reorder context
1662  * @num_frames: Number of frames for which the debug information is to be
1663  * printed. If @num_frames is 0, then debug information about all the frames
1664  * in the ring buffer will be  printed.
1665  *
1666  * Return: QDF_STATUS of operation
1667  */
1668 static QDF_STATUS
1669 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
1670 					  uint16_t num_frames)
1671 {
1672 	struct reo_egress_debug_info *egress_frame_debug_info;
1673 	int start_index;
1674 	uint16_t index;
1675 	uint16_t entry;
1676 	uint16_t num_valid_entries;
1677 	uint16_t num_entries_to_print;
1678 	char *boarder;
1679 
1680 	if (!reo_ctx)
1681 		return QDF_STATUS_E_NULL_VALUE;
1682 
1683 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1684 
1685 	if (egress_frame_debug_info->wrap_aroud)
1686 		num_valid_entries = MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1687 	else
1688 		num_valid_entries = egress_frame_debug_info->next_index;
1689 
1690 	if (num_frames == 0) {
1691 		num_entries_to_print = num_valid_entries;
1692 
1693 		if (egress_frame_debug_info->wrap_aroud)
1694 			start_index = egress_frame_debug_info->next_index;
1695 		else
1696 			start_index = 0;
1697 	} else {
1698 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
1699 
1700 		start_index = (egress_frame_debug_info->next_index -
1701 			       num_entries_to_print +
1702 			       MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX)
1703 			      % MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1704 
1705 		qdf_assert_always(start_index >= 0 &&
1706 				  start_index < MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX);
1707 	}
1708 
1709 	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
1710 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
1711 				num_frames,
1712 				egress_frame_debug_info->wrap_aroud,
1713 				egress_frame_debug_info->next_index);
1714 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
1715 				start_index, num_entries_to_print);
1716 
1717 	if (!num_entries_to_print)
1718 		return QDF_STATUS_SUCCESS;
1719 
1720 	boarder = egress_frame_debug_info->boarder;
1721 
1722 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1723 	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%5s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1724 				"No.", "CPU", "Link", "SeqNo", "Global ts",
1725 				"Ingress ts", "Insert. ts", "Removal ts",
1726 				"Egress ts", "E Dur", "W Dur", "Flags", "Rea.",
1727 				"Final wait count", "Initial wait count",
1728 				"Snapshot : link 0", "Snapshot : link 1",
1729 				"Snapshot : link 2", "Snapshot : link 3",
1730 				"Snapshot : link 4", "Snapshot : link 5");
1731 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1732 
1733 	index = start_index;
1734 	for (entry = 0; entry < num_entries_to_print; entry++) {
1735 		struct reo_egress_debug_frame_info *info;
1736 		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
1737 		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1738 		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1739 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
1740 		char flag_premature_delivery = ' ';
1741 		char flag_error = ' ';
1742 		uint8_t link;
1743 
1744 		info = &reo_ctx->egress_frame_debug_info.frame_list[index];
1745 
1746 		if (!info->is_delivered)
1747 			flag_error = 'E';
1748 
1749 		if (info->is_premature_delivery)
1750 			flag_premature_delivery = 'P';
1751 
1752 		snprintf(flags, sizeof(flags), "%c %c", flag_error,
1753 			 flag_premature_delivery);
1754 		snprintf(initial_wait_count, sizeof(initial_wait_count),
1755 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1756 			 info->initial_wait_count.total_count,
1757 			 info->initial_wait_count.per_link_count[0],
1758 			 info->initial_wait_count.per_link_count[1],
1759 			 info->initial_wait_count.per_link_count[2],
1760 			 info->initial_wait_count.per_link_count[3],
1761 			 info->initial_wait_count.per_link_count[4],
1762 			 info->initial_wait_count.per_link_count[5]);
1763 		snprintf(final_wait_count, sizeof(final_wait_count),
1764 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1765 			 info->final_wait_count.total_count,
1766 			 info->final_wait_count.per_link_count[0],
1767 			 info->final_wait_count.per_link_count[1],
1768 			 info->final_wait_count.per_link_count[2],
1769 			 info->final_wait_count.per_link_count[3],
1770 			 info->final_wait_count.per_link_count[4],
1771 			 info->final_wait_count.per_link_count[5]);
1772 
1773 		for (link = 0; link < MAX_MLO_LINKS; link++) {
1774 			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1775 			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1776 			char fw_forwaded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1777 			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1778 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
1779 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
1780 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
1781 			struct mgmt_rx_reo_snapshot_params *host_ss;
1782 
1783 			mac_hw_ss = &info->shared_snapshots
1784 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1785 			fw_consumed_ss = &info->shared_snapshots
1786 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1787 			fw_forwarded_ss = &info->shared_snapshots
1788 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
1789 			host_ss = &info->host_snapshot[link];
1790 
1791 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
1792 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
1793 				 mac_hw_ss->global_timestamp);
1794 			snprintf(fw_consumed, sizeof(fw_consumed),
1795 				 "(%1u, %5u, %10u)",
1796 				 fw_consumed_ss->valid,
1797 				 fw_consumed_ss->mgmt_pkt_ctr,
1798 				 fw_consumed_ss->global_timestamp);
1799 			snprintf(fw_forwaded, sizeof(fw_forwaded),
1800 				 "(%1u, %5u, %10u)",
1801 				 fw_forwarded_ss->valid,
1802 				 fw_forwarded_ss->mgmt_pkt_ctr,
1803 				 fw_forwarded_ss->global_timestamp);
1804 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
1805 				 host_ss->valid,
1806 				 host_ss->mgmt_pkt_ctr,
1807 				 host_ss->global_timestamp);
1808 			snprintf(snapshots[link], sizeof(snapshots[link]),
1809 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
1810 				 fw_forwaded, host);
1811 		}
1812 
1813 		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1814 					entry, info->cpu_id, info->link_id,
1815 					info->mgmt_pkt_ctr,
1816 					info->global_timestamp,
1817 					info->ingress_timestamp,
1818 					info->insertion_ts, info->removal_ts,
1819 					info->egress_timestamp,
1820 					info->egress_duration,
1821 					info->removal_ts - info->insertion_ts,
1822 					flags, info->release_reason,
1823 					final_wait_count, initial_wait_count,
1824 					snapshots[0], snapshots[1],
1825 					snapshots[2], snapshots[3],
1826 					snapshots[4], snapshots[5]);
1827 		mgmt_rx_reo_alert_no_fl("%s", boarder);
1828 
1829 		index++;
1830 		index %= MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1831 	}
1832 
1833 	return QDF_STATUS_SUCCESS;
1834 }
1835 #else
1836 /**
1837  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1838  * related to frames going out of the reorder module
1839  * @reo_ctx: Pointer to reorder context
1840  *
1841  * API to print the stats related to frames going out of the management
1842  * Rx reorder module.
1843  *
1844  * Return: QDF_STATUS
1845  */
1846 static QDF_STATUS
1847 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1848 {
1849 	return QDF_STATUS_SUCCESS;
1850 }
1851 
1852 /**
1853  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1854  * frame exiting the reorder module. Logging is done before attempting the frame
1855  * delivery to upper layers.
1856  * @reo_ctx: management rx reorder context
1857  * @entry: Pointer to reorder list entry
1858  *
1859  * Return: QDF_STATUS of operation
1860  */
1861 static QDF_STATUS
1862 mgmt_rx_reo_log_egress_frame_before_delivery(
1863 					struct mgmt_rx_reo_context *reo_ctx,
1864 					struct mgmt_rx_reo_list_entry *entry)
1865 {
1866 	return QDF_STATUS_SUCCESS;
1867 }
1868 
1869 /**
1870  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1871  * frame exiting the reorder module. Logging is done after attempting the frame
1872  * delivery to upper layer.
1873  * @reo_ctx: management rx reorder context
1874  * @is_delivered: Flag to indicate whether the frame is delivered to upper
1875  * layers
1876  *
1877  * Return: QDF_STATUS of operation
1878  */
1879 static QDF_STATUS
1880 mgmt_rx_reo_log_egress_frame_after_delivery(
1881 					struct mgmt_rx_reo_context *reo_ctx,
1882 					bool is_delivered)
1883 {
1884 	return QDF_STATUS_SUCCESS;
1885 }
1886 
1887 /**
1888  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
1889  * the latest frames leaving the reorder module
1890  * @reo_ctx: management rx reorder context
1891  *
1892  * Return: QDF_STATUS of operation
1893  */
1894 static QDF_STATUS
1895 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
1896 {
1897 	return QDF_STATUS_SUCCESS;
1898 }
1899 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
1900 
1901 /**
1902  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
1903  * for releasing the reorder list entry to upper layer.
1904  * reorder list.
1905  * @entry: List entry
1906  *
1907  * This API expects the caller to acquire the spin lock protecting the reorder
1908  * list.
1909  *
1910  * Return: Reason for releasing the frame.
1911  */
1912 static uint8_t
1913 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
1914 {
1915 	uint8_t release_reason = 0;
1916 
1917 	if (!entry)
1918 		return 0;
1919 
1920 	if (MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry))
1921 		release_reason |=
1922 		   MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED;
1923 
1924 	if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
1925 		release_reason |=
1926 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;
1927 
1928 	if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
1929 		release_reason |=
1930 				MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
1931 
1932 	if (MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
1933 		release_reason |=
1934 		MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
1935 
1936 	return release_reason;
1937 }
1938 
1939 /**
1940  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
1941  * @reo_list: Pointer to reorder list
1942  * @entry: List entry
1943  *
1944  * API to send the frame to the upper layer. This API has to be called only
1945  * for entries which can be released to upper layer. It is the caller's
1946  * responsibility to ensure that entry can be released (by using API
1947  * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
1948  * acquiring the lock which serializes the frame delivery to the upper layers.
1949  *
1950  * Return: QDF_STATUS
1951  */
1952 static QDF_STATUS
1953 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
1954 			       struct mgmt_rx_reo_list_entry *entry)
1955 {
1956 	uint8_t release_reason;
1957 	uint8_t link_id;
1958 	uint32_t entry_global_ts;
1959 	QDF_STATUS status;
1960 	QDF_STATUS temp;
1961 	struct mgmt_rx_reo_context *reo_context;
1962 
1963 	qdf_assert_always(reo_list);
1964 	qdf_assert_always(entry);
1965 
1966 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
1967 	qdf_assert_always(reo_context);
1968 
1969 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
1970 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
1971 
1972 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
1973 
1974 	qdf_assert_always(release_reason != 0);
1975 
1976 	entry->is_delivered = false;
1977 	entry->is_premature_delivery = false;
1978 	entry->release_reason = release_reason;
1979 
1980 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
1981 		entry->is_premature_delivery = true;
1982 		status = mgmt_rx_reo_handle_potential_premature_delivery(
1983 						reo_context, entry_global_ts);
1984 		if (QDF_IS_STATUS_ERROR(status))
1985 			goto exit;
1986 	}
1987 
1988 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
1989 							      entry);
1990 	if (QDF_IS_STATUS_ERROR(status))
1991 		goto exit;
1992 
1993 	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
1994 						 entry->rx_params);
1995 	/* Above call frees nbuf and rx_params, make it null explicitly */
1996 	entry->nbuf = NULL;
1997 	entry->rx_params = NULL;
1998 
1999 	if (QDF_IS_STATUS_ERROR(status))
2000 		goto exit_log;
2001 
2002 	entry->is_delivered = true;
2003 
2004 	status = QDF_STATUS_SUCCESS;
2005 
2006 exit_log:
2007 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry);
2008 	if (QDF_IS_STATUS_ERROR(temp))
2009 		status = temp;
2010 exit:
2011 	/**
2012 	 * Release the reference taken when the entry is inserted into
2013 	 * the reorder list
2014 	 */
2015 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2016 
2017 	return status;
2018 }
2019 
2020 /**
2021  * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
2022  * list entry can be send to upper layers.
2023  * @reo_list: Pointer to reorder list
2024  * @entry: List entry
2025  *
2026  * Return: QDF_STATUS
2027  */
2028 static bool
2029 mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
2030 					   struct mgmt_rx_reo_list_entry *entry)
2031 {
2032 	if (!reo_list || !entry)
2033 		return false;
2034 
2035 	return mgmt_rx_reo_list_max_size_exceeded(reo_list) ||
2036 	       !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
2037 	       entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
2038 	       MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME
2039 	       (entry);
2040 }
2041 
2042 /**
2043  * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
2044  * @reo_context: Pointer to management Rx reorder context
2045  *
2046  * This API releases the entries from the reorder list based on the following
2047  * conditions.
2048  *   a) Entries with total wait count equal to 0
2049  *   b) Entries which are timed out or entries with global time stamp <= global
2050  *      time stamp of the latest frame which is timed out. We can only release
2051  *      the entries in the increasing order of the global time stamp.
2052  *      So all the entries with global time stamp <= global time stamp of the
2053  *      latest timed out frame has to be released.
2054  *
2055  * Return: QDF_STATUS
2056  */
2057 static QDF_STATUS
2058 mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
2059 {
2060 	struct mgmt_rx_reo_list *reo_list;
2061 	QDF_STATUS status;
2062 
2063 	if (!reo_context) {
2064 		mgmt_rx_reo_err("reo context is null");
2065 		return QDF_STATUS_E_NULL_VALUE;
2066 	}
2067 
2068 	reo_list = &reo_context->reo_list;
2069 
2070 	qdf_spin_lock(&reo_context->frame_release_lock);
2071 
2072 	while (1) {
2073 		struct mgmt_rx_reo_list_entry *first_entry;
2074 		/* TODO yield if release_count > THRESHOLD */
2075 		uint16_t release_count = 0;
2076 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame =
2077 					&reo_list->ts_last_released_frame;
2078 		uint32_t entry_global_ts;
2079 
2080 		qdf_spin_lock_bh(&reo_list->list_lock);
2081 
2082 		first_entry = qdf_list_first_entry_or_null(
2083 			&reo_list->list, struct mgmt_rx_reo_list_entry, node);
2084 
2085 		if (!first_entry) {
2086 			status = QDF_STATUS_SUCCESS;
2087 			goto exit_unlock_list_lock;
2088 		}
2089 
2090 		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
2091 								first_entry)) {
2092 			status = QDF_STATUS_SUCCESS;
2093 			goto exit_unlock_list_lock;
2094 		}
2095 
2096 		if (mgmt_rx_reo_list_max_size_exceeded(reo_list))
2097 			first_entry->status |=
2098 				MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;
2099 
2100 		status = qdf_list_remove_node(&reo_list->list,
2101 					      &first_entry->node);
2102 		if (QDF_IS_STATUS_ERROR(status)) {
2103 			status = QDF_STATUS_E_FAILURE;
2104 			goto exit_unlock_list_lock;
2105 		}
2106 		first_entry->removal_ts = qdf_get_log_timestamp();
2107 
2108 		/**
2109 		 * Last released frame global time stamp is invalid means that
2110 		 * current frame is the first frame to be released to the
2111 		 * upper layer from the reorder list. Blindly update the last
2112 		 * released frame global time stamp to the current frame's
2113 		 * global time stamp and set the valid to true.
2114 		 * If the last released frame global time stamp is valid and
2115 		 * current frame's global time stamp is >= last released frame
2116 		 * global time stamp, deliver the current frame to upper layer
2117 		 * and update the last released frame global time stamp.
2118 		 */
2119 		entry_global_ts =
2120 			mgmt_rx_reo_get_global_ts(first_entry->rx_params);
2121 
2122 		if (!ts_last_released_frame->valid ||
2123 		    mgmt_rx_reo_compare_global_timestamps_gte(
2124 			entry_global_ts, ts_last_released_frame->global_ts)) {
2125 			struct mgmt_rx_event_params *params;
2126 
2127 			params = first_entry->rx_params;
2128 
2129 			ts_last_released_frame->global_ts = entry_global_ts;
2130 			ts_last_released_frame->start_ts =
2131 					mgmt_rx_reo_get_start_ts(params);
2132 			ts_last_released_frame->end_ts =
2133 					mgmt_rx_reo_get_end_ts(params);
2134 			ts_last_released_frame->valid = true;
2135 
2136 			qdf_timer_mod
2137 				(&reo_list->global_mgmt_rx_inactivity_timer,
2138 				 MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
2139 		} else {
2140 			/**
2141 			 * This should never happen. All the frames older than
2142 			 * the last frame released from the reorder list will be
2143 			 * discarded at the entry to reorder algorithm itself.
2144 			 */
2145 			qdf_assert_always(first_entry->is_parallel_rx);
2146 		}
2147 
2148 		qdf_spin_unlock_bh(&reo_list->list_lock);
2149 
2150 		status = mgmt_rx_reo_list_entry_send_up(reo_list,
2151 							first_entry);
2152 		if (QDF_IS_STATUS_ERROR(status)) {
2153 			status = QDF_STATUS_E_FAILURE;
2154 			qdf_mem_free(first_entry);
2155 			goto exit_unlock_frame_release_lock;
2156 		}
2157 
2158 		qdf_mem_free(first_entry);
2159 		release_count++;
2160 	}
2161 
2162 	status = QDF_STATUS_SUCCESS;
2163 	goto exit_unlock_frame_release_lock;
2164 
2165 exit_unlock_list_lock:
2166 	qdf_spin_unlock_bh(&reo_list->list_lock);
2167 exit_unlock_frame_release_lock:
2168 	qdf_spin_unlock(&reo_context->frame_release_lock);
2169 
2170 	return status;
2171 }
2172 
2173 /**
2174  * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
2175  * @arg: Argument to timer handler
2176  *
2177  * This is the handler for periodic ageout timer used to timeout entries in the
2178  * reorder list.
2179  *
2180  * Return: void
2181  */
2182 static void
2183 mgmt_rx_reo_list_ageout_timer_handler(void *arg)
2184 {
2185 	struct mgmt_rx_reo_list *reo_list = arg;
2186 	struct mgmt_rx_reo_list_entry *cur_entry;
2187 	uint64_t cur_ts;
2188 	QDF_STATUS status;
2189 	struct mgmt_rx_reo_context *reo_context;
2190 	/**
2191 	 * Stores the pointer to the entry in reorder list for the latest aged
2192 	 * out frame. Latest aged out frame is the aged out frame in reorder
2193 	 * list which has the largest global time stamp value.
2194 	 */
2195 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
2196 
2197 	qdf_assert_always(reo_list);
2198 
2199 	qdf_timer_mod(&reo_list->ageout_timer,
2200 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
2201 
2202 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2203 	qdf_assert_always(reo_context);
2204 
2205 	qdf_spin_lock_bh(&reo_list->list_lock);
2206 
2207 	cur_ts = qdf_get_log_timestamp();
2208 
2209 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2210 		if (cur_ts - cur_entry->insertion_ts >=
2211 		    reo_list->list_entry_timeout_us) {
2212 			latest_aged_out_entry = cur_entry;
2213 			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
2214 		}
2215 	}
2216 
2217 	if (latest_aged_out_entry) {
2218 		qdf_list_for_each(&reo_list->list, cur_entry, node) {
2219 			if (cur_entry == latest_aged_out_entry)
2220 				break;
2221 			cur_entry->status |= MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
2222 		}
2223 	}
2224 
2225 	qdf_spin_unlock_bh(&reo_list->list_lock);
2226 
2227 	if (latest_aged_out_entry) {
2228 		status = mgmt_rx_reo_list_release_entries(reo_context);
2229 		if (QDF_IS_STATUS_ERROR(status)) {
2230 			mgmt_rx_reo_err("Failed to release entries, ret = %d",
2231 					status);
2232 			return;
2233 		}
2234 	}
2235 }
2236 
2237 /**
2238  * mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler() - Timer handler
2239  * for global management Rx inactivity timer
2240  * @arg: Argument to timer handler
2241  *
2242  * This is the timer handler for tracking management Rx inactivity across
2243  * links.
2244  *
2245  * Return: void
2246  */
2247 static void
2248 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler(void *arg)
2249 {
2250 	struct mgmt_rx_reo_list *reo_list = arg;
2251 	struct mgmt_rx_reo_context *reo_context;
2252 	struct mgmt_rx_reo_global_ts_info *ts_last_released_frame;
2253 
2254 	qdf_assert_always(reo_list);
2255 	ts_last_released_frame = &reo_list->ts_last_released_frame;
2256 
2257 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2258 	qdf_assert_always(reo_context);
2259 
2260 	qdf_spin_lock(&reo_context->frame_release_lock);
2261 	qdf_spin_lock_bh(&reo_list->list_lock);
2262 
2263 	qdf_mem_zero(ts_last_released_frame, sizeof(*ts_last_released_frame));
2264 
2265 	qdf_spin_unlock_bh(&reo_list->list_lock);
2266 	qdf_spin_unlock(&reo_context->frame_release_lock);
2267 }
2268 
2269 /**
2270  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
2271  * frame received.
2272  * @frame_desc: Pointer to the frame descriptor
2273  * @entry: Pointer to the list entry
2274  *
2275  * This API prepares the reorder list entry corresponding to a management frame
2276  * to be consumed by host. This entry would be inserted at the appropriate
2277  * position in the reorder list.
2278  *
2279  * Return: QDF_STATUS
2280  */
2281 static QDF_STATUS
2282 mgmt_rx_reo_prepare_list_entry(
2283 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
2284 		struct mgmt_rx_reo_list_entry **entry)
2285 {
2286 	struct mgmt_rx_reo_list_entry *list_entry;
2287 	struct wlan_objmgr_pdev *pdev;
2288 	uint8_t link_id;
2289 
2290 	if (!frame_desc) {
2291 		mgmt_rx_reo_err("frame descriptor is null");
2292 		return QDF_STATUS_E_NULL_VALUE;
2293 	}
2294 
2295 	if (!entry) {
2296 		mgmt_rx_reo_err("Pointer to list entry is null");
2297 		return QDF_STATUS_E_NULL_VALUE;
2298 	}
2299 
2300 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2301 
2302 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_ID);
2303 	if (!pdev) {
2304 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
2305 				link_id);
2306 		return QDF_STATUS_E_NULL_VALUE;
2307 	}
2308 
2309 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
2310 	if (!list_entry) {
2311 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2312 		mgmt_rx_reo_err("List entry allocation failed");
2313 		return QDF_STATUS_E_NOMEM;
2314 	}
2315 
2316 	list_entry->pdev = pdev;
2317 	list_entry->nbuf = frame_desc->nbuf;
2318 	list_entry->rx_params = frame_desc->rx_params;
2319 	list_entry->wait_count = frame_desc->wait_count;
2320 	list_entry->initial_wait_count = frame_desc->wait_count;
2321 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
2322 		     qdf_min(sizeof(list_entry->shared_snapshots),
2323 			     sizeof(frame_desc->shared_snapshots)));
2324 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
2325 		     qdf_min(sizeof(list_entry->host_snapshot),
2326 			     sizeof(frame_desc->host_snapshot)));
2327 	list_entry->status = 0;
2328 	if (list_entry->wait_count.total_count)
2329 		list_entry->status |=
2330 			MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2331 
2332 	*entry = list_entry;
2333 
2334 	return QDF_STATUS_SUCCESS;
2335 }
2336 
2337 /**
2338  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
2339  * on the wait count of a frame received after that on air.
2340  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
2341  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
2342  *
2343  * This API optimizes the wait count of a frame based on the wait count of
2344  * a frame received after that on air. Old frame refers to the frame received
2345  * first on the air and new frame refers to the frame received after that.
2346  * We use the following fundamental idea. Wait counts for old frames can't be
2347  * more than wait counts for the new frame. Use this to optimize the wait count
2348  * for the old frames. Per link wait count of an old frame is minimum of the
2349  * per link wait count of the old frame and new frame.
2350  *
2351  * Return: QDF_STATUS
2352  */
2353 static QDF_STATUS
2354 mgmt_rx_reo_update_wait_count(
2355 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
2356 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
2357 {
2358 	uint8_t link_id;
2359 
2360 	qdf_assert_always(wait_count_old_frame);
2361 	qdf_assert_always(wait_count_new_frame);
2362 
2363 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2364 		if (wait_count_old_frame->per_link_count[link_id]) {
2365 			uint32_t temp_wait_count;
2366 			uint32_t wait_count_diff;
2367 
2368 			temp_wait_count =
2369 				wait_count_old_frame->per_link_count[link_id];
2370 			wait_count_old_frame->per_link_count[link_id] =
2371 				qdf_min(wait_count_old_frame->
2372 					per_link_count[link_id],
2373 					wait_count_new_frame->
2374 					per_link_count[link_id]);
2375 			wait_count_diff = temp_wait_count -
2376 				wait_count_old_frame->per_link_count[link_id];
2377 
2378 			wait_count_old_frame->total_count -= wait_count_diff;
2379 		}
2380 	}
2381 
2382 	return QDF_STATUS_SUCCESS;
2383 }
2384 
2385 /**
2386  * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
2387  * @reo_list: Pointer to reorder list
2388  * @frame_desc: Pointer to frame descriptor
2389  * @is_queued: Whether this frame is queued in the REO list
2390  *
2391  * API to update the reorder list on every management frame reception.
2392  * This API does the following things.
2393  *   a) Update the wait counts for all the frames in the reorder list with
2394  *      global time stamp <= current frame's global time stamp. We use the
2395  *      following principle for updating the wait count in this case.
2396  *      Let A and B be two management frames with global time stamp of A <=
2397  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
2398  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
2399  *      min(WAi, WBi).
2400  *   b) If the current frame is to be consumed by host, insert it in the
2401  *      reorder list such that the list is always sorted in the increasing order
2402  *      of global time stamp. Update the wait count of the current frame based
2403  *      on the frame next to it in the reorder list (if any).
2404  *   c) Update the wait count of the frames in the reorder list with global
2405  *      time stamp > current frame's global time stamp. Let the current frame
2406  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
2407  *      all the frames in the reorder list with global time stamp > current
2408  *      frame's global time stamp.
2409  *
2410  * Return: QDF_STATUS
2411  */
2412 static QDF_STATUS
2413 mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
2414 			struct mgmt_rx_reo_frame_descriptor *frame_desc,
2415 			bool *is_queued)
2416 {
2417 	struct mgmt_rx_reo_list_entry *cur_entry;
2418 	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
2419 	bool least_greater_entry_found = false;
2420 	QDF_STATUS status;
2421 	uint32_t new_frame_global_ts;
2422 	struct mgmt_rx_reo_list_entry *new_entry = NULL;
2423 	uint16_t list_insertion_pos = 0;
2424 
2425 	if (!is_queued)
2426 		return QDF_STATUS_E_NULL_VALUE;
2427 	*is_queued = false;
2428 
2429 	if (!reo_list) {
2430 		mgmt_rx_reo_err("Mgmt Rx reo list is null");
2431 		return QDF_STATUS_E_NULL_VALUE;
2432 	}
2433 
2434 	if (!frame_desc) {
2435 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
2436 		return QDF_STATUS_E_NULL_VALUE;
2437 	}
2438 
2439 	new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
2440 
2441 	/* Prepare the list entry before acquiring lock */
2442 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2443 	    frame_desc->reo_required) {
2444 		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
2445 		if (QDF_IS_STATUS_ERROR(status)) {
2446 			mgmt_rx_reo_err("Failed to prepare list entry");
2447 			return QDF_STATUS_E_FAILURE;
2448 		}
2449 	}
2450 
2451 	qdf_spin_lock_bh(&reo_list->list_lock);
2452 
2453 	frame_desc->list_size_rx = qdf_list_size(&reo_list->list);
2454 
2455 	status = mgmt_rx_reo_is_stale_frame(&reo_list->ts_last_released_frame,
2456 					    frame_desc);
2457 	if (QDF_IS_STATUS_ERROR(status))
2458 		goto exit_free_entry;
2459 
2460 	if (frame_desc->is_stale) {
2461 		status = mgmt_rx_reo_handle_stale_frame(reo_list, frame_desc);
2462 		if (QDF_IS_STATUS_ERROR(status))
2463 			goto exit_free_entry;
2464 	}
2465 
2466 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2467 		uint32_t cur_entry_global_ts;
2468 
2469 		cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
2470 					cur_entry->rx_params);
2471 
2472 		if (!mgmt_rx_reo_compare_global_timestamps_gte(
2473 		    new_frame_global_ts, cur_entry_global_ts)) {
2474 			least_greater_entry = cur_entry;
2475 			least_greater_entry_found = true;
2476 			break;
2477 		}
2478 
2479 		list_insertion_pos++;
2480 
2481 		status = mgmt_rx_reo_update_wait_count(
2482 					&cur_entry->wait_count,
2483 					&frame_desc->wait_count);
2484 		if (QDF_IS_STATUS_ERROR(status))
2485 			goto exit_free_entry;
2486 
2487 		if (cur_entry->wait_count.total_count == 0)
2488 			cur_entry->status &=
2489 			      ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2490 	}
2491 
2492 	if (frame_desc->is_stale)
2493 		qdf_assert_always(!list_insertion_pos);
2494 
2495 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2496 	    !frame_desc->is_stale && frame_desc->reo_required) {
2497 		if (least_greater_entry_found) {
2498 			status = mgmt_rx_reo_update_wait_count(
2499 					&new_entry->wait_count,
2500 					&least_greater_entry->wait_count);
2501 
2502 			if (QDF_IS_STATUS_ERROR(status))
2503 				goto exit_free_entry;
2504 
2505 			frame_desc->wait_count = new_entry->wait_count;
2506 
2507 			if (new_entry->wait_count.total_count == 0)
2508 				new_entry->status &=
2509 					~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2510 		}
2511 
2512 		new_entry->insertion_ts = qdf_get_log_timestamp();
2513 		new_entry->ingress_timestamp = frame_desc->ingress_timestamp;
2514 		new_entry->is_parallel_rx = frame_desc->is_parallel_rx;
2515 		frame_desc->list_insertion_pos = list_insertion_pos;
2516 
2517 		if (least_greater_entry_found)
2518 			status = qdf_list_insert_before(
2519 					&reo_list->list, &new_entry->node,
2520 					&least_greater_entry->node);
2521 		else
2522 			status = qdf_list_insert_back(
2523 					&reo_list->list, &new_entry->node);
2524 
2525 		if (QDF_IS_STATUS_ERROR(status))
2526 			goto exit_free_entry;
2527 
2528 		*is_queued = true;
2529 
2530 		if (new_entry->wait_count.total_count == 0)
2531 			frame_desc->zero_wait_count_rx = true;
2532 
2533 		if (frame_desc->zero_wait_count_rx &&
2534 		    qdf_list_first_entry_or_null(&reo_list->list,
2535 						 struct mgmt_rx_reo_list_entry,
2536 						 node) == new_entry)
2537 			frame_desc->immediate_delivery = true;
2538 	}
2539 
2540 	if (least_greater_entry_found) {
2541 		cur_entry = least_greater_entry;
2542 
2543 		qdf_list_for_each_from(&reo_list->list, cur_entry, node) {
2544 			uint8_t frame_link_id;
2545 			struct mgmt_rx_reo_wait_count *wait_count;
2546 
2547 			frame_link_id =
2548 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2549 			wait_count = &cur_entry->wait_count;
2550 			if (wait_count->per_link_count[frame_link_id]) {
2551 				uint32_t old_wait_count;
2552 				uint32_t new_wait_count;
2553 				uint32_t wait_count_diff;
2554 				uint16_t pkt_ctr_delta;
2555 
2556 				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
2557 				old_wait_count =
2558 				      wait_count->per_link_count[frame_link_id];
2559 				new_wait_count =
2560 				     qdf_min(old_wait_count - pkt_ctr_delta,
2561 					     (uint32_t)0);
2562 				wait_count_diff = old_wait_count -
2563 						  new_wait_count;
2564 
2565 				wait_count->per_link_count[frame_link_id] =
2566 								new_wait_count;
2567 				wait_count->total_count -= wait_count_diff;
2568 
2569 				if (wait_count->total_count == 0)
2570 					cur_entry->status &=
2571 						~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2572 			}
2573 		}
2574 	}
2575 
2576 	status = QDF_STATUS_SUCCESS;
2577 
2578 exit_free_entry:
2579 	/* Cleanup the entry if it is not queued */
2580 	if (new_entry && !*is_queued) {
2581 		/**
2582 		 * New entry created is not inserted to reorder list, free
2583 		 * the entry and release the reference
2584 		 */
2585 		wlan_objmgr_pdev_release_ref(new_entry->pdev,
2586 					     WLAN_MGMT_RX_REO_ID);
2587 		qdf_mem_free(new_entry);
2588 	}
2589 
2590 	qdf_spin_unlock_bh(&reo_list->list_lock);
2591 
2592 	if (!*is_queued)
2593 		return status;
2594 
2595 	return status;
2596 }
2597 
2598 /**
2599  * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
2600  * @reo_list: Pointer to reorder list
2601  *
2602  * API to initialize the management rx-reorder list.
2603  *
2604  * Return: QDF_STATUS
2605  */
2606 static QDF_STATUS
2607 mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list)
2608 {
2609 	QDF_STATUS status;
2610 
2611 	reo_list->max_list_size = MGMT_RX_REO_LIST_MAX_SIZE;
2612 	reo_list->list_entry_timeout_us = MGMT_RX_REO_LIST_TIMEOUT_US;
2613 
2614 	qdf_list_create(&reo_list->list, reo_list->max_list_size);
2615 	qdf_spinlock_create(&reo_list->list_lock);
2616 
2617 	status = qdf_timer_init(NULL, &reo_list->ageout_timer,
2618 				mgmt_rx_reo_list_ageout_timer_handler, reo_list,
2619 				QDF_TIMER_TYPE_WAKE_APPS);
2620 	if (QDF_IS_STATUS_ERROR(status)) {
2621 		mgmt_rx_reo_err("Failed to initialize reo list ageout timer");
2622 		return status;
2623 	}
2624 
2625 	reo_list->ts_last_released_frame.valid = false;
2626 
2627 	status = qdf_timer_init
2628 			(NULL, &reo_list->global_mgmt_rx_inactivity_timer,
2629 			 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler,
2630 			 reo_list, QDF_TIMER_TYPE_WAKE_APPS);
2631 	if (QDF_IS_STATUS_ERROR(status)) {
2632 		mgmt_rx_reo_err("Failed to init glb mgmt rx inactivity timer");
2633 		return status;
2634 	}
2635 
2636 	return QDF_STATUS_SUCCESS;
2637 }
2638 
2639 /**
2640  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
2641  * Rx REO parameters.
2642  * @pdev: pdev extracted from the WMI event
2643  * @desc: pointer to frame descriptor
2644  *
2645  * Return: QDF_STATUS of operation
2646  */
2647 static QDF_STATUS
2648 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
2649 				      struct mgmt_rx_reo_frame_descriptor *desc)
2650 {
2651 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
2652 	struct mgmt_rx_reo_snapshot_params *host_ss;
2653 	struct mgmt_rx_reo_params *reo_params;
2654 	int pkt_ctr_delta;
2655 	struct wlan_objmgr_psoc *psoc;
2656 	uint16_t pkt_ctr_delta_thresh;
2657 
2658 	if (!desc) {
2659 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
2660 		return QDF_STATUS_E_NULL_VALUE;
2661 	}
2662 
2663 	if (!desc->rx_params) {
2664 		mgmt_rx_reo_err("Mgmt Rx params null");
2665 		return QDF_STATUS_E_NULL_VALUE;
2666 	}
2667 
2668 	reo_params = desc->rx_params->reo_params;
2669 	if (!reo_params) {
2670 		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
2671 		return QDF_STATUS_E_NULL_VALUE;
2672 	}
2673 
2674 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
2675 	if (!rx_reo_pdev_ctx) {
2676 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
2677 		return QDF_STATUS_E_FAILURE;
2678 	}
2679 
2680 	psoc = wlan_pdev_get_psoc(pdev);
2681 
2682 	/* FW should send valid REO parameters */
2683 	if (!reo_params->valid) {
2684 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
2685 		return QDF_STATUS_E_FAILURE;
2686 	}
2687 
2688 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
2689 
2690 	if (!host_ss->valid) {
2691 		desc->pkt_ctr_delta = 1;
2692 		goto update_host_ss;
2693 	}
2694 
2695 	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
2696 					     reo_params->mgmt_pkt_ctr)) {
2697 		mgmt_rx_reo_err("Cur frame ctr > last frame ctr for link = %u",
2698 				reo_params->link_id);
2699 		goto failure_debug;
2700 	}
2701 
2702 	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
2703 						      host_ss->mgmt_pkt_ctr);
2704 	qdf_assert_always(pkt_ctr_delta > 0);
2705 	desc->pkt_ctr_delta = pkt_ctr_delta;
2706 
2707 	if (pkt_ctr_delta == 1)
2708 		goto update_host_ss;
2709 
2710 	/*
2711 	 * Under back pressure scenarios, FW may drop management Rx frame
2712 	 * WMI events. So holes in the management packet counter is expected.
2713 	 * Add a debug print and optional assert to track the holes.
2714 	 */
2715 	mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta);
2716 	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2717 			  reo_params->valid, reo_params->mgmt_pkt_ctr,
2718 			  reo_params->global_timestamp);
2719 	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u",
2720 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
2721 			  host_ss->global_timestamp);
2722 
2723 	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);
2724 
2725 	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
2726 		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
2727 				pkt_ctr_delta, pkt_ctr_delta_thresh,
2728 				reo_params->link_id);
2729 		goto failure_debug;
2730 	}
2731 
2732 update_host_ss:
2733 	host_ss->valid = true;
2734 	host_ss->global_timestamp = reo_params->global_timestamp;
2735 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
2736 
2737 	return QDF_STATUS_SUCCESS;
2738 
2739 failure_debug:
2740 	mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2741 			reo_params->valid, reo_params->mgmt_pkt_ctr,
2742 			reo_params->global_timestamp);
2743 	mgmt_rx_reo_err("Last frame vailid = %u, pkt_ctr = %u, ts =%u",
2744 			host_ss->valid, host_ss->mgmt_pkt_ctr,
2745 			host_ss->global_timestamp);
2746 	qdf_assert_always(0);
2747 
2748 	return QDF_STATUS_E_FAILURE;
2749 }
2750 
2751 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2752 /**
2753  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
2754  * related to frames going into the reorder module
2755  * @reo_ctx: Pointer to reorder context
2756  *
2757  * API to print the stats related to frames going into the management
2758  * Rx reorder module.
2759  *
2760  * Return: QDF_STATUS
2761  */
2762 static QDF_STATUS
2763 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
2764 {
2765 	struct reo_ingress_frame_stats *stats;
2766 	uint8_t link_id;
2767 	uint8_t desc_type;
2768 	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
2769 	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2770 	uint64_t total_ingress_count = 0;
2771 	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
2772 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2773 	uint64_t total_stale_count = 0;
2774 	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
2775 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2776 	uint64_t total_error_count = 0;
2777 	uint64_t total_queued_count = 0;
2778 	uint64_t total_zero_wait_count_rx_count = 0;
2779 	uint64_t total_immediate_delivery_count = 0;
2780 
2781 	if (!reo_ctx)
2782 		return QDF_STATUS_E_NULL_VALUE;
2783 
2784 	stats = &reo_ctx->ingress_frame_debug_info.stats;
2785 
2786 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2787 		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2788 		     desc_type++) {
2789 			ingress_count_per_link[link_id] +=
2790 				stats->ingress_count[link_id][desc_type];
2791 			stale_count_per_link[link_id] +=
2792 					stats->stale_count[link_id][desc_type];
2793 			error_count_per_link[link_id] +=
2794 					stats->error_count[link_id][desc_type];
2795 		}
2796 
2797 		total_ingress_count += ingress_count_per_link[link_id];
2798 		total_stale_count += stale_count_per_link[link_id];
2799 		total_error_count += error_count_per_link[link_id];
2800 	}
2801 
2802 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2803 	     desc_type++) {
2804 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2805 			ingress_count_per_desc_type[desc_type] +=
2806 				stats->ingress_count[link_id][desc_type];
2807 			stale_count_per_desc_type[desc_type] +=
2808 					stats->stale_count[link_id][desc_type];
2809 			error_count_per_desc_type[desc_type] +=
2810 					stats->error_count[link_id][desc_type];
2811 		}
2812 	}
2813 
2814 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2815 		total_queued_count += stats->queued_count[link_id];
2816 		total_zero_wait_count_rx_count +=
2817 				stats->zero_wait_count_rx_count[link_id];
2818 		total_immediate_delivery_count +=
2819 				stats->immediate_delivery_count[link_id];
2820 	}
2821 
2822 	mgmt_rx_reo_alert("Ingress Frame Stats:");
2823 	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
2824 	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
2825 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
2826 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
2827 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
2828 	mgmt_rx_reo_alert("\t------------------------------------");
2829 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2830 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2831 	mgmt_rx_reo_alert("\t-------------------------------------------");
2832 
2833 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2834 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2835 				  stats->ingress_count[link_id][0],
2836 				  stats->ingress_count[link_id][1],
2837 				  stats->ingress_count[link_id][2],
2838 				  ingress_count_per_link[link_id]);
2839 		mgmt_rx_reo_alert("\t-------------------------------------------");
2840 	}
2841 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2842 			  ingress_count_per_desc_type[0],
2843 			  ingress_count_per_desc_type[1],
2844 			  ingress_count_per_desc_type[2],
2845 			  total_ingress_count);
2846 
2847 	mgmt_rx_reo_alert("\t2) Stale Frame Count:");
2848 	mgmt_rx_reo_alert("\t------------------------------------");
2849 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2850 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2851 	mgmt_rx_reo_alert("\t-------------------------------------------");
2852 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2853 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2854 				  stats->stale_count[link_id][0],
2855 				  stats->stale_count[link_id][1],
2856 				  stats->stale_count[link_id][2],
2857 				  stale_count_per_link[link_id]);
2858 		mgmt_rx_reo_alert("\t-------------------------------------------");
2859 	}
2860 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2861 			  stale_count_per_desc_type[0],
2862 			  stale_count_per_desc_type[1],
2863 			  stale_count_per_desc_type[2],
2864 			  total_stale_count);
2865 
2866 	mgmt_rx_reo_alert("\t3) Error Frame Count:");
2867 	mgmt_rx_reo_alert("\t------------------------------------");
2868 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2869 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2870 	mgmt_rx_reo_alert("\t-------------------------------------------");
2871 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2872 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2873 				  stats->error_count[link_id][0],
2874 				  stats->error_count[link_id][1],
2875 				  stats->error_count[link_id][2],
2876 				  error_count_per_link[link_id]);
2877 		mgmt_rx_reo_alert("\t-------------------------------------------");
2878 	}
2879 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2880 			  error_count_per_desc_type[0],
2881 			  error_count_per_desc_type[1],
2882 			  error_count_per_desc_type[2],
2883 			  total_error_count);
2884 
2885 	mgmt_rx_reo_alert("\t4) Host consumed frames related stats:");
2886 	mgmt_rx_reo_alert("\t------------------------------------------------");
2887 	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
2888 	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
2889 	mgmt_rx_reo_alert("\t------------------------------------------------");
2890 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2891 		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
2892 				  stats->queued_count[link_id],
2893 				  stats->zero_wait_count_rx_count[link_id],
2894 				  stats->immediate_delivery_count[link_id]);
2895 		mgmt_rx_reo_alert("\t------------------------------------------------");
2896 	}
2897 	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
2898 			  total_queued_count,
2899 			  total_zero_wait_count_rx_count,
2900 			  total_immediate_delivery_count);
2901 
2902 	return QDF_STATUS_SUCCESS;
2903 }
2904 
2905 /**
2906  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
2907  * the reorder algorithm.
2908  * @reo_ctx: management rx reorder context
2909  * @desc: Pointer to frame descriptor
2910  * @is_queued: Indicates whether this frame is queued to reorder list
2911  * @is_error: Indicates whether any error occurred during processing this frame
2912  *
2913  * Return: QDF_STATUS of operation
2914  */
2915 static QDF_STATUS
2916 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
2917 			      struct mgmt_rx_reo_frame_descriptor *desc,
2918 			      bool is_queued, bool is_error)
2919 {
2920 	struct reo_ingress_debug_info *ingress_frame_debug_info;
2921 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
2922 	struct reo_ingress_frame_stats *stats;
2923 	uint8_t link_id;
2924 
2925 	if (!reo_ctx || !desc)
2926 		return QDF_STATUS_E_NULL_VALUE;
2927 
2928 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
2929 
2930 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
2931 			[ingress_frame_debug_info->next_index];
2932 
2933 	cur_frame_debug_info->link_id =
2934 				mgmt_rx_reo_get_link_id(desc->rx_params);
2935 	cur_frame_debug_info->mgmt_pkt_ctr =
2936 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
2937 	cur_frame_debug_info->global_timestamp =
2938 				mgmt_rx_reo_get_global_ts(desc->rx_params);
2939 	cur_frame_debug_info->start_timestamp =
2940 				mgmt_rx_reo_get_start_ts(desc->rx_params);
2941 	cur_frame_debug_info->end_timestamp =
2942 				mgmt_rx_reo_get_end_ts(desc->rx_params);
2943 	cur_frame_debug_info->duration_us =
2944 				mgmt_rx_reo_get_duration_us(desc->rx_params);
2945 	cur_frame_debug_info->desc_type = desc->type;
2946 	cur_frame_debug_info->frame_type = desc->frame_type;
2947 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
2948 	cur_frame_debug_info->wait_count = desc->wait_count;
2949 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
2950 		     desc->shared_snapshots,
2951 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
2952 			     sizeof(desc->shared_snapshots)));
2953 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
2954 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
2955 			     sizeof(desc->host_snapshot)));
2956 	cur_frame_debug_info->is_queued = is_queued;
2957 	cur_frame_debug_info->is_stale = desc->is_stale;
2958 	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
2959 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
2960 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
2961 	cur_frame_debug_info->is_error = is_error;
2962 	cur_frame_debug_info->ts_last_released_frame =
2963 				reo_ctx->reo_list.ts_last_released_frame;
2964 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
2965 	cur_frame_debug_info->ingress_duration =
2966 			qdf_get_log_timestamp() - desc->ingress_timestamp;
2967 	cur_frame_debug_info->list_size_rx = desc->list_size_rx;
2968 	cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
2969 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
2970 	cur_frame_debug_info->reo_required = desc->reo_required;
2971 
2972 	ingress_frame_debug_info->next_index++;
2973 	ingress_frame_debug_info->next_index %=
2974 				MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
2975 	if (ingress_frame_debug_info->next_index == 0)
2976 		ingress_frame_debug_info->wrap_aroud = true;
2977 
2978 	stats = &ingress_frame_debug_info->stats;
2979 	link_id = cur_frame_debug_info->link_id;
2980 	stats->ingress_count[link_id][desc->type]++;
2981 	if (is_queued)
2982 		stats->queued_count[link_id]++;
2983 	if (desc->zero_wait_count_rx)
2984 		stats->zero_wait_count_rx_count[link_id]++;
2985 	if (desc->immediate_delivery)
2986 		stats->immediate_delivery_count[link_id]++;
2987 	if (is_error)
2988 		stats->error_count[link_id][desc->type]++;
2989 	if (desc->is_stale)
2990 		stats->stale_count[link_id][desc->type]++;
2991 
2992 	return QDF_STATUS_SUCCESS;
2993 }
2994 
2995 /**
2996  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
2997  * about the latest frames entered the reorder module
2998  * @reo_ctx: management rx reorder context
2999  * @num_frames: Number of frames for which the debug information is to be
3000  * printed. If @num_frames is 0, then debug information about all the frames
3001  * in the ring buffer will be  printed.
3002  *
3003  * Return: QDF_STATUS of operation
3004  */
3005 static QDF_STATUS
3006 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
3007 					   uint16_t num_frames)
3008 {
3009 	struct reo_ingress_debug_info *ingress_frame_debug_info;
3010 	int start_index;
3011 	uint16_t index;
3012 	uint16_t entry;
3013 	uint16_t num_valid_entries;
3014 	uint16_t num_entries_to_print;
3015 	char *boarder;
3016 
3017 	if (!reo_ctx)
3018 		return QDF_STATUS_E_NULL_VALUE;
3019 
3020 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
3021 
3022 	if (ingress_frame_debug_info->wrap_aroud)
3023 		num_valid_entries = MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3024 	else
3025 		num_valid_entries = ingress_frame_debug_info->next_index;
3026 
3027 	if (num_frames == 0) {
3028 		num_entries_to_print = num_valid_entries;
3029 
3030 		if (ingress_frame_debug_info->wrap_aroud)
3031 			start_index = ingress_frame_debug_info->next_index;
3032 		else
3033 			start_index = 0;
3034 	} else {
3035 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
3036 
3037 		start_index = (ingress_frame_debug_info->next_index -
3038 			       num_entries_to_print +
3039 			       MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX)
3040 			      % MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3041 
3042 		qdf_assert_always(start_index >= 0 &&
3043 				  start_index < MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX);
3044 	}
3045 
3046 	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
3047 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
3048 				num_frames,
3049 				ingress_frame_debug_info->wrap_aroud,
3050 				ingress_frame_debug_info->next_index);
3051 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
3052 				start_index, num_entries_to_print);
3053 
3054 	if (!num_entries_to_print)
3055 		return QDF_STATUS_SUCCESS;
3056 
3057 	boarder = ingress_frame_debug_info->boarder;
3058 
3059 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3060 	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
3061 				"Index", "CPU", "D.type", "F.type", "F.subtype",
3062 				"Link", "SeqNo", "Global ts",
3063 				"Start ts", "End ts", "Dur", "Last ts",
3064 				"Ingress ts", "Flags", "Ingress Dur", "Size",
3065 				"Pos", "Wait Count", "Snapshot : link 0",
3066 				"Snapshot : link 1", "Snapshot : link 2",
3067 				"Snapshot : link 3", "Snapshot : link 4",
3068 				"Snapshot : link 5");
3069 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3070 
3071 	index = start_index;
3072 	for (entry = 0; entry < num_entries_to_print; entry++) {
3073 		struct reo_ingress_debug_frame_info *info;
3074 		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
3075 		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
3076 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
3077 		char flag_queued = ' ';
3078 		char flag_stale = ' ';
3079 		char flag_parallel_rx = ' ';
3080 		char flag_error = ' ';
3081 		char flag_zero_wait_count_rx = ' ';
3082 		char flag_immediate_delivery = ' ';
3083 		char flag_reo_required = ' ';
3084 		int64_t ts_last_released_frame = -1;
3085 		uint8_t link;
3086 
3087 		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];
3088 
3089 		if (info->ts_last_released_frame.valid)
3090 			ts_last_released_frame =
3091 					info->ts_last_released_frame.global_ts;
3092 
3093 		if (info->is_queued)
3094 			flag_queued = 'Q';
3095 
3096 		if (info->is_stale)
3097 			flag_stale = 'S';
3098 
3099 		if (info->is_parallel_rx)
3100 			flag_parallel_rx = 'P';
3101 
3102 		if (info->is_error)
3103 			flag_error = 'E';
3104 
3105 		if (info->zero_wait_count_rx)
3106 			flag_zero_wait_count_rx = 'Z';
3107 
3108 		if (info->immediate_delivery)
3109 			flag_immediate_delivery = 'I';
3110 
3111 		if (!info->reo_required)
3112 			flag_reo_required = 'N';
3113 
3114 		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c", flag_error,
3115 			 flag_stale, flag_parallel_rx, flag_queued,
3116 			 flag_zero_wait_count_rx, flag_immediate_delivery,
3117 			 flag_reo_required);
3118 		snprintf(wait_count, sizeof(wait_count),
3119 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
3120 			 info->wait_count.total_count,
3121 			 info->wait_count.per_link_count[0],
3122 			 info->wait_count.per_link_count[1],
3123 			 info->wait_count.per_link_count[2],
3124 			 info->wait_count.per_link_count[3],
3125 			 info->wait_count.per_link_count[4],
3126 			 info->wait_count.per_link_count[5]);
3127 
3128 		for (link = 0; link < MAX_MLO_LINKS; link++) {
3129 			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3130 			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3131 			char fw_forwaded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3132 			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3133 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
3134 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
3135 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
3136 			struct mgmt_rx_reo_snapshot_params *host_ss;
3137 
3138 			mac_hw_ss = &info->shared_snapshots
3139 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
3140 			fw_consumed_ss = &info->shared_snapshots
3141 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
3142 			fw_forwarded_ss = &info->shared_snapshots
3143 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
3144 			host_ss = &info->host_snapshot[link];
3145 
3146 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
3147 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
3148 				 mac_hw_ss->global_timestamp);
3149 			snprintf(fw_consumed, sizeof(fw_consumed),
3150 				 "(%1u, %5u, %10u)",
3151 				 fw_consumed_ss->valid,
3152 				 fw_consumed_ss->mgmt_pkt_ctr,
3153 				 fw_consumed_ss->global_timestamp);
3154 			snprintf(fw_forwaded, sizeof(fw_forwaded),
3155 				 "(%1u, %5u, %10u)",
3156 				 fw_forwarded_ss->valid,
3157 				 fw_forwarded_ss->mgmt_pkt_ctr,
3158 				 fw_forwarded_ss->global_timestamp);
3159 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
3160 				 host_ss->valid,
3161 				 host_ss->mgmt_pkt_ctr,
3162 				 host_ss->global_timestamp);
3163 			snprintf(snapshots[link], sizeof(snapshots[link]),
3164 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
3165 				 fw_forwaded, host);
3166 		}
3167 
3168 		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%13s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
3169 					entry, info->cpu_id, info->desc_type,
3170 					info->frame_type, info->frame_subtype,
3171 					info->link_id,
3172 					info->mgmt_pkt_ctr,
3173 					info->global_timestamp,
3174 					info->start_timestamp,
3175 					info->end_timestamp,
3176 					info->duration_us,
3177 					ts_last_released_frame,
3178 					info->ingress_timestamp, flags,
3179 					info->ingress_duration,
3180 					info->list_size_rx,
3181 					info->list_insertion_pos, wait_count,
3182 					snapshots[0], snapshots[1],
3183 					snapshots[2], snapshots[3],
3184 					snapshots[4], snapshots[5]);
3185 		mgmt_rx_reo_alert_no_fl("%s", boarder);
3186 
3187 		index++;
3188 		index %= MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3189 	}
3190 
3191 	return QDF_STATUS_SUCCESS;
3192 }
3193 #else
3194 /**
3195  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
3196  * related to frames going into the reorder module
3197  * @reo_ctx: Pointer to reorder context
3198  *
3199  * API to print the stats related to frames going into the management
3200  * Rx reorder module.
3201  *
3202  * Return: QDF_STATUS
3203  */
3204 static QDF_STATUS
3205 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
3206 {
3207 	return QDF_STATUS_SUCCESS;
3208 }
3209 
3210 /**
3211  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
3212  * the reorder algorithm.
3213  * @reo_ctx: management rx reorder context
3214  * @desc: Pointer to frame descriptor
3215  * @is_queued: Indicates whether this frame is queued to reorder list
3216  * @is_error: Indicates whether any error occurred during processing this frame
3217  *
3218  * Return: QDF_STATUS of operation
3219  */
3220 static QDF_STATUS
3221 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
3222 			      struct mgmt_rx_reo_frame_descriptor *desc,
3223 			      bool is_queued, bool is_error)
3224 {
3225 	return QDF_STATUS_SUCCESS;
3226 }
3227 
3228 /**
3229  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
3230  * the latest frames entering the reorder module
3231  * @reo_ctx: management rx reorder context
3232  *
3233  * Return: QDF_STATUS of operation
3234  */
3235 static QDF_STATUS
3236 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
3237 {
3238 	return QDF_STATUS_SUCCESS;
3239 }
3240 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
3241 
/*
 * wlan_mgmt_rx_reo_algo_entry() - Entry point to the management Rx reorder
 * algorithm. Updates the host snapshot, computes the wait count for the
 * frame, queues it to the reorder list and releases any entries whose
 * wait conditions are now satisfied. *is_queued reports whether the frame
 * was queued to the reorder list.
 */
QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	QDF_STATUS ret;

	if (!is_queued)
		return QDF_STATUS_E_NULL_VALUE;

	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = mgmt_rx_reo_get_context();
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/*
	 * Critical Section = Host snapshot update + Calculation of wait
	 * counts + Update reorder list. Following section describes the
	 * motivation for making this a critical section.
	 * Lets take an example of 2 links (Link A & B) and each has received
	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
	 * MLO global time stamp of B1. Host is concurrently executing
	 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs.
	 *
	 * A lock less version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is
	 * as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     Host snapshot update
	 *     Calculation of wait counts
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * We may run into race conditions under the following sequence of
	 * operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1
	 * 2. Host snapshot update for link B in context of frame B1
	 * 3. Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 6. Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This leads to incorrect behaviour as B1 goes to upper layer before
	 * A1.
	 *
	 * To prevent this lets make Host snapshot update + Calculate wait count
	 * a critical section by adding locks. The updated version of the API
	 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *     UNLOCK
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * With this API also We may run into race conditions under the
	 * following sequence of operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1 +
	 *    Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 2. Host snapshot update for link B in context of frame B1 +
	 *    Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This also leads to incorrect behaviour as B1 goes to upper layer
	 * before A1.
	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count + Update reorder list a critical section by adding locks.
	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
	 * is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *         Update reorder list
	 *     UNLOCK
	 *     Release to upper layer
	 * }
	 */
	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);

	qdf_assert_always(desc->rx_params->reo_params->valid);
	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);

	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
	    desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME)
		qdf_assert_always(desc->rx_params->reo_params->duration_us);

	/* Update the Host snapshot */
	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Compute wait count for this frame/event */
	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Update the REO list */
	ret = mgmt_rx_reo_update_list(&reo_ctx->reo_list, desc, is_queued);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
					    *is_queued, false);
	if (QDF_IS_STATUS_ERROR(ret)) {
		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
		return ret;
	}

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	/* Finally, release the entries for which pending frame is received */
	return mgmt_rx_reo_list_release_entries(reo_ctx);

failure:
	/*
	 * Ignore the return value of this function call, return
	 * the actual reason for failure.
	 */
	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	return ret;
}
3400 
3401 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
3402 /**
3403  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
3404  * context.
3405  * @reo_context: Pointer to reo context
3406  *
3407  * Return: QDF_STATUS of operation
3408  */
3409 static inline QDF_STATUS
3410 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
3411 {
3412 	return QDF_STATUS_SUCCESS;
3413 }
3414 
3415 /**
3416  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
3417  * context.
3418  * @reo_context: Pointer to reo context
3419  *
3420  * Return: QDF_STATUS of operation
3421  */
3422 static inline QDF_STATUS
3423 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
3424 {
3425 	return QDF_STATUS_SUCCESS;
3426 }
3427 
/*
 * No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined;
 * pdev creation needs no simulation-specific handling.
 */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3433 
/*
 * No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined;
 * pdev destruction needs no simulation-specific handling.
 */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3439 #else
3440 /**
3441  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
3442  * master management frame list
3443  * @master_frame_list: pointer to master management frame list
3444  * @frame: pointer to management frame parameters
3445  *
3446  * This API removes frames from the master management frame list. This API is
3447  * used in case of FW consumed management frames or management frames which
3448  * are dropped at host due to any error.
3449  *
3450  * Return: QDF_STATUS of operation
3451  */
3452 static QDF_STATUS
3453 mgmt_rx_reo_sim_remove_frame_from_master_list(
3454 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3455 		const struct mgmt_rx_frame_params *frame)
3456 {
3457 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
3458 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
3459 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
3460 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
3461 	QDF_STATUS status;
3462 
3463 	if (!master_frame_list) {
3464 		mgmt_rx_reo_err("Mgmt master frame list is null");
3465 		return QDF_STATUS_E_NULL_VALUE;
3466 	}
3467 
3468 	if (!frame) {
3469 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3470 		return QDF_STATUS_E_NULL_VALUE;
3471 	}
3472 
3473 	qdf_spin_lock(&master_frame_list->lock);
3474 
3475 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
3476 			  node) {
3477 		if (pending_entry->params.link_id == frame->link_id &&
3478 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3479 		    pending_entry->params.global_timestamp ==
3480 		    frame->global_timestamp) {
3481 			matching_pend_entry = pending_entry;
3482 			break;
3483 		}
3484 	}
3485 
3486 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
3487 		if (stale_entry->params.link_id == frame->link_id &&
3488 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3489 		    stale_entry->params.global_timestamp ==
3490 		    frame->global_timestamp) {
3491 			matching_stale_entry = stale_entry;
3492 			break;
3493 		}
3494 	}
3495 
3496 	/* Found in pending and stale list. Duplicate entries, assert */
3497 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
3498 
3499 	if (!matching_pend_entry && !matching_stale_entry) {
3500 		qdf_spin_unlock(&master_frame_list->lock);
3501 		mgmt_rx_reo_err("No matching frame in pend/stale list");
3502 		return QDF_STATUS_E_FAILURE;
3503 	}
3504 
3505 	if (matching_pend_entry) {
3506 		status = qdf_list_remove_node(&master_frame_list->pending_list,
3507 					      &matching_pend_entry->node);
3508 		if (QDF_IS_STATUS_ERROR(status)) {
3509 			qdf_spin_unlock(&master_frame_list->lock);
3510 			mgmt_rx_reo_err("Failed to remove the matching entry");
3511 			return status;
3512 		}
3513 
3514 		qdf_mem_free(matching_pend_entry);
3515 	}
3516 
3517 	if (matching_stale_entry) {
3518 		status = qdf_list_remove_node(&master_frame_list->stale_list,
3519 					      &matching_stale_entry->node);
3520 		if (QDF_IS_STATUS_ERROR(status)) {
3521 			qdf_spin_unlock(&master_frame_list->lock);
3522 			mgmt_rx_reo_err("Failed to remove the matching entry");
3523 			return status;
3524 		}
3525 
3526 		qdf_mem_free(matching_stale_entry);
3527 	}
3528 
3529 	qdf_spin_unlock(&master_frame_list->lock);
3530 
3531 	return QDF_STATUS_SUCCESS;
3532 }
3533 
3534 /**
3535  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
3536  * pending management frame list
3537  * @master_frame_list: pointer to master management frame list
3538  * @frame: pointer to management frame parameters
3539  *
3540  * This API removes frames from the pending management frame list. This API is
3541  * used in case of FW consumed management frames or management frames which
3542  * are dropped at host due to any error.
3543  *
3544  * Return: QDF_STATUS of operation
3545  */
3546 static QDF_STATUS
3547 mgmt_rx_reo_sim_remove_frame_from_pending_list(
3548 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3549 		const struct mgmt_rx_frame_params *frame)
3550 {
3551 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
3552 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
3553 	QDF_STATUS status;
3554 
3555 	if (!master_frame_list) {
3556 		mgmt_rx_reo_err("Mgmt master frame list is null");
3557 		return QDF_STATUS_E_NULL_VALUE;
3558 	}
3559 
3560 	if (!frame) {
3561 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3562 		return QDF_STATUS_E_NULL_VALUE;
3563 	}
3564 
3565 	qdf_spin_lock(&master_frame_list->lock);
3566 
3567 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
3568 		if (cur_entry->params.link_id == frame->link_id &&
3569 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3570 		    cur_entry->params.global_timestamp ==
3571 		    frame->global_timestamp) {
3572 			matching_entry = cur_entry;
3573 			break;
3574 		}
3575 	}
3576 
3577 	if (!matching_entry) {
3578 		qdf_spin_unlock(&master_frame_list->lock);
3579 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
3580 		return QDF_STATUS_E_FAILURE;
3581 	}
3582 
3583 	status = qdf_list_remove_node(&master_frame_list->pending_list,
3584 				      &matching_entry->node);
3585 	if (QDF_IS_STATUS_ERROR(status)) {
3586 		qdf_spin_unlock(&master_frame_list->lock);
3587 		mgmt_rx_reo_err("Failed to remove the matching entry");
3588 		return status;
3589 	}
3590 
3591 	qdf_mem_free(matching_entry);
3592 
3593 	qdf_spin_unlock(&master_frame_list->lock);
3594 
3595 
3596 	return QDF_STATUS_SUCCESS;
3597 }
3598 
3599 /**
3600  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
3601  * pending management frame list
3602  * @master_frame_list: pointer to master management frame list
3603  * @frame: pointer to management frame parameters
3604  *
3605  * This API inserts frames to the pending management frame list. This API is
3606  * used to insert frames generated by the MAC HW to the pending frame list.
3607  *
3608  * Return: QDF_STATUS of operation
3609  */
3610 static QDF_STATUS
3611 mgmt_rx_reo_sim_add_frame_to_pending_list(
3612 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3613 		const struct mgmt_rx_frame_params *frame)
3614 {
3615 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
3616 	QDF_STATUS status;
3617 
3618 	if (!master_frame_list) {
3619 		mgmt_rx_reo_err("Mgmt master frame list is null");
3620 		return QDF_STATUS_E_NULL_VALUE;
3621 	}
3622 
3623 	if (!frame) {
3624 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
3625 		return QDF_STATUS_E_NULL_VALUE;
3626 	}
3627 
3628 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
3629 	if (!new_entry) {
3630 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
3631 		return QDF_STATUS_E_NOMEM;
3632 	}
3633 
3634 	new_entry->params = *frame;
3635 
3636 	qdf_spin_lock(&master_frame_list->lock);
3637 
3638 	status = qdf_list_insert_back(&master_frame_list->pending_list,
3639 				      &new_entry->node);
3640 
3641 	qdf_spin_unlock(&master_frame_list->lock);
3642 
3643 	if (QDF_IS_STATUS_ERROR(status)) {
3644 		mgmt_rx_reo_err("Failed to add frame to pending list");
3645 		qdf_mem_free(new_entry);
3646 		return status;
3647 	}
3648 
3649 	return QDF_STATUS_SUCCESS;
3650 }
3651 
/* Simulation check on a delivered frame: it must be at the head of the
 * pending master frame list (possibly after other entries carrying the same
 * global time stamp), proving the reorder algorithm delivered frames in
 * non-decreasing time stamp order. The matched entry is removed on success.
 */
QDF_STATUS
mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
				 struct mgmt_rx_event_params *mgmt_rx_params)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	QDF_STATUS status;
	struct mgmt_rx_reo_params *reo_params;

	if (!mgmt_rx_params) {
		mgmt_rx_reo_err("Mgmt rx params null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_params = mgmt_rx_params->reo_params;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("Mgmt reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	qdf_spin_lock(&sim_context->master_frame_list.lock);

	/* A delivered frame with no pending entry means the book keeping and
	 * the reorder algorithm disagree; treat as a fatal simulation failure.
	 */
	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
		qdf_spin_unlock(&sim_context->master_frame_list.lock);
		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
		qdf_assert_always(0);
	} else {
		struct mgmt_rx_frame_params *cur_entry_params;
		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;

		/**
		 * Make sure the frames delivered to upper layer are in the
		 * increasing order of global time stamp. For that the frame
		 * which is being delivered should be present at the head of the
		 * pending frame list. There could be multiple frames with the
		 * same global time stamp in the pending frame list. Search
		 * among all the frames at the head of the list which has the
		 * same global time stamp as the frame which is being delivered.
		 * To find matching frame, check whether packet counter,
		 * global time stamp and link id are same.
		 */
		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
				  cur_entry, node) {
			cur_entry_params = &cur_entry->params;

			/* Stop at the first entry whose time stamp differs;
			 * only head entries with the same time stamp qualify.
			 */
			if (cur_entry_params->global_timestamp !=
			    reo_params->global_timestamp)
				break;

			if (cur_entry_params->link_id == reo_params->link_id &&
			    cur_entry_params->mgmt_pkt_ctr ==
			    reo_params->mgmt_pkt_ctr) {
				matching_entry = cur_entry;
				break;
			}
		}

		/* NOTE(review): relies on qdf_assert_always() aborting; if
		 * asserts are compiled out, matching_entry is NULL below.
		 */
		if (!matching_entry) {
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("reo sim failure: mismatch");
			qdf_assert_always(0);
		}

		status = qdf_list_remove_node(
				&sim_context->master_frame_list.pending_list,
				&matching_entry->node);
		qdf_mem_free(matching_entry);

		if (QDF_IS_STATUS_ERROR(status)) {
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("Failed to remove matching entry");
			return status;
		}
	}

	qdf_spin_unlock(&sim_context->master_frame_list.lock);

	mgmt_rx_reo_debug("Successfully processed mgmt frame");
	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
			  reo_params->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
3741 
3742 /**
3743  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
3744  * @percentage_true: probability (in percentage) of true
3745  *
3746  * API to generate true with probability @percentage_true % and false with
3747  * probability (100 - @percentage_true) %.
3748  *
3749  * Return: true with probability @percentage_true % and false with probability
3750  * (100 - @percentage_true) %
3751  */
static bool
mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
{
	uint32_t rand;

	if (percentage_true > 100) {
		mgmt_rx_reo_err("Invalid probability value for true, %u",
				percentage_true);
		/* Bug fix: returning -EINVAL from a bool function converts to
		 * true, silently treating invalid input as a hit. Fail safe by
		 * returning false instead.
		 */
		return false;
	}

	get_random_bytes(&rand, sizeof(rand));

	/* rand % 100 is uniform enough over [0, 100) for simulation use */
	return ((rand % 100) < percentage_true);
}
3767 
3768 /**
3769  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
3770  * value in the range [0, max)
3771  * @max: upper limit for the output
3772  *
3773  * API to generate random unsigned integer value in the range [0, max).
3774  *
3775  * Return: unsigned integer value in the range [0, max)
3776  */
static uint32_t
mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
{
	uint32_t rand;

	/* Bug fix: modulo by zero is undefined behavior. The range [0, 0)
	 * is empty, so 0 is the only sensible result for max == 0.
	 */
	if (!max)
		return 0;

	get_random_bytes(&rand, sizeof(rand));

	return (rand % max);
}
3786 
3787 /**
3788  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
3789  * @sleeptime_us: Sleep time in micro seconds
3790  *
3791  * This API uses msleep() internally. So the granularity is limited to
3792  * milliseconds.
3793  *
3794  * Return: none
3795  */
static void
mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
{
	/* Granularity is limited to milliseconds; a sub-millisecond request
	 * rounds down, possibly to msleep(0).
	 */
	msleep(sleeptime_us / USEC_PER_MSEC);
}
3801 
3802 /**
3803  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
3804  * layer
3805  * @arg: Argument
3806  *
3807  * This API handles the management frame at the host layer. This is applicable
3808  * for simulation alone.
3809  *
3810  * Return: none
3811  */
static void
mgmt_rx_reo_sim_frame_handler_host(void *arg)
{
	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
	uint32_t fw_to_host_delay_us;
	bool is_error_frame = false;
	/* -1 until the frame is validated, so early error logs are tagged */
	int8_t link_id = -1;
	struct mgmt_rx_event_params *rx_params;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct wlan_objmgr_pdev *pdev;

	if (!frame_fw) {
		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
				link_id);
		goto error_print;
	}

	link_id = frame_fw->params.link_id;

	sim_context = frame_fw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_fw_frame;
	}

	/* Simulate a random FW-to-host delivery latency */
	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
			      mgmt_rx_reo_sim_get_random_unsigned_int(
			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);

	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);

	/* Frames not consumed by FW may randomly be marked as host errors */
	if (!frame_fw->is_consumed_by_fw) {
		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);

		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. Error frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		if (is_error_frame) {
			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_fw->params);

			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
						link_id);
				qdf_assert_always(0);
			}
		}
	}

	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
			  link_id, frame_fw->params.global_timestamp,
			  frame_fw->params.mgmt_pkt_ctr,
			  frame_fw->is_consumed_by_fw, is_error_frame);

	/* Build the rx event parameters expected by the reorder target APIs */
	rx_params = alloc_mgmt_rx_event_params();
	if (!rx_params) {
		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
				link_id);
		goto error_free_fw_frame;
	}

	rx_params->reo_params->link_id = frame_fw->params.link_id;
	rx_params->reo_params->global_timestamp =
					frame_fw->params.global_timestamp;
	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
	rx_params->reo_params->valid = true;

	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);
	if (!pdev) {
		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
		goto error_free_mgmt_rx_event_params;
	}

	/* Dispatch: host-drop and FW-consumed paths free rx_params here; in
	 * the reorder path rx_params presumably passes ownership to the
	 * handler — verify against tgt_mgmt_rx_reo_frame_handler.
	 */
	if (is_error_frame) {
		status = tgt_mgmt_rx_reo_host_drop_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else if (frame_fw->is_consumed_by_fw) {
		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else {
		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
	}

	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to execute reo algorithm");
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_fw);

	return;

error_free_mgmt_rx_event_params:
	free_mgmt_rx_event_params(rx_params);
error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_print:
	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
			link_id);
}
3923 
3924 /**
3925  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
3926  * frame reordering
3927  * @link_id: link id
3928  * @id: snapshot id
3929  * @value: snapshot value
3930  *
3931  * This API writes the snapshots used for management frame reordering. MAC HW
3932  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
3933  * snapshots.
3934  *
3935  * Return: QDF_STATUS
3936  */
3937 static QDF_STATUS
3938 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id,
3939 			       enum mgmt_rx_reo_shared_snapshot_id id,
3940 			       struct mgmt_rx_reo_shared_snapshot value)
3941 {
3942 	struct wlan_objmgr_pdev *pdev;
3943 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
3944 	QDF_STATUS status;
3945 
3946 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);
3947 
3948 	if (!pdev) {
3949 		mgmt_rx_reo_err("pdev is null");
3950 		return QDF_STATUS_E_NULL_VALUE;
3951 	}
3952 
3953 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
3954 						      &snapshot_address);
3955 
3956 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
3957 
3958 	if (QDF_IS_STATUS_ERROR(status)) {
3959 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
3960 				id, pdev);
3961 		return QDF_STATUS_E_FAILURE;
3962 	}
3963 
3964 	snapshot_address->mgmt_rx_reo_snapshot_low =
3965 						value.mgmt_rx_reo_snapshot_low;
3966 	snapshot_address->mgmt_rx_reo_snapshot_high =
3967 						value.mgmt_rx_reo_snapshot_high;
3968 
3969 	return QDF_STATUS_SUCCESS;
3970 }
3971 
3972 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
3973 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
3974 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
3975 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
3976 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
3977 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)
3978 
3979 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
3980 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
3981 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
3982 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
3983 
3984 /**
3985  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
3986  * management frame
3987  * @global_timestamp: global time stamp
3988  * @mgmt_pkt_ctr: management packet counter
3989  *
3990  * This API gets the snapshot value for a frame with time stamp
3991  * @global_timestamp and sequence number @mgmt_pkt_ctr.
3992  *
3993  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
3994  */
static struct mgmt_rx_reo_shared_snapshot
mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
				   uint16_t mgmt_pkt_ctr)
{
	struct mgmt_rx_reo_shared_snapshot snapshot = {0};

	/* Low word: valid bit, full 16-bit packet counter, and the lower
	 * 15 bits of the global time stamp.
	 */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
		     global_timestamp);

	/* High word: upper 17 bits of the global time stamp, plus a redundant
	 * copy of the lower 15 bits of the packet counter.
	 */
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
		     global_timestamp >> 15);
	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
		     mgmt_pkt_ctr);

	return snapshot;
}
4023 
4024 /**
4025  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
4026  * @arg: Argument
4027  *
4028  * This API handles the management frame at the fw layer. This is applicable
4029  * for simulation alone.
4030  *
4031  * Return: none
4032  */
static void
mgmt_rx_reo_sim_frame_handler_fw(void *arg)
{
	struct mgmt_rx_frame_mac_hw *frame_hw =
					(struct mgmt_rx_frame_mac_hw *)arg;
	uint32_t mac_hw_to_fw_delay_us;
	bool is_consumed_by_fw;
	struct  mgmt_rx_frame_fw *frame_fw;
	/* -1 until the frame is validated, so early error logs are tagged */
	int8_t link_id = -1;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
	struct mgmt_rx_reo_shared_snapshot snapshot_value;
	bool ret;

	if (!frame_hw) {
		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
				link_id);
		qdf_assert_always(0);
	}

	link_id = frame_hw->params.link_id;

	sim_context = frame_hw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_mac_hw_frame;
	}

	/* Simulate a random MAC HW to FW delivery latency */
	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);

	/* Randomly decide whether FW consumes this frame itself */
	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);

	if (is_consumed_by_fw) {
		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. FW consumed frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_hw->params);

		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
					link_id);
			qdf_assert_always(0);
		}
	}

	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
			  link_id, frame_hw->params.global_timestamp,
			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);

	/* Re-package the MAC HW frame as a FW frame for the host handler */
	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
	if (!frame_fw) {
		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
				link_id);
		goto error_free_mac_hw_frame;
	}

	frame_fw->params = frame_hw->params;
	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
	frame_fw->sim_context = frame_hw->sim_context;

	/* FW updates either the consumed or the forwarded snapshot,
	 * depending on whether it kept the frame for itself.
	 */
	snapshot_id = is_consumed_by_fw ?
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED;

	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
					frame_hw->params.global_timestamp,
					frame_hw->params.mgmt_pkt_ctr);

	status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
						snapshot_value);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
				link_id, snapshot_id);
		goto error_free_fw_frame;
	}

	/* Hand the frame to the per-link host handler work queue */
	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
		goto error_free_fw_frame;
	}

	ret = qdf_queue_work(
			NULL, sim_context->host_mgmt_frame_handler[link_id],
			&frame_fw->frame_handler_host);
	if (!ret) {
		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
				link_id);
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_hw);

	return;

error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_free_mac_hw_frame:
	qdf_mem_free(frame_hw);

	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
			link_id);
}
4149 
4150 /**
4151  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
4152  * from the index to the valid link list
4153  * @valid_link_list_index: Index to list of valid links
4154  *
4155  * Return: link id
4156  */
4157 static int8_t
4158 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
4159 {
4160 	struct mgmt_rx_reo_sim_context *sim_context;
4161 
4162 	if (valid_link_list_index >= MAX_MLO_LINKS) {
4163 		mgmt_rx_reo_err("Invalid index %u to valid link list",
4164 				valid_link_list_index);
4165 		return MGMT_RX_REO_INVALID_LINK_ID;
4166 	}
4167 
4168 	sim_context = mgmt_rx_reo_sim_get_context();
4169 	if (!sim_context) {
4170 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4171 		return MGMT_RX_REO_INVALID_LINK_ID;
4172 	}
4173 
4174 	return sim_context->link_id_to_pdev_map.valid_link_list
4175 						[valid_link_list_index];
4176 }
4177 
4178 /**
4179  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
4180  * the air
4181  * @mac_hw: pointer to structure representing MAC HW
4182  * @num_mlo_links: number of MLO HW links
4183  * @frame: pointer to management frame parameters
4184  *
4185  * This API simulates the management frame reception from air.
4186  *
4187  * Return: QDF_STATUS
4188  */
4189 static QDF_STATUS
4190 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4191 				 uint8_t num_mlo_links,
4192 				 struct mgmt_rx_frame_params *frame)
4193 {
4194 	uint8_t valid_link_list_index;
4195 	QDF_STATUS status;
4196 	int8_t link_id;
4197 
4198 	if (!mac_hw) {
4199 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4200 		return QDF_STATUS_E_NULL_VALUE;
4201 	}
4202 
4203 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
4204 		mgmt_rx_reo_err("Invalid number of MLO links %u",
4205 				num_mlo_links);
4206 		return QDF_STATUS_E_INVAL;
4207 	}
4208 
4209 	if (!frame) {
4210 		mgmt_rx_reo_err("pointer to frame parameters is null");
4211 		return QDF_STATUS_E_NULL_VALUE;
4212 	}
4213 
4214 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
4215 							num_mlo_links);
4216 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
4217 	qdf_assert_always(link_id >= 0);
4218 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4219 
4220 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
4221 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
4222 	frame->link_id = link_id;
4223 
4224 	return QDF_STATUS_SUCCESS;
4225 }
4226 
4227 /**
4228  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
4229  * HW in case of any Rx error.
4230  * @mac_hw: pointer to structure representing MAC HW
4231  * @frame: pointer to management frame parameters
4232  *
4233  * Return: QDF_STATUS
4234  */
4235 static QDF_STATUS
4236 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4237 				      struct mgmt_rx_frame_params *frame)
4238 {
4239 	if (!mac_hw) {
4240 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4241 		return QDF_STATUS_E_NULL_VALUE;
4242 	}
4243 
4244 	if (!frame) {
4245 		mgmt_rx_reo_err("pointer to frame parameters is null");
4246 		return QDF_STATUS_E_NULL_VALUE;
4247 	}
4248 
4249 	if (frame->link_id >= MAX_MLO_LINKS) {
4250 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
4251 		return QDF_STATUS_E_INVAL;
4252 	}
4253 
4254 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
4255 
4256 	return QDF_STATUS_SUCCESS;
4257 }
4258 
4259 /**
4260  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
4261  * @data: pointer to data input
4262  *
4263  * kthread handler to simulate MAC HW.
4264  *
4265  * Return: 0 for success, else failure
4266  */
static int
mgmt_rx_reo_sim_mac_hw_thread(void *data)
{
	struct mgmt_rx_reo_sim_context *sim_context = data;
	struct mgmt_rx_reo_sim_mac_hw *mac_hw;

	if (!sim_context) {
		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
		return -EINVAL;
	}

	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;

	/* Per iteration: receive one frame "from the air" on a random link,
	 * add it to the master pending list, publish the MAC HW snapshot and
	 * queue the frame to the per-link FW handler. Each failure path
	 * unwinds whatever was done so far in reverse order.
	 */
	while (!qdf_thread_should_stop()) {
		uint32_t inter_frame_delay_us;
		struct mgmt_rx_frame_params frame;
		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
		int8_t link_id = -1;
		QDF_STATUS status;
		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
		struct mgmt_rx_reo_shared_snapshot snapshot_value;
		int8_t num_mlo_links;
		bool ret;

		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
		if (num_mlo_links < 0 ||
		    num_mlo_links > MAX_MLO_LINKS) {
			mgmt_rx_reo_err("Invalid number of MLO links %d",
					num_mlo_links);
			qdf_assert_always(0);
		}

		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
							  &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Receive from the air failed");
			/**
			 * Frame reception failed and we are not sure about the
			 * link id. Without link id there is no way to restore
			 * the mac hw state. Hence assert unconditionally.
			 */
			qdf_assert_always(0);
		}
		link_id = frame.link_id;

		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
				  link_id, frame.global_timestamp,
				  frame.mgmt_pkt_ctr);

		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
		if (!frame_mac_hw) {
			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			continue;
		}

		frame_mac_hw->params = frame;
		frame_mac_hw->sim_context = sim_context;

		/* Book keeping: record this frame in the master pending list
		 * so later stages can validate delivery order.
		 */
		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
				&sim_context->master_frame_list, &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
						frame.global_timestamp,
						frame.mgmt_pkt_ctr);

		status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
							snapshot_value);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
					link_id, snapshot_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
				&sim_context->master_frame_list, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		/* Hand the frame to the per-link FW handler work queue */
		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
					 mgmt_rx_reo_sim_frame_handler_fw,
					 frame_mac_hw);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to create work",
					link_id);
			qdf_assert_always(0);
		}

		ret = qdf_queue_work(
			NULL, sim_context->fw_mgmt_frame_handler[link_id],
			&frame_mac_hw->frame_handler_fw);
		if (!ret) {
			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
					link_id);
			qdf_assert_always(0);
		}

		/* Random gap before the next simulated frame */
		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);

		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
	}

	return 0;
}
4400 
4401 /**
4402  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
4403  * management frame list
 * @master_frame_list: Pointer to master frame list
4405  *
4406  * This API initializes the master management frame list
4407  *
4408  * Return: QDF_STATUS
4409  */
4410 static QDF_STATUS
4411 mgmt_rx_reo_sim_init_master_frame_list(
4412 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4413 {
4414 	qdf_spinlock_create(&master_frame_list->lock);
4415 
4416 	qdf_list_create(&master_frame_list->pending_list,
4417 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
4418 	qdf_list_create(&master_frame_list->stale_list,
4419 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
4420 
4421 	return QDF_STATUS_SUCCESS;
4422 }
4423 
4424 /**
4425  * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
4426  * management frame list
4427  * @master_frame_list: Pointer to master frame list
4428  *
4429  * This API de initializes the master management frame list
4430  *
4431  * Return: QDF_STATUS
4432  */
4433 static QDF_STATUS
4434 mgmt_rx_reo_sim_deinit_master_frame_list(
4435 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4436 {
4437 	qdf_spin_lock(&master_frame_list->lock);
4438 	qdf_list_destroy(&master_frame_list->stale_list);
4439 	qdf_list_destroy(&master_frame_list->pending_list);
4440 	qdf_spin_unlock(&master_frame_list->lock);
4441 
4442 	qdf_spinlock_destroy(&master_frame_list->lock);
4443 
4444 	return QDF_STATUS_SUCCESS;
4445 }
4446 
4447 /**
4448  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
4449  * unique link id values
4450  * @link_id_to_pdev_map: pointer to link id to pdev map
4451  * @link_id: Pointer to unique link id
4452  *
4453  * This API generates unique link id values for each pdev. This API should be
4454  * called after acquiring the spin lock protecting link id to pdev map.
4455  *
4456  * Return: QDF_STATUS
4457  */
4458 static QDF_STATUS
4459 mgmt_rx_reo_sim_generate_unique_link_id(
4460 		struct wlan_objmgr_pdev *link_id_to_pdev_map, uint8_t *link_id)
4461 {
4462 	uint8_t random_link_id;
4463 	uint8_t link_id;
4464 
4465 	if (!link_id_to_pdev_map || !link_id)
4466 		return QDF_STATUS_E_NULL_VALUE;
4467 
4468 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
4469 		if (!link_id_to_pdev_map[link_id])
4470 			break;
4471 
4472 	if (link_id == MAX_MLO_LINKS) {
4473 		mgmt_rx_reo_err("All link ids are already allocated");
4474 		return QDF_STATUS_E_FAILURE;
4475 	}
4476 
4477 	while (1) {
4478 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
4479 							MAX_MLO_LINKS);
4480 
4481 		if (!link_id_to_pdev_map[random_link_id])
4482 			break;
4483 	}
4484 
4485 	*link_id = random_link_id;
4486 
4487 	return QDF_STATUS_SUCCESS;
4488 }
4489 
4490 /**
4491  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
4492  * to pdev map
4493  * @link_id_to_pdev_map: pointer to link id to pdev map
4494  * @pdev: pointer to pdev object
4495  *
4496  * This API incrementally builds the MLO HW link id to pdev map. This API is
4497  * used only for simulation.
4498  *
4499  * Return: QDF_STATUS
4500  */
4501 static QDF_STATUS
4502 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4503 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4504 		struct wlan_objmgr_pdev *pdev)
4505 {
4506 	uint8_t link_id;
4507 
4508 	if (!link_id_to_pdev_map) {
4509 		mgmt_rx_reo_err("Link id to pdev map is null");
4510 		return QDF_STATUS_E_NULL_VALUE;
4511 	}
4512 
4513 	if (!pdev) {
4514 		mgmt_rx_reo_err("pdev is null");
4515 		return QDF_STATUS_E_NULL_VALUE;
4516 	}
4517 
4518 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4519 
4520 	status = mgmt_rx_reo_sim_generate_unique_link_id(
4521 					link_id_to_pdev_map->map, &link_id)
4522 	if (QDF_IS_STATUS_ERROR(status)) {
4523 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
4524 		return QDF_STATUS_E_FAILURE;
4525 	}
4526 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4527 
4528 	link_id_to_pdev_map->map[link_id] = pdev;
4529 	link_id_to_pdev_map->valid_link_list
4530 			[link_id_to_pdev_map->num_mlo_links] = link_id;
4531 	link_id_to_pdev_map->num_mlo_links++;
4532 
4533 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4534 
4535 	return QDF_STATUS_SUCCESS;
4536 }
4537 
4538 /**
4539  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
4540  * id to pdev map
4541  * @link_id_to_pdev_map: pointer to link id to pdev map
4542  * @pdev: pointer to pdev object
4543  *
4544  * This API incrementally destroys the MLO HW link id to pdev map. This API is
4545  * used only for simulation.
4546  *
4547  * Return: QDF_STATUS
4548  */
4549 static QDF_STATUS
4550 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4551 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4552 		struct wlan_objmgr_pdev *pdev)
4553 {
4554 	uint8_t link_id;
4555 
4556 	if (!link_id_to_pdev_map) {
4557 		mgmt_rx_reo_err("Link id to pdev map is null");
4558 		return QDF_STATUS_E_NULL_VALUE;
4559 	}
4560 
4561 	if (!pdev) {
4562 		mgmt_rx_reo_err("pdev is null");
4563 		return QDF_STATUS_E_NULL_VALUE;
4564 	}
4565 
4566 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4567 
4568 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4569 		if (link_id_to_pdev_map->map[link_id] == pdev) {
4570 			link_id_to_pdev_map->map[link_id] = NULL;
4571 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
4572 
4573 			return QDF_STATUS_SUCCESS;
4574 		}
4575 	}
4576 
4577 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4578 
4579 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
4580 
4581 	return QDF_STATUS_E_FAILURE;
4582 }
4583 
4584 QDF_STATUS
4585 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
4586 {
4587 	struct mgmt_rx_reo_sim_context *sim_context;
4588 	QDF_STATUS status;
4589 
4590 	sim_context = mgmt_rx_reo_sim_get_context();
4591 	if (!sim_context) {
4592 		mgmt_rx_reo_err("Mgmt simulation context is null");
4593 		return QDF_STATUS_E_NULL_VALUE;
4594 	}
4595 
4596 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4597 				&sim_context->link_id_to_pdev_map, pdev);
4598 
4599 	if (QDF_IS_STATUS_ERROR(status)) {
4600 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
4601 		return status;
4602 	}
4603 
4604 	return QDF_STATUS_SUCCESS;
4605 }
4606 
4607 QDF_STATUS
4608 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
4609 {
4610 	struct mgmt_rx_reo_sim_context *sim_context;
4611 	QDF_STATUS status;
4612 
4613 	sim_context = mgmt_rx_reo_sim_get_context();
4614 	if (!sim_context) {
4615 		mgmt_rx_reo_err("Mgmt simulation context is null");
4616 		return QDF_STATUS_E_NULL_VALUE;
4617 	}
4618 
4619 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4620 				&sim_context->link_id_to_pdev_map, pdev);
4621 
4622 	if (QDF_IS_STATUS_ERROR(status)) {
4623 		mgmt_rx_reo_err("Failed to remove pdev from the map");
4624 		return status;
4625 	}
4626 
4627 	return QDF_STATUS_SUCCESS;
4628 }
4629 
4630 QDF_STATUS
4631 mgmt_rx_reo_sim_start(void)
4632 {
4633 	struct mgmt_rx_reo_context *reo_context;
4634 	struct mgmt_rx_reo_sim_context *sim_context;
4635 	qdf_thread_t *mac_hw_thread;
4636 	uint8_t link_id;
4637 	uint8_t id;
4638 	QDF_STATUS status;
4639 
4640 	reo_context = mgmt_rx_reo_get_context();
4641 	if (!reo_context) {
4642 		mgmt_rx_reo_err("reo context is null");
4643 		return QDF_STATUS_E_NULL_VALUE;
4644 	}
4645 
4646 	reo_context->simulation_in_progress = true;
4647 
4648 	sim_context = &reo_context->sim_context;
4649 
4650 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4651 		struct workqueue_struct *wq;
4652 
4653 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
4654 					     link_id);
4655 		if (!wq) {
4656 			mgmt_rx_reo_err("Host workqueue creation failed");
4657 			status = QDF_STATUS_E_FAILURE;
4658 			goto error_destroy_fw_and_host_work_queues_till_last_link;
4659 		}
4660 		sim_context->host_mgmt_frame_handler[link_id] = wq;
4661 
4662 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
4663 					     link_id);
4664 		if (!wq) {
4665 			mgmt_rx_reo_err("FW workqueue creation failed");
4666 			status = QDF_STATUS_E_FAILURE;
4667 			goto error_destroy_host_work_queue_of_last_link;
4668 		}
4669 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
4670 	}
4671 
4672 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
4673 					  sim_context, "MAC_HW_thread");
4674 	if (!mac_hw_thread) {
4675 		mgmt_rx_reo_err("MAC HW thread creation failed");
4676 		status = QDF_STATUS_E_FAILURE;
4677 		goto error_destroy_fw_and_host_work_queues_of_last_link;
4678 	}
4679 
4680 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
4681 
4682 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
4683 
4684 	return QDF_STATUS_SUCCESS;
4685 
4686 error_destroy_fw_and_host_work_queues_of_last_link:
4687 	drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4688 	destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4689 
4690 error_destroy_host_work_queue_of_last_link:
4691 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4692 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4693 
4694 error_destroy_fw_and_host_work_queues_till_last_link:
4695 	for (id = 0; id < link_id; id++) {
4696 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4697 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4698 
4699 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
4700 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
4701 	}
4702 
4703 	return status;
4704 }
4705 
/* Stop the management Rx reorder simulation: join the MAC HW thread, drain
 * and destroy the per-link FW/host work queues, dump debug info and verify
 * that no frames are left pending or stale.
 */
QDF_STATUS
mgmt_rx_reo_sim_stop(void)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;
	uint8_t link_id;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/* Stop frame generation first so no new work is queued below */
	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to stop the thread");
		return status;
	}

	sim_context->mac_hw_sim.mac_hw_thread = NULL;

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		/* Wait for all the pending frames to be processed by FW */
		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);

		/* Wait for all the pending frames to be processed by host */
		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
		destroy_workqueue(
				sim_context->host_mgmt_frame_handler[link_id]);
	}

	status = mgmt_rx_reo_print_ingress_frame_debug_info();
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame debug info");
		return status;
	}

	status = mgmt_rx_reo_print_egress_frame_debug_info();
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame debug info");
		return status;
	}

	/* Pass/fail check: every generated frame should have been consumed.
	 * NOTE(review): lists are read without master_frame_list->lock;
	 * presumably safe since the generator thread and all work queues
	 * were stopped above — confirm no other producers remain.
	 */
	master_frame_list = &sim_context->master_frame_list;
	if (!qdf_list_empty(&master_frame_list->pending_list) ||
	    !qdf_list_empty(&master_frame_list->stale_list)) {
		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");

		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to print reorder list");
			return status;
		}

		qdf_assert_always(0);
	} else {
		mgmt_rx_reo_err("reo sim passed");
	}

	reo_context->simulation_in_progress = false;

	return QDF_STATUS_SUCCESS;
}
4774 
4775 /**
4776  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
4777  * context.
4778  * @reo_context: Pointer to reo context
4779  *
4780  * Return: QDF_STATUS of operation
4781  */
4782 static QDF_STATUS
4783 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
4784 {
4785 	QDF_STATUS status;
4786 	struct mgmt_rx_reo_sim_context *sim_context;
4787 	uint8_t link_id;
4788 
4789 	if (!reo_context) {
4790 		mgmt_rx_reo_err("reo context is null");
4791 		return QDF_STATUS_E_NULL_VALUE;
4792 	}
4793 
4794 	sim_context = &reo_context->sim_context;
4795 
4796 	qdf_mem_zero(sim_context, sizeof(*sim_context));
4797 
4798 	status = mgmt_rx_reo_sim_init_master_frame_list(
4799 					&sim_context->master_frame_list);
4800 	if (QDF_IS_STATUS_ERROR(status)) {
4801 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
4802 		return status;
4803 	}
4804 
4805 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
4806 
4807 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
4808 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
4809 					MGMT_RX_REO_INVALID_LINK_ID;
4810 
4811 	return QDF_STATUS_SUCCESS;
4812 }
4813 
4814 /**
4815  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
4816  * context.
4817  * @reo_context: Pointer to reo context
4818  *
4819  * Return: QDF_STATUS of operation
4820  */
4821 static QDF_STATUS
4822 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
4823 {
4824 	QDF_STATUS status;
4825 	struct mgmt_rx_reo_sim_context *sim_context;
4826 
4827 	if (!reo_context) {
4828 		mgmt_rx_reo_err("reo context is null");
4829 		return QDF_STATUS_E_NULL_VALUE;
4830 	}
4831 
4832 	sim_context = &reo_context->sim_context;
4833 
4834 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
4835 
4836 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
4837 					&sim_context->master_frame_list);
4838 	if (QDF_IS_STATUS_ERROR(status)) {
4839 		mgmt_rx_reo_err("Failed to destroy master frame list");
4840 		return status;
4841 	}
4842 
4843 	return QDF_STATUS_SUCCESS;
4844 }
4845 
4846 QDF_STATUS
4847 mgmt_rx_reo_sim_get_snapshot_address(
4848 			struct wlan_objmgr_pdev *pdev,
4849 			enum mgmt_rx_reo_shared_snapshot_id id,
4850 			struct mgmt_rx_reo_shared_snapshot **address)
4851 {
4852 	int8_t link_id;
4853 	struct mgmt_rx_reo_sim_context *sim_context;
4854 
4855 	sim_context = mgmt_rx_reo_sim_get_context();
4856 	if (!sim_context) {
4857 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4858 		return QDF_STATUS_E_NULL_VALUE;
4859 	}
4860 
4861 	if (!pdev) {
4862 		mgmt_rx_reo_err("pdev is NULL");
4863 		return QDF_STATUS_E_NULL_VALUE;
4864 	}
4865 
4866 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
4867 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
4868 		return QDF_STATUS_E_INVAL;
4869 	}
4870 
4871 	if (!address) {
4872 		mgmt_rx_reo_err("Pointer to snapshot address is null");
4873 		return QDF_STATUS_E_NULL_VALUE;
4874 	}
4875 
4876 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
4877 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
4878 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
4879 				pdev);
4880 		return QDF_STATUS_E_INVAL;
4881 	}
4882 
4883 	*address = &sim_context->snapshot[link_id][id];
4884 
4885 	return QDF_STATUS_SUCCESS;
4886 }
4887 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
4888 
4889 /**
4890  * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
4891  * @reo_list: Pointer to reorder list
4892  *
4893  * API to flush all the entries of the reorder list. This API would acquire
4894  * the lock protecting the list.
4895  *
4896  * Return: QDF_STATUS
4897  */
4898 static QDF_STATUS
4899 mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
4900 {
4901 	struct mgmt_rx_reo_list_entry *cur_entry;
4902 	struct mgmt_rx_reo_list_entry *temp;
4903 
4904 	if (!reo_list) {
4905 		mgmt_rx_reo_err("reorder list is null");
4906 		return QDF_STATUS_E_NULL_VALUE;
4907 	}
4908 
4909 	qdf_spin_lock_bh(&reo_list->list_lock);
4910 
4911 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
4912 		free_mgmt_rx_event_params(cur_entry->rx_params);
4913 
4914 		/**
4915 		 * Release the reference taken when the entry is inserted into
4916 		 * the reorder list.
4917 		 */
4918 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
4919 					     WLAN_MGMT_RX_REO_ID);
4920 
4921 		qdf_mem_free(cur_entry);
4922 	}
4923 
4924 	qdf_spin_unlock_bh(&reo_list->list_lock);
4925 
4926 	return QDF_STATUS_SUCCESS;
4927 }
4928 
4929 /**
4930  * mgmt_rx_reo_list_deinit() - De initialize the management rx-reorder list
4931  * @reo_list: Pointer to reorder list
4932  *
4933  * API to de initialize the management rx-reorder list.
4934  *
4935  * Return: QDF_STATUS
4936  */
4937 static QDF_STATUS
4938 mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list)
4939 {
4940 	QDF_STATUS status;
4941 
4942 	qdf_timer_free(&reo_list->global_mgmt_rx_inactivity_timer);
4943 	qdf_timer_free(&reo_list->ageout_timer);
4944 
4945 	status = mgmt_rx_reo_flush_reorder_list(reo_list);
4946 	if (QDF_IS_STATUS_ERROR(status)) {
4947 		mgmt_rx_reo_err("Failed to flush the reorder list");
4948 		return QDF_STATUS_E_FAILURE;
4949 	}
4950 	qdf_spinlock_destroy(&reo_list->list_lock);
4951 	qdf_list_destroy(&reo_list->list);
4952 
4953 	return QDF_STATUS_SUCCESS;
4954 }
4955 
/* Tear down the global management Rx reorder context: cancel the list
 * timers, destroy the entry lock, de-initialize the simulation context and
 * finally the reorder list itself.
 */
QDF_STATUS
mgmt_rx_reo_deinit_context(void)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Synchronously cancel the timers before the list they act on is
	 * de-initialized below.
	 */
	qdf_timer_sync_cancel(
			&reo_context->reo_list.global_mgmt_rx_inactivity_timer);
	qdf_timer_sync_cancel(&reo_context->reo_list.ageout_timer);

	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);

	status = mgmt_rx_reo_sim_deinit(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to de initialize reo sim context");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
4988 
/* Initialize the global management Rx reorder context: reorder list,
 * simulation context, entry lock, ageout timer and the debug-info border
 * strings.
 */
QDF_STATUS
mgmt_rx_reo_init_context(void)
{
	QDF_STATUS status;
	QDF_STATUS temp;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	qdf_mem_zero(reo_context, sizeof(*reo_context));

	status = mgmt_rx_reo_list_init(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list");
		return status;
	}

	status = mgmt_rx_reo_sim_init(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize reo simulation context");
		goto error_reo_list_deinit;
	}

	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);

	/* Arm the ageout timer only after everything else is ready */
	qdf_timer_mod(&reo_context->reo_list.ageout_timer,
		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);

	/* Pre-fill the debug print separator lines ("boarder" spelling
	 * comes from the struct field definition).
	 */
	qdf_mem_set(reo_context->ingress_frame_debug_info.boarder,
		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
	qdf_mem_set(reo_context->egress_frame_debug_info.boarder,
		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');

	return QDF_STATUS_SUCCESS;

error_reo_list_deinit:
	/* Unwind the successful list init; report its failure if any */
	temp = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(temp)) {
		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
		return temp;
	}

	return status;
}
5036 
5037 /**
5038  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
5039  * params object
5040  * @snapshot_params: Pointer to snapshot params object
5041  *
5042  * Return: void
5043  */
5044 static void
5045 wlan_mgmt_rx_reo_initialize_snapshot_params(
5046 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
5047 {
5048 	snapshot_params->valid = false;
5049 	snapshot_params->mgmt_pkt_ctr = 0;
5050 	snapshot_params->global_timestamp = 0;
5051 }
5052 
5053 /**
5054  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
5055  * snapshot addresses for a given pdev
5056  * @pdev: pointer to pdev object
5057  *
5058  * Return: QDF_STATUS
5059  */
5060 static QDF_STATUS
5061 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
5062 {
5063 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5064 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5065 	QDF_STATUS status;
5066 
5067 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5068 	if (!mgmt_rx_reo_pdev_ctx) {
5069 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5070 		return QDF_STATUS_E_NULL_VALUE;
5071 	}
5072 
5073 	snapshot_id = 0;
5074 
5075 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5076 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
5077 
5078 		snapshot_info =
5079 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
5080 			[snapshot_id];
5081 		status = wlan_mgmt_rx_reo_get_snapshot_info
5082 					(pdev, snapshot_id, snapshot_info);
5083 		if (QDF_IS_STATUS_ERROR(status)) {
5084 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
5085 					snapshot_id);
5086 			return status;
5087 		}
5088 
5089 		snapshot_id++;
5090 	}
5091 
5092 	return QDF_STATUS_SUCCESS;
5093 }
5094 
5095 /**
5096  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
5097  * snapshot values for a given pdev
5098  * @pdev: pointer to pdev object
5099  *
5100  * Return: QDF_STATUS
5101  */
5102 static QDF_STATUS
5103 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
5104 {
5105 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5106 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5107 
5108 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5109 	if (!mgmt_rx_reo_pdev_ctx) {
5110 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5111 		return QDF_STATUS_E_NULL_VALUE;
5112 	}
5113 
5114 	snapshot_id = 0;
5115 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5116 		wlan_mgmt_rx_reo_initialize_snapshot_params
5117 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
5118 			 [snapshot_id]);
5119 		snapshot_id++;
5120 	}
5121 
5122 	/* Initialize Host snapshot params */
5123 	wlan_mgmt_rx_reo_initialize_snapshot_params
5124 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
5125 
5126 	return QDF_STATUS_SUCCESS;
5127 }
5128 
5129 /**
5130  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
5131  * snapshot related data structures for a given pdev
5132  * @pdev: pointer to pdev object
5133  *
5134  * Return: QDF_STATUS
5135  */
5136 static QDF_STATUS
5137 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
5138 {
5139 	QDF_STATUS status;
5140 
5141 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5142 	if (QDF_IS_STATUS_ERROR(status)) {
5143 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5144 		return status;
5145 	}
5146 
5147 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
5148 	if (QDF_IS_STATUS_ERROR(status)) {
5149 		mgmt_rx_reo_err("Failed to initialize snapshot address");
5150 		return status;
5151 	}
5152 
5153 	return QDF_STATUS_SUCCESS;
5154 }
5155 
5156 /**
5157  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
5158  * data structures for a given pdev
5159  * @pdev: pointer to pdev object
5160  *
5161  * Return: QDF_STATUS
5162  */
5163 static QDF_STATUS
5164 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
5165 {
5166 	QDF_STATUS status;
5167 
5168 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5169 	if (QDF_IS_STATUS_ERROR(status)) {
5170 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5171 		return status;
5172 	}
5173 
5174 	return QDF_STATUS_SUCCESS;
5175 }
5176 
5177 QDF_STATUS
5178 mgmt_rx_reo_attach(struct wlan_objmgr_pdev *pdev)
5179 {
5180 	QDF_STATUS status;
5181 
5182 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5183 		return QDF_STATUS_SUCCESS;
5184 
5185 	status = mgmt_rx_reo_initialize_snapshots(pdev);
5186 	if (QDF_IS_STATUS_ERROR(status)) {
5187 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
5188 		return status;
5189 	}
5190 
5191 	return QDF_STATUS_SUCCESS;
5192 }
5193 
5194 QDF_STATUS
5195 mgmt_rx_reo_detach(struct wlan_objmgr_pdev *pdev)
5196 {
5197 	QDF_STATUS status;
5198 
5199 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5200 		return QDF_STATUS_SUCCESS;
5201 
5202 	status = mgmt_rx_reo_clear_snapshots(pdev);
5203 	if (QDF_IS_STATUS_ERROR(status)) {
5204 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
5205 		return status;
5206 	}
5207 
5208 	return QDF_STATUS_SUCCESS;
5209 }
5210 
/* Pdev create handler: notify the reo simulation and allocate the per-pdev
 * REO private context.
 */
QDF_STATUS
mgmt_rx_reo_pdev_obj_create_notification(
	struct wlan_objmgr_pdev *pdev,
	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;

	if (!pdev) {
		mgmt_rx_reo_err("pdev is null");
		status = QDF_STATUS_E_NULL_VALUE;
		goto failure;
	}

	/* Feature disabled on this pdev is not an error; the "failure"
	 * label is reused here only to NULL out the REO pdev context
	 * before returning QDF_STATUS_SUCCESS.
	 */
	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
		status = QDF_STATUS_SUCCESS;
		goto failure;
	}

	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
		goto failure;
	}

	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
	if (!mgmt_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Allocation failure for REO pdev context");
		status = QDF_STATUS_E_NOMEM;
		goto failure;
	}

	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;

	return QDF_STATUS_SUCCESS;

failure:
	/* mgmt_rx_reo_pdev_ctx is NULL on every path reaching here; the
	 * check keeps the cleanup robust against future gotos after a
	 * successful allocation.
	 */
	if (mgmt_rx_reo_pdev_ctx)
		qdf_mem_free(mgmt_rx_reo_pdev_ctx);

	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;

	return status;
}
5255 
5256 QDF_STATUS
5257 mgmt_rx_reo_pdev_obj_destroy_notification(
5258 	struct wlan_objmgr_pdev *pdev,
5259 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
5260 {
5261 	QDF_STATUS status;
5262 
5263 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5264 		return QDF_STATUS_SUCCESS;
5265 
5266 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
5267 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
5268 
5269 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
5270 	if (QDF_IS_STATUS_ERROR(status)) {
5271 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
5272 		return status;
5273 	}
5274 
5275 	return QDF_STATUS_SUCCESS;
5276 }
5277 
5278 bool
5279 mgmt_rx_reo_is_simulation_in_progress(void)
5280 {
5281 	struct mgmt_rx_reo_context *reo_context;
5282 
5283 	reo_context = mgmt_rx_reo_get_context();
5284 	if (!reo_context) {
5285 		mgmt_rx_reo_err("reo context is null");
5286 		return false;
5287 	}
5288 
5289 	return reo_context->simulation_in_progress;
5290 }
5291 
5292 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
5293 QDF_STATUS
5294 mgmt_rx_reo_print_ingress_frame_stats(void)
5295 {
5296 	struct mgmt_rx_reo_context *reo_context;
5297 	QDF_STATUS status;
5298 
5299 	reo_context = mgmt_rx_reo_get_context();
5300 	if (!reo_context) {
5301 		mgmt_rx_reo_err("reo context is null");
5302 		return QDF_STATUS_E_NULL_VALUE;
5303 	}
5304 
5305 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
5306 	if (QDF_IS_STATUS_ERROR(status)) {
5307 		mgmt_rx_reo_err("Failed to print ingress frame stats");
5308 		return status;
5309 	}
5310 
5311 	return QDF_STATUS_SUCCESS;
5312 }
5313 
5314 QDF_STATUS
5315 mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames)
5316 {
5317 	struct mgmt_rx_reo_context *reo_context;
5318 	QDF_STATUS status;
5319 
5320 	reo_context = mgmt_rx_reo_get_context();
5321 	if (!reo_context) {
5322 		mgmt_rx_reo_err("reo context is null");
5323 		return QDF_STATUS_E_NULL_VALUE;
5324 	}
5325 
5326 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
5327 							    num_frames);
5328 	if (QDF_IS_STATUS_ERROR(status)) {
5329 		mgmt_rx_reo_err("Failed to print ingress frame info");
5330 		return status;
5331 	}
5332 
5333 	return QDF_STATUS_SUCCESS;
5334 }
5335 
5336 QDF_STATUS
5337 mgmt_rx_reo_print_egress_frame_stats(void)
5338 {
5339 	struct mgmt_rx_reo_context *reo_context;
5340 	QDF_STATUS status;
5341 
5342 	reo_context = mgmt_rx_reo_get_context();
5343 	if (!reo_context) {
5344 		mgmt_rx_reo_err("reo context is null");
5345 		return QDF_STATUS_E_NULL_VALUE;
5346 	}
5347 
5348 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
5349 	if (QDF_IS_STATUS_ERROR(status)) {
5350 		mgmt_rx_reo_err("Failed to print egress frame stats");
5351 		return status;
5352 	}
5353 
5354 	return QDF_STATUS_SUCCESS;
5355 }
5356 
5357 QDF_STATUS
5358 mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
5359 {
5360 	struct mgmt_rx_reo_context *reo_context;
5361 	QDF_STATUS status;
5362 
5363 	reo_context = mgmt_rx_reo_get_context();
5364 	if (!reo_context) {
5365 		mgmt_rx_reo_err("reo context is null");
5366 		return QDF_STATUS_E_NULL_VALUE;
5367 	}
5368 
5369 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
5370 							   num_frames);
5371 	if (QDF_IS_STATUS_ERROR(status)) {
5372 		mgmt_rx_reo_err("Failed to print egress frame info");
5373 		return status;
5374 	}
5375 
5376 	return QDF_STATUS_SUCCESS;
5377 }
5378 #else
/* Stub: debug support compiled out, nothing to print */
QDF_STATUS
mgmt_rx_reo_print_ingress_frame_stats(void)
{
	return QDF_STATUS_SUCCESS;
}

/* Stub: debug support compiled out, nothing to print */
QDF_STATUS
mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}

/* Stub: debug support compiled out, nothing to print */
QDF_STATUS
mgmt_rx_reo_print_egress_frame_stats(void)
{
	return QDF_STATUS_SUCCESS;
}

/* Stub: debug support compiled out, nothing to print */
QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}
5402 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5403