xref: /wlan-dirver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision c7eaf5ac989ac229214b8317faa3e981d261e7db)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_mlo_mgr_cmn.h>
28 #include <wlan_mlo_mgr_setup.h>
29 
/* Per-MLO-group management Rx reorder contexts, indexed by group id. */
static struct mgmt_rx_reo_context *g_rx_reo_ctx[WLAN_MAX_MLO_GROUPS];

/* Accessors for the per-group reorder context; callers must validate
 * the group id against WLAN_MAX_MLO_GROUPS before use (raw array index).
 */
#define mgmt_rx_reo_get_context(_grp_id) (g_rx_reo_ctx[_grp_id])
#define mgmt_rx_reo_set_context(grp_id, c)       (g_rx_reo_ctx[grp_id] = c)

/* Management packet counters are 16 bits wide; two live counters are never
 * more than half the range apart, which makes wrap-safe comparison possible.
 */
#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
37 
38 /**
39  * wlan_mgmt_rx_reo_get_ctx_from_pdev - Get MGMT Rx REO Context from pdev
40  * @pdev: Pointer to pdev structure object
41  *
42  * API to get the MGMT RX reo context of the pdev using the appropriate
43  * MLO group id.
44  *
45  * Return: Mgmt rx reo context for the pdev
46  */
47 
48 static inline struct mgmt_rx_reo_context*
49 wlan_mgmt_rx_reo_get_ctx_from_pdev(struct wlan_objmgr_pdev *pdev)
50 {
51 	uint8_t ml_grp_id;
52 
53 	ml_grp_id = wlan_get_mlo_grp_id_from_pdev(pdev);
54 	if (ml_grp_id >= WLAN_MAX_MLO_GROUPS) {
55 		mgmt_rx_reo_err("REO context - Invalid ML Group ID");
56 		return NULL;
57 	}
58 
59 	return mgmt_rx_reo_get_context(ml_grp_id);
60 }
61 
62 /**
63  * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
64  * @ctr1: Management packet counter1
65  * @ctr2: Management packet counter2
66  *
67  * We can't directly use the comparison operator here because the counters can
68  * overflow. But these counters have a property that the difference between
69  * them can never be greater than half the range of the data type.
70  * We can make use of this condition to detect which one is actually greater.
71  *
72  * Return: true if @ctr1 is greater than or equal to @ctr2, else false
73  */
74 static inline bool
75 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
76 {
77 	uint16_t delta = ctr1 - ctr2;
78 
79 	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
80 }
81 
82 /**
83  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
84  * @ctr1: Management packet counter1
85  * @ctr2: Management packet counter2
86  *
87  * We can't directly use the subtract operator here because the counters can
88  * overflow. But these counters have a property that the difference between
89  * them can never be greater than half the range of the data type.
90  * We can make use of this condition to detect whichone is actually greater and
91  * return the difference accordingly.
92  *
93  * Return: Difference between @ctr1 and @crt2
94  */
95 static inline int
96 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
97 {
98 	uint16_t delta = ctr1 - ctr2;
99 
100 	/**
101 	 * if delta is greater than half the range (i.e, ctr1 is actually
102 	 * smaller than ctr2), then the result should be a negative number.
103 	 * subtracting the entire range should give the correct value.
104 	 */
105 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
106 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
107 
108 	return delta;
109 }
110 
/* Global timestamps are 32 bits wide; live values never differ by more
 * than half the range, enabling wrap-safe comparison.
 */
#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global
 * timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * A plain ">=" cannot be used because the timestamps wrap around. The
 * design guarantees that two live timestamps never differ by more than
 * half the 32-bit range, so the wrapped difference tells which one is
 * ahead.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	/* Modular subtraction: wraps naturally in 32 bits */
	return (uint32_t)(ts1 - ts2) <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
}
131 
132 /**
133  * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame
134  * is stale
135  * @last_delivered_frame: pointer to the info of the last frame delivered to
136  * upper layer
137  * @frame_desc: pointer to frame descriptor
138  *
139  * This API checks whether the current management frame under processing is
140  * stale. Any frame older than the last frame delivered to upper layer is a
141  * stale frame. This could happen when we have to deliver frames out of order
142  * due to time out or list size limit. The frames which arrive late at host and
143  * with time stamp lesser than the last delivered frame are stale frames and
144  * they need to be handled differently.
145  *
146  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
147  * @frame_desc will be filled with proper values.
148  */
149 static QDF_STATUS
150 mgmt_rx_reo_is_stale_frame(
151 		struct mgmt_rx_reo_frame_info *last_delivered_frame,
152 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
153 {
154 	uint32_t cur_frame_start_ts;
155 	uint32_t cur_frame_end_ts;
156 	uint32_t last_delivered_frame_start_ts;
157 	uint32_t last_delivered_frame_end_ts;
158 
159 	if (!last_delivered_frame) {
160 		mgmt_rx_reo_err("Last delivered frame info is null");
161 		return QDF_STATUS_E_NULL_VALUE;
162 	}
163 
164 	if (!frame_desc) {
165 		mgmt_rx_reo_err("Frame descriptor is null");
166 		return QDF_STATUS_E_NULL_VALUE;
167 	}
168 
169 	frame_desc->is_stale = false;
170 	frame_desc->is_parallel_rx = false;
171 	frame_desc->last_delivered_frame = *last_delivered_frame;
172 
173 	if (!frame_desc->reo_required)
174 		return QDF_STATUS_SUCCESS;
175 
176 	if (!last_delivered_frame->valid)
177 		return QDF_STATUS_SUCCESS;
178 
179 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
180 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
181 	last_delivered_frame_start_ts =
182 			last_delivered_frame->reo_params.start_timestamp;
183 	last_delivered_frame_end_ts =
184 			last_delivered_frame->reo_params.end_timestamp;
185 
186 	frame_desc->is_stale =
187 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
188 					last_delivered_frame_start_ts);
189 
190 	if (mgmt_rx_reo_compare_global_timestamps_gte
191 		(last_delivered_frame_start_ts, cur_frame_start_ts) &&
192 	    mgmt_rx_reo_compare_global_timestamps_gte
193 		(cur_frame_end_ts, last_delivered_frame_end_ts)) {
194 		frame_desc->is_parallel_rx = true;
195 		frame_desc->is_stale = false;
196 	}
197 
198 	return QDF_STATUS_SUCCESS;
199 }
200 
201 QDF_STATUS
202 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
203 {
204 	uint16_t valid_link_bitmap_shmem;
205 	uint16_t valid_link_bitmap;
206 	int8_t num_active_links_shmem;
207 	int8_t num_active_links;
208 	uint8_t grp_id = 0;
209 	QDF_STATUS status;
210 
211 	if (!psoc) {
212 		mgmt_rx_reo_err("psoc is null");
213 		return QDF_STATUS_E_NULL_VALUE;
214 	}
215 
216 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
217 		return QDF_STATUS_SUCCESS;
218 
219 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
220 							 &num_active_links_shmem);
221 	if (QDF_IS_STATUS_ERROR(status)) {
222 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
223 		return QDF_STATUS_E_FAILURE;
224 	}
225 	qdf_assert_always(num_active_links_shmem > 0);
226 
227 	if (!mlo_psoc_get_grp_id(psoc, &grp_id)) {
228 		mgmt_rx_reo_err("Failed to get valid MLO Group id");
229 		return QDF_STATUS_E_INVAL;
230 	}
231 
232 	num_active_links = wlan_mlo_get_num_active_links(grp_id);
233 	qdf_assert_always(num_active_links > 0);
234 
235 	qdf_assert_always(num_active_links_shmem == num_active_links);
236 
237 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
238 							  &valid_link_bitmap_shmem);
239 	if (QDF_IS_STATUS_ERROR(status)) {
240 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
241 		return QDF_STATUS_E_INVAL;
242 	}
243 	qdf_assert_always(valid_link_bitmap_shmem != 0);
244 
245 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
246 	qdf_assert_always(valid_link_bitmap_shmem != 0);
247 
248 	qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap);
249 
250 	return QDF_STATUS_SUCCESS;
251 }
252 
253 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
254 /**
255  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
256  * @link_id: Link id to be checked
257  * @grp_id: MLO Group id which it belongs to
258  *
259  * Return: true if @link_id is a valid link else false
260  */
261 static bool
262 mgmt_rx_reo_is_valid_link(uint8_t link_id, uint8_t grp_id)
263 {
264 	uint16_t valid_hw_link_bitmap;
265 
266 	if (link_id >= MAX_MLO_LINKS) {
267 		mgmt_rx_reo_err("Invalid link id %u", link_id);
268 		return false;
269 	}
270 
271 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
272 	qdf_assert_always(valid_hw_link_bitmap);
273 
274 	return (valid_hw_link_bitmap & (1 << link_id));
275 }
276 
277 /**
278  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
279  * system
280  * @reo_context: Pointer to reo context object
281  * @grp_id: MLO group id which it belongs to
282  *
283  * Return: On success returns number of active MLO HW links. On failure
284  * returns WLAN_MLO_INVALID_NUM_LINKS.
285  */
286 static int8_t
287 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
288 			      uint8_t grp_id)
289 {
290 	if (!reo_context) {
291 		mgmt_rx_reo_err("Mgmt reo context is null");
292 		return WLAN_MLO_INVALID_NUM_LINKS;
293 	}
294 
295 	return wlan_mlo_get_num_active_links(grp_id);
296 }
297 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Stub for target builds
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Premature-delivery bookkeeping is only needed by the reorder simulation;
 * in the non-simulation build this is a no-op that always succeeds.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	return QDF_STATUS_SUCCESS;
}
305 
/**
 * mgmt_rx_reo_handle_stale_frame() - Stub for target builds
 * @reo_list: Pointer to reorder list
 * @desc: Pointer to frame descriptor
 *
 * Stale-frame bookkeeping is only needed by the reorder simulation;
 * in the non-simulation build this is a no-op that always succeeds.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
312 #else
313 /**
314  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
315  * @sim_context: Pointer to reo simulation context object
316  * @link_id: Link id to be checked
317  *
318  * Return: true if @link_id is a valid link, else false
319  */
320 static bool
321 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
322 			      uint8_t link_id)
323 {
324 	bool is_valid_link = false;
325 
326 	if (!sim_context) {
327 		mgmt_rx_reo_err("Mgmt reo sim context is null");
328 		return false;
329 	}
330 
331 	if (link_id >= MAX_MLO_LINKS) {
332 		mgmt_rx_reo_err("Invalid link id %u", link_id);
333 		return false;
334 	}
335 
336 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
337 
338 	if (sim_context->link_id_to_pdev_map.map[link_id])
339 		is_valid_link = true;
340 
341 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
342 
343 	return is_valid_link;
344 }
345 
346 /**
347  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
348  * @ml_grp_id: MLO Group id on which the Link ID  belongs to
349  * @link_id: HW Link ID to be verified
350  *
351  * Return: true if @link_id is a valid link else false
352  */
353 static bool
354 mgmt_rx_reo_is_valid_link(uint8_t ml_grp_id, uint8_t link_id)
355 {
356 	struct mgmt_rx_reo_context *reo_context;
357 
358 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
359 
360 	if (!reo_context) {
361 		mgmt_rx_reo_err("Mgmt reo context is null");
362 		return false;
363 	}
364 
365 	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
366 					     link_id);
367 }
368 
369 /**
370  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
371  * simulation context object
372  * @sim_context: Pointer to reo simulation context object
373  *
374  * Number of MLO links will be equal to number of pdevs in the
375  * system. In case of simulation all the pdevs are assumed
376  * to have MLO capability.
377  *
378  * Return: On success returns number of MLO HW links. On failure
379  * returns WLAN_MLO_INVALID_NUM_LINKS.
380  */
381 static int8_t
382 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
383 {
384 	uint8_t num_mlo_links;
385 
386 	if (!sim_context) {
387 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
388 		return WLAN_MLO_INVALID_NUM_LINKS;
389 	}
390 
391 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
392 
393 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
394 
395 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
396 
397 	return num_mlo_links;
398 }
399 
400 /**
401  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
402  * context object
403  * @reo_context: Pointer to reo context object
404  * @grp_id: MLO Group id which it belongs to
405  *
406  * Return: On success returns number of MLO HW links. On failure
407  * returns WLAN_MLO_INVALID_NUM_LINKS.
408  */
409 static int8_t
410 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
411 			      uint8_t grp_id)
412 {
413 	if (!reo_context) {
414 		mgmt_rx_reo_err("Mgmt reo context is null");
415 		return WLAN_MLO_INVALID_NUM_LINKS;
416 	}
417 
418 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
419 }
420 
421 /**
422  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
423  * rx reorder simulation context
424  * @ml_grp_id: MLO group id for the rx reordering
425  *
426  * Return: On success returns the pointer to management rx reorder
427  * simulation context. On failure returns NULL.
428  */
429 static struct mgmt_rx_reo_sim_context *
430 mgmt_rx_reo_sim_get_context(uint8_t ml_grp_id)
431 {
432 	struct mgmt_rx_reo_context *reo_context;
433 
434 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
435 	if (!reo_context) {
436 		mgmt_rx_reo_err("Mgmt reo context is null");
437 		return NULL;
438 	}
439 
440 	return &reo_context->sim_context;
441 }
442 
443 int8_t
444 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
445 {
446 	struct mgmt_rx_reo_sim_context *sim_context;
447 	int8_t link_id;
448 
449 	sim_context = mgmt_rx_reo_sim_get_context();
450 	if (!sim_context) {
451 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
452 		return MGMT_RX_REO_INVALID_LINK;
453 	}
454 
455 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
456 
457 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
458 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
459 			break;
460 
461 	/* pdev is not found in map */
462 	if (link_id == MAX_MLO_LINKS)
463 		link_id = MGMT_RX_REO_INVALID_LINK;
464 
465 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
466 
467 	return link_id;
468 }
469 
470 struct wlan_objmgr_pdev *
471 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
472 					  wlan_objmgr_ref_dbgid refdbgid)
473 {
474 	struct mgmt_rx_reo_sim_context *sim_context;
475 	struct wlan_objmgr_pdev *pdev;
476 	QDF_STATUS status;
477 
478 	sim_context = mgmt_rx_reo_sim_get_context();
479 	if (!sim_context) {
480 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
481 		return NULL;
482 	}
483 
484 	if (mlo_link_id >= MAX_MLO_LINKS) {
485 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
486 		return NULL;
487 	}
488 
489 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
490 
491 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
492 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
493 	if (QDF_IS_STATUS_ERROR(status)) {
494 		mgmt_rx_reo_err("Failed to get pdev reference");
495 		return NULL;
496 	}
497 
498 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
499 
500 	return pdev;
501 }
502 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Helper API to handle
 * premature delivery.
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Sometimes we have to deliver a management frame to the upper layers even
 * before its wait count reaching zero. This is called premature delivery.
 * Premature delivery could happen due to time out or reorder list overflow.
 * All entries in the simulation's pending master frame list that are older
 * than the prematurely delivered frame become stale and are moved to the
 * stale list in one batch.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	qdf_list_t stale_frame_list_temp;
	QDF_STATUS status;
	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;

	if (!reo_context)
		return QDF_STATUS_E_NULL_VALUE;

	sim_context = &reo_context->sim_context;
	master_frame_list = &sim_context->master_frame_list;

	qdf_spin_lock(&master_frame_list->lock);

	/* Find the newest pending entry older than the current frame; all
	 * entries up to and including it are now stale.
	 */
	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
		if (cur_entry->params.global_timestamp == global_timestamp)
			break;

		latest_stale_frame = cur_entry;
	}

	if (latest_stale_frame) {
		qdf_list_create(&stale_frame_list_temp,
				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);

		/* Detach the stale prefix from the pending list ... */
		status = qdf_list_split(&stale_frame_list_temp,
					&master_frame_list->pending_list,
					&latest_stale_frame->node);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;

		/* ... and append it to the stale list */
		status = qdf_list_join(&master_frame_list->stale_list,
				       &stale_frame_list_temp);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;
	}

	status = QDF_STATUS_SUCCESS;

exit_unlock_master_frame_list:
	qdf_spin_unlock(&master_frame_list->lock);

	return status;
}
565 
566 /**
567  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
568  * stale management frame list
569  * @master_frame_list: pointer to master management frame list
570  * @reo_params: pointer to reo params
571  *
572  * This API removes frames from the stale management frame list.
573  *
574  * Return: QDF_STATUS of operation
575  */
576 static QDF_STATUS
577 mgmt_rx_reo_sim_remove_frame_from_stale_list(
578 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
579 		const struct mgmt_rx_reo_params *reo_params)
580 {
581 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
582 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
583 	QDF_STATUS status;
584 
585 	if (!master_frame_list || !reo_params)
586 		return QDF_STATUS_E_NULL_VALUE;
587 
588 	qdf_spin_lock(&master_frame_list->lock);
589 
590 	/**
591 	 * Stale frames can come in any order at host. Do a linear search and
592 	 * remove the matching entry.
593 	 */
594 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
595 		if (cur_entry->params.link_id == reo_params->link_id &&
596 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
597 		    cur_entry->params.global_timestamp ==
598 		    reo_params->global_timestamp) {
599 			matching_entry = cur_entry;
600 			break;
601 		}
602 	}
603 
604 	if (!matching_entry) {
605 		qdf_spin_unlock(&master_frame_list->lock);
606 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
607 		qdf_assert_always(0);
608 	}
609 
610 	status = qdf_list_remove_node(&master_frame_list->stale_list,
611 				      &matching_entry->node);
612 
613 	if (QDF_IS_STATUS_ERROR(status)) {
614 		qdf_spin_unlock(&master_frame_list->lock);
615 		return status;
616 	}
617 
618 	qdf_mem_free(matching_entry);
619 
620 	qdf_spin_unlock(&master_frame_list->lock);
621 
622 	return QDF_STATUS_SUCCESS;
623 }
624 
625 /**
626  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
627  * @reo_list: Pointer to reorder list
628  * @desc: Pointer to frame descriptor
629  *
630  * Return: QDF_STATUS of operation
631  */
632 static QDF_STATUS
633 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
634 			       struct mgmt_rx_reo_frame_descriptor *desc)
635 {
636 	QDF_STATUS status;
637 	struct mgmt_rx_reo_context *reo_context;
638 	struct mgmt_rx_reo_sim_context *sim_context;
639 	struct mgmt_rx_reo_params *reo_params;
640 
641 	if (!reo_list || !desc)
642 		return QDF_STATUS_E_NULL_VALUE;
643 
644 	/* FW consumed/Error frames are already removed */
645 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
646 		return QDF_STATUS_SUCCESS;
647 
648 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
649 	if (!reo_context)
650 		return QDF_STATUS_E_NULL_VALUE;
651 
652 	sim_context = &reo_context->sim_context;
653 
654 	reo_params = desc->rx_params->reo_params;
655 	if (!reo_params)
656 		return QDF_STATUS_E_NULL_VALUE;
657 
658 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
659 				&sim_context->master_frame_list, reo_params);
660 
661 	return status;
662 }
663 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
664 
665 /**
666  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
667  * whether the current frame getting delivered to upper layer is a premature
668  * delivery
669  * @release_reason: release reason
670  *
671  * Return: true for a premature delivery
672  */
673 static bool
674 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
675 {
676 	return !(release_reason & RELEASE_REASON_ZERO_WAIT_COUNT);
677 }
678 
679 /**
680  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
681  * MGMT Rx REO module
682  * @pdev: pointer to pdev object
683  *
684  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
685  * else NULL
686  */
687 static struct mgmt_rx_reo_pdev_info *
688 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
689 {
690 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
691 
692 	if (!pdev) {
693 		mgmt_rx_reo_err("pdev is null");
694 		return NULL;
695 	}
696 
697 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
698 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
699 						      WLAN_UMAC_COMP_MGMT_TXRX);
700 
701 	if (!mgmt_txrx_pdev_ctx) {
702 		mgmt_rx_reo_err("mgmt txrx context is NULL");
703 		return NULL;
704 	}
705 
706 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
707 }
708 
/**
 * mgmt_rx_reo_print_snapshots() - Print all snapshots related
 * to management Rx reorder module
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Debug helper that dumps the valid flag, packet counter and global
 * timestamp of each snapshot. All four pointers are assumed non-NULL;
 * callers are expected to pass valid snapshots.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_print_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
743 
/**
 * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 * @link: link ID
 *
 * The MAC HW snapshot is the reference: any other snapshot whose global
 * timestamp or packet counter is ahead of the MAC HW snapshot must be a
 * stale leftover and is marked invalid. In addition, a FW snapshot whose
 * timestamp comparison and packet-counter comparison against the host
 * snapshot disagree is internally inconsistent and is also invalidated.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_invalidate_stale_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss,
			 uint8_t link)
{
	/* No reference to validate against */
	if (!mac_hw_ss->valid)
		return QDF_STATUS_SUCCESS;

	/* Host snapshot ahead of MAC HW snapshot => stale */
	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
					  link);
			host_ss->valid = false;
		}
	}

	/* FW forwarded snapshot: stale if ahead of MAC HW snapshot, or if
	 * its timestamp/counter ordering against the host snapshot disagree
	 */
	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}

		if (host_ss->valid && fw_forwarded_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}
	}

	/* FW consumed snapshot: same two checks as FW forwarded */
	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}

		if (host_ss->valid && fw_consumed_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}
	}

	return QDF_STATUS_SUCCESS;
}
841 
/**
 * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Verifies that the snapshots obey the ordering implied by the frame path
 * MAC HW -> FW (forwarded/consumed) -> Host: an earlier stage must never
 * be behind a later stage in either global timestamp or packet counter.
 * A later stage being valid while every earlier stage is invalid is also
 * flagged. On failure all four snapshots are dumped at debug level.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_snapshots_check_sanity
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	QDF_STATUS status;

	/* If MAC HW SS is invalid, no later stage may be valid */
	if (!mac_hw_ss->valid) {
		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
		    host_ss->valid) {
			mgmt_rx_reo_err("MAC HW SS is invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* If both FW snapshots are invalid, the host SS may not be valid */
	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
		if (host_ss->valid) {
			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* MAC HW SS must be at or ahead of the FW forwarded SS */
	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* MAC HW SS must be at or ahead of the FW consumed SS */
	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* Host SS must be behind MAC HW SS and whichever FW SS is valid */
	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/* With both FW snapshots valid, at least one of them must be
		 * at or ahead of the host SS
		 */
		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp) &&
			    !mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr) &&
			    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return status;
}
1016 
1017 /**
1018  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
1019  * frames an incoming frame should wait for before it gets delivered.
1020  * @in_frame_pdev: pdev on which this frame is received
1021  * @desc: frame Descriptor
1022  *
1023  * Each frame carrys a MGMT pkt number which is local to that link, and a
1024  * timestamp which is global across all the links. MAC HW and FW also captures
1025  * the same details of the last frame that they have seen. Host also maintains
1026  * the details of the last frame it has seen. In total, there are 4 snapshots.
1027  * 1. MAC HW snapshot - latest frame seen at MAC HW
1028  * 2. FW forwarded snapshot- latest frame forwarded to the Host
1029  * 3. FW consumed snapshot - latest frame consumed by the FW
1030  * 4. Host/FW consumed snapshot - latest frame seen by the Host
1031  * By using all these snapshots, this function tries to compute the wait count
1032  * for a given incoming frame on all links.
1033  *
1034  * Return: QDF_STATUS of operation
1035  */
1036 static QDF_STATUS
1037 wlan_mgmt_rx_reo_algo_calculate_wait_count(
1038 		struct wlan_objmgr_pdev *in_frame_pdev,
1039 		struct mgmt_rx_reo_frame_descriptor *desc)
1040 {
1041 	QDF_STATUS status;
1042 	uint8_t link;
1043 	int8_t grp_id;
1044 	int8_t in_frame_link;
1045 	int frames_pending, delta_fwd_host;
1046 	uint8_t snapshot_id;
1047 	struct wlan_objmgr_pdev *pdev;
1048 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
1049 	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
1050 	struct mgmt_rx_reo_snapshot_info *snapshot_info;
1051 	struct mgmt_rx_reo_snapshot_params snapshot_params
1052 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
1053 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
1054 					    *fw_consumed_ss, *host_ss;
1055 	struct mgmt_rx_reo_params *in_frame_params;
1056 	struct mgmt_rx_reo_wait_count *wait_count;
1057 
1058 	if (!in_frame_pdev) {
1059 		mgmt_rx_reo_err("pdev is null");
1060 		return QDF_STATUS_E_NULL_VALUE;
1061 	}
1062 
1063 	if (!desc) {
1064 		mgmt_rx_reo_err("Frame descriptor is null");
1065 		return QDF_STATUS_E_NULL_VALUE;
1066 	}
1067 
1068 	if (!desc->rx_params) {
1069 		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
1070 		return QDF_STATUS_E_NULL_VALUE;
1071 	}
1072 
1073 	in_frame_params = desc->rx_params->reo_params;
1074 	if (!in_frame_params) {
1075 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
1076 		return QDF_STATUS_E_NULL_VALUE;
1077 	}
1078 
1079 	wait_count = &desc->wait_count;
1080 
1081 	/* Get the MLO link ID of incoming frame */
1082 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
1083 	grp_id = wlan_get_mlo_grp_id_from_pdev(in_frame_pdev);
1084 	qdf_assert_always(in_frame_link >= 0);
1085 	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
1086 	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link, grp_id));
1087 
1088 	in_frame_rx_reo_pdev_ctx =
1089 			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
1090 	if (!in_frame_rx_reo_pdev_ctx) {
1091 		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
1092 		return QDF_STATUS_E_FAILURE;
1093 	}
1094 	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
1095 		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
1096 
1097 	/* Iterate over all the valid MLO links */
1098 	for (link = 0; link < MAX_MLO_LINKS; link++) {
1099 		/* No need wait for any frames on an invalid link */
1100 		if (!mgmt_rx_reo_is_valid_link(link, grp_id)) {
1101 			frames_pending = 0;
1102 			goto update_pending_frames;
1103 		}
1104 
1105 		pdev = wlan_get_pdev_from_mlo_link_id(link, grp_id,
1106 						      WLAN_MGMT_RX_REO_ID);
1107 
1108 		/* No need to wait for any frames if the pdev is not found */
1109 		if (!pdev) {
1110 			mgmt_rx_reo_debug("pdev is null for link %d", link);
1111 			frames_pending = 0;
1112 			goto update_pending_frames;
1113 		}
1114 
1115 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
1116 		if (!rx_reo_pdev_ctx) {
1117 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
1118 					pdev);
1119 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1120 			return QDF_STATUS_E_FAILURE;
1121 		}
1122 
1123 		if (!rx_reo_pdev_ctx->init_complete) {
1124 			mgmt_rx_reo_debug("REO init in progress for link %d",
1125 					  link);
1126 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1127 			frames_pending = 0;
1128 			goto update_pending_frames;
1129 		}
1130 
1131 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
1132 		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;
1133 
1134 		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
1135 				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
1136 				 host_ss->global_timestamp);
1137 
1138 		snapshot_id = 0;
1139 		/* Read all the shared snapshots */
1140 		while (snapshot_id <
1141 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
1142 			snapshot_info = &rx_reo_pdev_ctx->
1143 				host_target_shared_snapshot_info[snapshot_id];
1144 
1145 			qdf_mem_zero(&snapshot_params[snapshot_id],
1146 				     sizeof(snapshot_params[snapshot_id]));
1147 
1148 			status = tgt_mgmt_rx_reo_read_snapshot(
1149 					pdev, snapshot_info, snapshot_id,
1150 					&snapshot_params[snapshot_id],
1151 					in_frame_rx_reo_pdev_ctx->raw_snapshots
1152 					[link][snapshot_id]);
1153 
1154 			/* Read operation shouldn't fail */
1155 			if (QDF_IS_STATUS_ERROR(status)) {
1156 				mgmt_rx_reo_err("snapshot(%d) read failed on"
1157 						"link (%d)", snapshot_id, link);
1158 				wlan_objmgr_pdev_release_ref(
1159 						pdev, WLAN_MGMT_RX_REO_ID);
1160 				return status;
1161 			}
1162 
1163 			/* If snapshot is valid, save it in the pdev context */
1164 			if (snapshot_params[snapshot_id].valid) {
1165 				rx_reo_pdev_ctx->
1166 				   last_valid_shared_snapshot[snapshot_id] =
1167 				   snapshot_params[snapshot_id];
1168 			}
1169 			desc->shared_snapshots[link][snapshot_id] =
1170 						snapshot_params[snapshot_id];
1171 
1172 			snapshot_id++;
1173 		}
1174 
1175 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1176 
1177 		mac_hw_ss = &snapshot_params
1178 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1179 		fw_forwarded_ss = &snapshot_params
1180 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
1181 		fw_consumed_ss = &snapshot_params
1182 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1183 
1184 		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
1185 								fw_forwarded_ss,
1186 								fw_consumed_ss,
1187 								host_ss, link);
1188 		if (QDF_IS_STATUS_ERROR(status)) {
1189 			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
1190 					link);
1191 			return status;
1192 		}
1193 
1194 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
1195 								*mac_hw_ss;
1196 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
1197 								*fw_forwarded_ss;
1198 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
1199 								*fw_consumed_ss;
1200 		desc->host_snapshot[link] = *host_ss;
1201 
1202 		status = mgmt_rx_reo_snapshots_check_sanity
1203 			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
1204 		if (QDF_IS_STATUS_ERROR(status)) {
1205 			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
1206 					   link);
1207 			return status;
1208 		}
1209 
1210 		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
1211 				 link, mac_hw_ss->valid,
1212 				 mac_hw_ss->mgmt_pkt_ctr,
1213 				 mac_hw_ss->global_timestamp);
1214 		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
1215 				 link, fw_forwarded_ss->valid,
1216 				 fw_forwarded_ss->mgmt_pkt_ctr,
1217 				 fw_forwarded_ss->global_timestamp);
1218 		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
1219 				 link, fw_consumed_ss->valid,
1220 				 fw_consumed_ss->mgmt_pkt_ctr,
1221 				 fw_consumed_ss->global_timestamp);
1222 
1223 		/* No need wait for any frames on the same link */
1224 		if (link == in_frame_link) {
1225 			frames_pending = 0;
1226 			goto update_pending_frames;
1227 		}
1228 
1229 		/**
1230 		 * If MAC HW snapshot is invalid, the link has not started
1231 		 * receiving management frames. Set wait count to zero.
1232 		 */
1233 		if (!mac_hw_ss->valid) {
1234 			frames_pending = 0;
1235 			goto update_pending_frames;
1236 		}
1237 
1238 		/**
1239 		 * If host snapshot is invalid, wait for MAX number of frames.
1240 		 * When any frame in this link arrives at host, actual wait
1241 		 * counts will be updated.
1242 		 */
1243 		if (!host_ss->valid) {
1244 			wait_count->per_link_count[link] = UINT_MAX;
1245 			wait_count->total_count += UINT_MAX;
1246 			goto print_wait_count;
1247 		}
1248 
1249 		/**
1250 		 * If MAC HW snapshot sequence number and host snapshot
1251 		 * sequence number are same, all the frames received by
1252 		 * this link are processed by host. No need to wait for
1253 		 * any frames from this link.
1254 		 */
1255 		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
1256 						   host_ss->mgmt_pkt_ctr)) {
1257 			frames_pending = 0;
1258 			goto update_pending_frames;
1259 		}
1260 
1261 		/**
1262 		 * Ideally, the incoming frame has to wait for only those frames
1263 		 * (on other links) which meet all the below criterion.
1264 		 * 1. Frame's timestamp is less than incoming frame's
1265 		 * 2. Frame is supposed to be consumed by the Host
1266 		 * 3. Frame is not yet seen by the Host.
1267 		 * We may not be able to compute the exact optimal wait count
1268 		 * because HW/FW provides a limited assist.
1269 		 * This algorithm tries to get the best estimate of wait count
1270 		 * by not waiting for those frames where we have a conclusive
1271 		 * evidence that we don't have to wait for those frames.
1272 		 */
1273 
1274 		/**
1275 		 * If this link has already seen a frame whose timestamp is
1276 		 * greater than or equal to incoming frame's timestamp,
1277 		 * then no need to wait for any frames on this link.
1278 		 * If the total wait count becomes zero, then the policy on
1279 		 * whether to deliver such a frame to upper layers is handled
1280 		 * separately.
1281 		 */
1282 		if (mgmt_rx_reo_compare_global_timestamps_gte(
1283 				host_ss->global_timestamp,
1284 				in_frame_params->global_timestamp)) {
1285 			frames_pending = 0;
1286 			goto update_pending_frames;
1287 		}
1288 
1289 		/**
1290 		 * For starters, we only have to wait for the frames that are
1291 		 * seen by MAC HW but not yet seen by Host. The frames which
1292 		 * reach MAC HW later are guaranteed to have a timestamp
1293 		 * greater than incoming frame's timestamp.
1294 		 */
1295 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
1296 					mac_hw_ss->mgmt_pkt_ctr,
1297 					host_ss->mgmt_pkt_ctr);
1298 		qdf_assert_always(frames_pending >= 0);
1299 
1300 		if (frames_pending &&
1301 		    mgmt_rx_reo_compare_global_timestamps_gte
1302 					(mac_hw_ss->global_timestamp,
1303 					 in_frame_params->global_timestamp)) {
1304 			/**
1305 			 * Last frame seen at MAC HW has timestamp greater than
1306 			 * or equal to incoming frame's timestamp. So no need to
1307 			 * wait for that last frame, but we can't conclusively
1308 			 * say anything about timestamp of frames before the
1309 			 * last frame, so try to wait for all of those frames.
1310 			 */
1311 			frames_pending--;
1312 			qdf_assert_always(frames_pending >= 0);
1313 
1314 			if (fw_consumed_ss->valid &&
1315 			    mgmt_rx_reo_compare_global_timestamps_gte(
1316 				fw_consumed_ss->global_timestamp,
1317 				in_frame_params->global_timestamp)) {
1318 				/**
1319 				 * Last frame consumed by the FW has timestamp
1320 				 * greater than or equal to incoming frame's.
1321 				 * That means all the frames from
1322 				 * fw_consumed_ss->mgmt_pkt_ctr to
1323 				 * mac_hw->mgmt_pkt_ctr will have timestamp
1324 				 * greater than or equal to incoming frame's and
1325 				 * hence, no need to wait for those frames.
1326 				 * We just need to wait for frames from
1327 				 * host_ss->mgmt_pkt_ctr to
1328 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
1329 				 * better estimate over the above estimate,
1330 				 * so update frames_pending.
1331 				 */
1332 				frames_pending =
1333 				  mgmt_rx_reo_subtract_pkt_ctrs(
1334 				      fw_consumed_ss->mgmt_pkt_ctr,
1335 				      host_ss->mgmt_pkt_ctr) - 1;
1336 
1337 				qdf_assert_always(frames_pending >= 0);
1338 
1339 				/**
1340 				 * Last frame forwarded to Host has timestamp
1341 				 * less than incoming frame's. That means all
1342 				 * the frames starting from
1343 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
1344 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
1345 				 * the FW and hence, no need to wait for those
1346 				 * frames. We just need to wait for frames
1347 				 * from host_ss->mgmt_pkt_ctr to
1348 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
1349 				 * better estimate over the above estimate,
1350 				 * so update frames_pending.
1351 				 */
1352 				if (fw_forwarded_ss->valid &&
1353 				    !mgmt_rx_reo_compare_global_timestamps_gte(
1354 					fw_forwarded_ss->global_timestamp,
1355 					in_frame_params->global_timestamp)) {
1356 					frames_pending =
1357 					  mgmt_rx_reo_subtract_pkt_ctrs(
1358 					      fw_forwarded_ss->mgmt_pkt_ctr,
1359 					      host_ss->mgmt_pkt_ctr);
1360 
1361 					/**
1362 					 * frames_pending can be negative in
1363 					 * cases whene there are no frames
1364 					 * getting forwarded to the Host. No
1365 					 * need to wait for any frames in that
1366 					 * case.
1367 					 */
1368 					if (frames_pending < 0)
1369 						frames_pending = 0;
1370 				}
1371 			}
1372 
1373 			/**
1374 			 * Last frame forwarded to Host has timestamp greater
1375 			 * than or equal to incoming frame's. That means all the
1376 			 * frames from fw_forwarded->mgmt_pkt_ctr to
1377 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
1378 			 * or equal to incoming frame's and hence, no need to
1379 			 * wait for those frames. We may have to just wait for
1380 			 * frames from host_ss->mgmt_pkt_ctr to
1381 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
1382 			 */
1383 			if (fw_forwarded_ss->valid &&
1384 			    mgmt_rx_reo_compare_global_timestamps_gte(
1385 				fw_forwarded_ss->global_timestamp,
1386 				in_frame_params->global_timestamp)) {
1387 				delta_fwd_host =
1388 				  mgmt_rx_reo_subtract_pkt_ctrs(
1389 				    fw_forwarded_ss->mgmt_pkt_ctr,
1390 				    host_ss->mgmt_pkt_ctr) - 1;
1391 
1392 				qdf_assert_always(delta_fwd_host >= 0);
1393 
1394 				/**
1395 				 * This will be a better estimate over the one
1396 				 * we computed using mac_hw_ss but this may or
1397 				 * may not be a better estimate over the
1398 				 * one we computed using fw_consumed_ss.
1399 				 * When timestamps of both fw_consumed_ss and
1400 				 * fw_forwarded_ss are greater than incoming
1401 				 * frame's but timestamp of fw_consumed_ss is
1402 				 * smaller than fw_forwarded_ss, then
1403 				 * frames_pending will be smaller than
1404 				 * delta_fwd_host, the reverse will be true in
1405 				 * other cases. Instead of checking for all
1406 				 * those cases, just waiting for the minimum
1407 				 * among these two should be sufficient.
1408 				 */
1409 				frames_pending = qdf_min(frames_pending,
1410 							 delta_fwd_host);
1411 				qdf_assert_always(frames_pending >= 0);
1412 			}
1413 		}
1414 
1415 update_pending_frames:
1416 			qdf_assert_always(frames_pending >= 0);
1417 
1418 			wait_count->per_link_count[link] = frames_pending;
1419 			wait_count->total_count += frames_pending;
1420 
1421 print_wait_count:
1422 			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
1423 					 link, wait_count->per_link_count[link],
1424 					 wait_count->total_count);
1425 	}
1426 
1427 	return QDF_STATUS_SUCCESS;
1428 }
1429 
1430 /**
1431  * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
1432  * information about a reo list entry for debug purposes.
1433  * @link_id: link id
1434  * @mgmt_pkt_ctr: management packet counter
1435  * @global_timestamp: global time stamp
1436  * @wait_count: wait count values
1437  * @status: status of the entry in the list
1438  * @entry: pointer to reo list entry
1439  */
1440 struct mgmt_rx_reo_list_entry_debug_info {
1441 	uint8_t link_id;
1442 	uint16_t mgmt_pkt_ctr;
1443 	uint32_t global_timestamp;
1444 	struct mgmt_rx_reo_wait_count wait_count;
1445 	uint32_t status;
1446 	struct mgmt_rx_reo_list_entry *entry;
1447 };
1448 
1449 /**
1450  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1451  * @reo_list: Pointer to reorder list
1452  *
1453  * Return: QDF_STATUS
1454  */
1455 static QDF_STATUS
1456 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1457 {
1458 	uint32_t reo_list_size;
1459 	uint32_t index;
1460 	struct mgmt_rx_reo_list_entry *cur_entry;
1461 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1462 
1463 	if (!reo_list) {
1464 		mgmt_rx_reo_err("Pointer to reo list is null");
1465 		return QDF_STATUS_E_NULL_VALUE;
1466 	}
1467 
1468 	qdf_spin_lock_bh(&reo_list->list_lock);
1469 
1470 	reo_list_size = qdf_list_size(&reo_list->list);
1471 
1472 	if (reo_list_size == 0) {
1473 		qdf_spin_unlock_bh(&reo_list->list_lock);
1474 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1475 				  reo_list_size);
1476 		return QDF_STATUS_SUCCESS;
1477 	}
1478 
1479 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1480 	if (!debug_info) {
1481 		qdf_spin_unlock_bh(&reo_list->list_lock);
1482 		mgmt_rx_reo_err("Memory allocation failed");
1483 		return QDF_STATUS_E_NOMEM;
1484 	}
1485 
1486 	index = 0;
1487 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1488 		debug_info[index].link_id =
1489 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1490 		debug_info[index].mgmt_pkt_ctr =
1491 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1492 		debug_info[index].global_timestamp =
1493 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1494 		debug_info[index].wait_count = cur_entry->wait_count;
1495 		debug_info[index].status = cur_entry->status;
1496 		debug_info[index].entry = cur_entry;
1497 
1498 		++index;
1499 	}
1500 
1501 	qdf_spin_unlock_bh(&reo_list->list_lock);
1502 
1503 	mgmt_rx_reo_debug("Reorder list");
1504 	mgmt_rx_reo_debug("##################################################");
1505 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1506 			  reo_list_size);
1507 	for (index = 0; index < reo_list_size; index++) {
1508 		uint8_t link_id;
1509 
1510 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1511 				  index, debug_info[index].link_id,
1512 				  debug_info[index].global_timestamp,
1513 				  debug_info[index].mgmt_pkt_ctr,
1514 				  debug_info[index].status,
1515 				  debug_info[index].entry);
1516 
1517 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1518 				  debug_info[index].wait_count.total_count);
1519 
1520 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1521 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1522 					  link_id, debug_info[index].wait_count.
1523 					  per_link_count[link_id]);
1524 	}
1525 	mgmt_rx_reo_debug("##################################################");
1526 
1527 	qdf_mem_free(debug_info);
1528 
1529 	return QDF_STATUS_SUCCESS;
1530 }
1531 
1532 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1533 /**
1534  * mgmt_rx_reo_egress_frame_debug_info_enabled() - API to check whether egress
1535  * frame info debug feaure is enabled
1536  * @egress_frame_debug_info: Pointer to egress frame debug info object
1537  *
1538  * Return: true or false
1539  */
1540 static bool
1541 mgmt_rx_reo_egress_frame_debug_info_enabled
1542 			(struct reo_egress_debug_info *egress_frame_debug_info)
1543 {
1544 	return egress_frame_debug_info->frame_list_size;
1545 }
1546 
1547 /**
1548  * mgmt_rx_reo_debug_print_scheduler_stats() - API to print the stats
1549  * related to frames getting scheduled by mgmt rx reo scheduler
1550  * @reo_ctx: Pointer to reorder context
1551  *
1552  * API to print the stats related to frames getting scheduled by management
1553  * Rx reorder scheduler.
1554  *
1555  * Return: QDF_STATUS
1556  */
1557 static QDF_STATUS
1558 mgmt_rx_reo_debug_print_scheduler_stats(struct mgmt_rx_reo_context *reo_ctx)
1559 {
1560 	struct reo_scheduler_stats *stats;
1561 	uint64_t scheduled_count_per_link[MAX_MLO_LINKS] = {0};
1562 	uint64_t scheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
1563 	uint64_t total_scheduled_count = 0;
1564 	uint64_t rescheduled_count_per_link[MAX_MLO_LINKS] = {0};
1565 	uint64_t rescheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
1566 	uint64_t total_rescheduled_count = 0;
1567 	uint64_t total_scheduler_cb_count = 0;
1568 	uint8_t link_id;
1569 	uint8_t ctx;
1570 
1571 	if (!reo_ctx)
1572 		return QDF_STATUS_E_NULL_VALUE;
1573 
1574 	stats = &reo_ctx->scheduler_debug_info.stats;
1575 
1576 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1577 		for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
1578 			scheduled_count_per_link[link_id] +=
1579 				stats->scheduled_count[link_id][ctx];
1580 			rescheduled_count_per_link[link_id] +=
1581 				stats->rescheduled_count[link_id][ctx];
1582 		}
1583 
1584 		total_scheduled_count += scheduled_count_per_link[link_id];
1585 		total_rescheduled_count += rescheduled_count_per_link[link_id];
1586 		total_scheduler_cb_count += stats->scheduler_cb_count[link_id];
1587 	}
1588 
1589 	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
1590 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1591 			scheduled_count_per_context[ctx] +=
1592 				stats->scheduled_count[link_id][ctx];
1593 			rescheduled_count_per_context[ctx] +=
1594 				stats->rescheduled_count[link_id][ctx];
1595 		}
1596 	}
1597 
1598 	mgmt_rx_reo_alert("Scheduler stats:");
1599 	mgmt_rx_reo_alert("\t1) Scheduled count");
1600 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
1601 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
1602 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
1603 	mgmt_rx_reo_alert("\t------------------------------------");
1604 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
1605 	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
1606 	mgmt_rx_reo_alert("\t-------------------------------------------");
1607 
1608 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1609 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
1610 				  stats->scheduled_count[link_id][0],
1611 				  stats->scheduled_count[link_id][1],
1612 				  stats->scheduled_count[link_id][2],
1613 				  scheduled_count_per_link[link_id]);
1614 		mgmt_rx_reo_alert("\t-------------------------------------------");
1615 	}
1616 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
1617 			  scheduled_count_per_context[0],
1618 			  scheduled_count_per_context[1],
1619 			  scheduled_count_per_context[2],
1620 			  total_scheduled_count);
1621 
1622 	mgmt_rx_reo_alert("\t2) Rescheduled count");
1623 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
1624 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
1625 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
1626 	mgmt_rx_reo_alert("\t------------------------------------");
1627 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
1628 	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
1629 	mgmt_rx_reo_alert("\t-------------------------------------------");
1630 
1631 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1632 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
1633 				  stats->rescheduled_count[link_id][0],
1634 				  stats->rescheduled_count[link_id][1],
1635 				  stats->rescheduled_count[link_id][2],
1636 				  rescheduled_count_per_link[link_id]);
1637 		mgmt_rx_reo_alert("\t-------------------------------------------");
1638 	}
1639 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
1640 			  rescheduled_count_per_context[0],
1641 			  rescheduled_count_per_context[1],
1642 			  rescheduled_count_per_context[2],
1643 			  total_rescheduled_count);
1644 
1645 	mgmt_rx_reo_alert("\t3) Per link stats:");
1646 	mgmt_rx_reo_alert("\t----------------------");
1647 	mgmt_rx_reo_alert("\t|link id|Scheduler CB|");
1648 	mgmt_rx_reo_alert("\t|       |    Count   |");
1649 	mgmt_rx_reo_alert("\t----------------------");
1650 
1651 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1652 		mgmt_rx_reo_alert("\t|%7u|%12llu|", link_id,
1653 				  stats->scheduler_cb_count[link_id]);
1654 		mgmt_rx_reo_alert("\t----------------------");
1655 	}
1656 	mgmt_rx_reo_alert("\t%8s|%12llu|\n\n", "", total_scheduler_cb_count);
1657 
1658 	return QDF_STATUS_SUCCESS;
1659 }
1660 
1661 /**
1662  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1663  * related to frames going out of the reorder module
1664  * @reo_ctx: Pointer to reorder context
1665  *
1666  * API to print the stats related to frames going out of the management
1667  * Rx reorder module.
1668  *
1669  * Return: QDF_STATUS
1670  */
1671 static QDF_STATUS
1672 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1673 {
1674 	struct reo_egress_frame_stats *stats;
1675 	uint8_t link_id;
1676 	uint8_t reason;
1677 	uint8_t ctx;
1678 	uint64_t total_delivery_attempts_count = 0;
1679 	uint64_t total_delivery_success_count = 0;
1680 	uint64_t total_drop_count = 0;
1681 	uint64_t total_premature_delivery_count = 0;
1682 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
1683 	uint64_t delivery_count_per_reason[RELEASE_REASON_MAX] = {0};
1684 	uint64_t delivery_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
1685 	uint64_t total_delivery_count = 0;
1686 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
1687 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
1688 	QDF_STATUS status;
1689 
1690 	if (!reo_ctx)
1691 		return QDF_STATUS_E_NULL_VALUE;
1692 
1693 	stats = &reo_ctx->egress_frame_debug_info.stats;
1694 
1695 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1696 		total_delivery_attempts_count +=
1697 				stats->delivery_attempts_count[link_id];
1698 		total_delivery_success_count +=
1699 				stats->delivery_success_count[link_id];
1700 		total_drop_count += stats->drop_count[link_id];
1701 		total_premature_delivery_count +=
1702 				stats->premature_delivery_count[link_id];
1703 	}
1704 
1705 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1706 		for (reason = 0; reason < RELEASE_REASON_MAX;
1707 		     reason++)
1708 			delivery_count_per_link[link_id] +=
1709 				stats->delivery_reason_count[link_id][reason];
1710 		total_delivery_count += delivery_count_per_link[link_id];
1711 	}
1712 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++)
1713 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1714 			delivery_count_per_reason[reason] +=
1715 				stats->delivery_reason_count[link_id][reason];
1716 	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++)
1717 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1718 			delivery_count_per_context[ctx] +=
1719 				stats->delivery_context_count[link_id][ctx];
1720 
1721 	mgmt_rx_reo_alert("Egress frame stats:");
1722 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
1723 	mgmt_rx_reo_alert("\t------------------------------------------------");
1724 	mgmt_rx_reo_alert("\t|link id  |Attempts|Success |Premature|Drop    |");
1725 	mgmt_rx_reo_alert("\t|         | count  | count  | count   |count   |");
1726 	mgmt_rx_reo_alert("\t------------------------------------------------");
1727 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1728 		mgmt_rx_reo_alert("\t|%9u|%8llu|%8llu|%9llu|%8llu|", link_id,
1729 				  stats->delivery_attempts_count[link_id],
1730 				  stats->delivery_success_count[link_id],
1731 				  stats->premature_delivery_count[link_id],
1732 				  stats->drop_count[link_id]);
1733 		mgmt_rx_reo_alert("\t------------------------------------------------");
1734 	}
1735 	mgmt_rx_reo_alert("\t%10s|%8llu|%8llu|%9llu|%8llu|\n\n", "",
1736 			  total_delivery_attempts_count,
1737 			  total_delivery_success_count,
1738 			  total_premature_delivery_count,
1739 			  total_drop_count);
1740 
1741 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
1742 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
1743 	mgmt_rx_reo_alert("\tREASON_ZERO_WAIT_COUNT - 0x%lx",
1744 			  RELEASE_REASON_ZERO_WAIT_COUNT);
1745 	mgmt_rx_reo_alert("\tREASON_AGED_OUT - 0x%lx",
1746 			  RELEASE_REASON_AGED_OUT);
1747 	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1748 			  RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1749 	mgmt_rx_reo_alert("\tREASON_INGRESS_LIST_OVERFLOW - 0x%lx",
1750 			  RELEASE_REASON_INGRESS_LIST_OVERFLOW);
1751 	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_READY_TO_DELIVER_FRAMES - 0x%lx",
1752 			  RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES);
1753 	mgmt_rx_reo_alert("\tREASON_EGRESS_LIST_OVERFLOW - 0x%lx",
1754 			  RELEASE_REASON_EGRESS_LIST_OVERFLOW);
1755 
1756 	qdf_mem_set(delivery_reason_stats_boarder_a,
1757 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
1758 	qdf_mem_set(delivery_reason_stats_boarder_b,
1759 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');
1760 
1761 	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
1762 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
1763 			  "", "", "", "", "", "");
1764 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
1765 			  "0", "1", "2", "3", "4", "5");
1766 	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1767 
1768 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++) {
1769 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
1770 				  reason,
1771 				  stats->delivery_reason_count[0][reason],
1772 				  stats->delivery_reason_count[1][reason],
1773 				  stats->delivery_reason_count[2][reason],
1774 				  stats->delivery_reason_count[3][reason],
1775 				  stats->delivery_reason_count[4][reason],
1776 				  stats->delivery_reason_count[5][reason],
1777 				  delivery_count_per_reason[reason]);
1778 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1779 	}
1780 	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1781 			  "", delivery_count_per_link[0],
1782 			  delivery_count_per_link[1],
1783 			  delivery_count_per_link[2],
1784 			  delivery_count_per_link[3],
1785 			  delivery_count_per_link[4],
1786 			  delivery_count_per_link[5],
1787 			  total_delivery_count);
1788 
1789 	mgmt_rx_reo_alert("\t3) Delivery context related stats");
1790 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
1791 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
1792 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
1793 	mgmt_rx_reo_alert("\t------------------------------------");
1794 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
1795 	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
1796 	mgmt_rx_reo_alert("\t-------------------------------------------");
1797 
1798 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1799 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
1800 				  stats->delivery_context_count[link_id][0],
1801 				  stats->delivery_context_count[link_id][1],
1802 				  stats->delivery_context_count[link_id][2],
1803 				  delivery_count_per_link[link_id]);
1804 		mgmt_rx_reo_alert("\t-------------------------------------------");
1805 	}
1806 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
1807 			  delivery_count_per_context[0],
1808 			  delivery_count_per_context[1],
1809 			  delivery_count_per_context[2],
1810 			  total_delivery_count);
1811 
1812 	mgmt_rx_reo_alert("\t4) Misc stats:");
1813 	mgmt_rx_reo_alert("\t\tEgress list overflow count = %llu\n\n",
1814 			  reo_ctx->egress_list.reo_list.overflow_count);
1815 
1816 	status = mgmt_rx_reo_debug_print_scheduler_stats(reo_ctx);
1817 	if (QDF_IS_STATUS_ERROR(status)) {
1818 		mgmt_rx_reo_err("Failed to print scheduler stats");
1819 		return status;
1820 	}
1821 
1822 	return QDF_STATUS_SUCCESS;
1823 }
1824 
/**
 * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
 * frame exiting the reorder module. Logging is done before attempting the frame
 * delivery to upper layers.
 * @reo_ctx: management rx reorder context
 * @entry: Pointer to reorder list entry
 *
 * Updates the per-link delivery attempt statistics and, when the egress
 * debug frame ring is enabled, captures a detailed record of @entry at the
 * current write index. The write index itself is advanced only by the
 * paired mgmt_rx_reo_log_egress_frame_after_delivery() call, so both
 * loggers operate on the same ring buffer slot for a given frame.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_egress_frame_before_delivery(
					struct mgmt_rx_reo_context *reo_ctx,
					struct mgmt_rx_reo_list_entry *entry)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	struct reo_egress_debug_frame_info *cur_frame_debug_info;
	struct reo_egress_frame_stats *stats;
	uint8_t link_id;

	if (!reo_ctx || !entry)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	/* Attempt/premature counters are updated unconditionally */
	stats = &egress_frame_debug_info->stats;
	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
	stats->delivery_attempts_count[link_id]++;
	if (entry->is_premature_delivery)
		stats->premature_delivery_count[link_id]++;

	/* Per-frame ring buffer logging is optional */
	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
						(egress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	cur_frame_debug_info = &egress_frame_debug_info->frame_list
			[egress_frame_debug_info->next_index];

	/* Snapshot the frame identity, wait counts and timestamps */
	cur_frame_debug_info->link_id = link_id;
	cur_frame_debug_info->mgmt_pkt_ctr =
				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
	cur_frame_debug_info->global_timestamp =
				mgmt_rx_reo_get_global_ts(entry->rx_params);
	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
	cur_frame_debug_info->final_wait_count = entry->wait_count;
	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
		     entry->shared_snapshots,
		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
			     sizeof(entry->shared_snapshots)));
	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
			     sizeof(entry->host_snapshot)));
	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
	cur_frame_debug_info->ingress_list_insertion_ts =
					entry->ingress_list_insertion_ts;
	cur_frame_debug_info->ingress_list_removal_ts =
					entry->ingress_list_removal_ts;
	cur_frame_debug_info->egress_list_insertion_ts =
					entry->egress_list_insertion_ts;
	cur_frame_debug_info->egress_list_removal_ts =
					entry->egress_list_removal_ts;
	/* Egress timestamp taken now; the after-delivery logger uses it to
	 * compute the egress duration.
	 */
	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
	cur_frame_debug_info->egress_list_size = entry->egress_list_size;
	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
	cur_frame_debug_info->scheduled_count =
				qdf_atomic_read(&entry->scheduled_count);
	cur_frame_debug_info->ctx_info = entry->ctx_info;
	cur_frame_debug_info->release_reason = entry->release_reason;
	cur_frame_debug_info->is_premature_delivery =
						entry->is_premature_delivery;
	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();

	return QDF_STATUS_SUCCESS;
}
1899 
/**
 * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
 * frame exiting the reorder module. Logging is done after attempting the frame
 * delivery to upper layer.
 * @reo_ctx: management rx reorder context
 * @entry: Pointer to reorder list entry
 * @link_id: multi-link link ID
 *
 * Updates the per-link delivery outcome statistics and, when the egress
 * debug frame ring is enabled, completes the record started by
 * mgmt_rx_reo_log_egress_frame_before_delivery() and advances the ring
 * buffer write index.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_egress_frame_after_delivery(
					struct mgmt_rx_reo_context *reo_ctx,
					struct mgmt_rx_reo_list_entry *entry,
					uint8_t link_id)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	struct reo_egress_debug_frame_info *cur_frame_debug_info;
	struct reo_egress_frame_stats *stats;
	uint8_t context;

	if (!reo_ctx || !entry)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
	/* Validate the execution context before using it as an array index */
	context = entry->ctx_info.context;
	if (context >= MGMT_RX_REO_CONTEXT_MAX)
		return QDF_STATUS_E_INVAL;

	stats = &egress_frame_debug_info->stats;
	if (entry->is_delivered) {
		uint8_t release_reason = entry->release_reason;

		stats->delivery_reason_count[link_id][release_reason]++;
		stats->delivery_context_count[link_id][context]++;
		stats->delivery_success_count[link_id]++;
	}

	if (entry->is_dropped)
		stats->drop_count[link_id]++;

	/* Per-frame ring buffer logging is optional */
	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
						(egress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	cur_frame_debug_info = &egress_frame_debug_info->frame_list
			[egress_frame_debug_info->next_index];

	cur_frame_debug_info->is_delivered = entry->is_delivered;
	cur_frame_debug_info->is_dropped = entry->is_dropped;
	/* Duration relative to the timestamp taken before delivery */
	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
					cur_frame_debug_info->egress_timestamp;

	/* Advance the ring buffer write index circularly; mark wrap-around
	 * once the index returns to slot 0.
	 */
	egress_frame_debug_info->next_index++;
	egress_frame_debug_info->next_index %=
				egress_frame_debug_info->frame_list_size;
	if (egress_frame_debug_info->next_index == 0)
		egress_frame_debug_info->wrap_aroud = true;

	return QDF_STATUS_SUCCESS;
}
1961 
/**
 * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
 * about the latest frames leaving the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be  printed.
 *
 * Walks the egress debug frame ring buffer in chronological order and dumps
 * one formatted row per frame: timestamps, wait counts, delivery flags and
 * the per-link MAC HW/FW/host snapshots captured at egress time.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					  uint16_t num_frames)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	/* Once wrapped, every slot in the ring buffer holds a valid entry */
	if (egress_frame_debug_info->wrap_aroud)
		num_valid_entries = egress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = egress_frame_debug_info->next_index;

	if (num_frames == 0) {
		/* Print all valid entries, oldest first */
		num_entries_to_print = num_valid_entries;

		if (egress_frame_debug_info->wrap_aroud)
			start_index = egress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		/* Print only the most recent num_frames valid entries;
		 * step back from next_index modulo the ring size.
		 */
		num_entries_to_print = qdf_min(num_frames, num_valid_entries);

		start_index = (egress_frame_debug_info->next_index -
			       num_entries_to_print +
			       egress_frame_debug_info->frame_list_size)
			      % egress_frame_debug_info->frame_list_size;

		qdf_assert_always(start_index >= 0 &&
				  start_index < egress_frame_debug_info->frame_list_size);
	}

	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				egress_frame_debug_info->wrap_aroud,
				egress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	/* "boarder" (sic, field spelling) is the pre-built row separator */
	boarder = egress_frame_debug_info->boarder;

	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%11s|%11s|%5s|%7s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"No.", "CPU", "Link", "SeqNo", "Global ts",
				"Ingress ts", "Ing Insert",
				"Ing Remove", "Eg Insert", "Eg Remove",
				"Egress ts", "E Dur", "I W Dur", "E W Dur",
				"Flags", "Rea.", "Final wait count",
				"Initial wait count", "Snapshot : link 0",
				"Snapshot : link 1", "Snapshot : link 2",
				"Snapshot : link 3", "Snapshot : link 4",
				"Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_egress_debug_frame_info *info;
		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
		char flag_premature_delivery = ' ';
		char flag_error = ' ';
		uint8_t link;

		info = &reo_ctx->egress_frame_debug_info.frame_list[index];

		/* 'E' marks a failed delivery, 'P' a premature delivery */
		if (!info->is_delivered)
			flag_error = 'E';

		if (info->is_premature_delivery)
			flag_premature_delivery = 'P';

		snprintf(flags, sizeof(flags), "%c %c", flag_error,
			 flag_premature_delivery);
		/* Wait counts rendered as "total(link0, ..., link5)" */
		snprintf(initial_wait_count, sizeof(initial_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->initial_wait_count.total_count,
			 info->initial_wait_count.per_link_count[0],
			 info->initial_wait_count.per_link_count[1],
			 info->initial_wait_count.per_link_count[2],
			 info->initial_wait_count.per_link_count[3],
			 info->initial_wait_count.per_link_count[4],
			 info->initial_wait_count.per_link_count[5]);
		snprintf(final_wait_count, sizeof(final_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->final_wait_count.total_count,
			 info->final_wait_count.per_link_count[0],
			 info->final_wait_count.per_link_count[1],
			 info->final_wait_count.per_link_count[2],
			 info->final_wait_count.per_link_count[3],
			 info->final_wait_count.per_link_count[4],
			 info->final_wait_count.per_link_count[5]);

		/* Render MAC HW, FW consumed, FW forwarded and host snapshots
		 * per link as "(valid, pkt_ctr, global_ts)" tuples.
		 */
		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
					entry, info->cpu_id, info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->ingress_timestamp,
					info->ingress_list_insertion_ts,
					info->ingress_list_removal_ts,
					info->egress_list_insertion_ts,
					info->egress_list_removal_ts,
					info->egress_timestamp,
					info->egress_duration,
					info->ingress_list_removal_ts -
					info->ingress_list_insertion_ts,
					info->egress_list_removal_ts -
					info->egress_list_insertion_ts,
					flags, info->release_reason,
					final_wait_count, initial_wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		/* Advance circularly through the ring buffer */
		index++;
		index %= egress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
2147 #else
/**
 * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
 * related to frames going out of the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going out of the management
 * Rx reorder module.
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is disabled.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
2163 
/**
 * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
 * frame exiting the reorder module. Logging is done before attempting the frame
 * delivery to upper layers.
 * @reo_ctx: management rx reorder context
 * @entry: Pointer to reorder list entry
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is disabled.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_egress_frame_before_delivery(
					struct mgmt_rx_reo_context *reo_ctx,
					struct mgmt_rx_reo_list_entry *entry)
{
	return QDF_STATUS_SUCCESS;
}
2180 
2181 /**
2182  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
2183  * frame exiting the reorder module. Logging is done after attempting the frame
2184  * delivery to upper layer.
2185  * @reo_ctx: management rx reorder context
2186  * @is_delivered: Flag to indicate whether the frame is delivered to upper
2187  * layers
2188  *
2189  * Return: QDF_STATUS of operation
2190  */
2191 static QDF_STATUS
2192 mgmt_rx_reo_log_egress_frame_after_delivery(
2193 					struct mgmt_rx_reo_context *reo_ctx,
2194 					bool is_delivered)
2195 {
2196 	return QDF_STATUS_SUCCESS;
2197 }
2198 
2199 /**
2200  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
2201  * the latest frames leaving the reorder module
2202  * @reo_ctx: management rx reorder context
2203  *
2204  * Return: QDF_STATUS of operation
2205  */
2206 static QDF_STATUS
2207 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2208 {
2209 	return QDF_STATUS_SUCCESS;
2210 }
2211 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2212 
2213 /**
2214  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
2215  * for releasing the reorder list entry to upper layer.
2216  * reorder list.
2217  * @entry: List entry
2218  *
2219  * This API expects the caller to acquire the spin lock protecting the reorder
2220  * list.
2221  *
2222  * Return: Reason for releasing the frame.
2223  */
2224 static uint8_t
2225 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
2226 {
2227 	uint8_t reason = 0;
2228 
2229 	if (!entry)
2230 		return 0;
2231 
2232 	if (!LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
2233 		reason |= RELEASE_REASON_ZERO_WAIT_COUNT;
2234 
2235 	if (LIST_ENTRY_IS_AGED_OUT(entry))
2236 		reason |= RELEASE_REASON_AGED_OUT;
2237 
2238 	if (LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
2239 		reason |= RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
2240 
2241 	if (LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry))
2242 		reason |= RELEASE_REASON_INGRESS_LIST_OVERFLOW;
2243 
2244 	if (LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry))
2245 		reason |= RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES;
2246 
2247 	if (LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry))
2248 		reason |= RELEASE_REASON_EGRESS_LIST_OVERFLOW;
2249 
2250 	return reason;
2251 }
2252 
2253 /**
2254  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
2255  * @reo_context: Pointer to reorder context
2256  * @entry: List entry
2257  * @deliver: Indicates whether this entry has to be delivered to upper layers
2258  * or dropped in the reo layer itself.
2259  *
2260  * API to send the frame to the upper layer. This API has to be called only
2261  * for entries which can be released to upper layer. It is the caller's
2262  * responsibility to ensure that entry can be released (by using API
2263  * mgmt_rx_reo_is_entry_ready_to_send_up). This API is called after
2264  * acquiring the lock which serializes the frame delivery to the upper layers.
2265  *
2266  * Return: QDF_STATUS
2267  */
2268 static QDF_STATUS
2269 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
2270 			       struct mgmt_rx_reo_list_entry *entry,
2271 			       bool deliver)
2272 {
2273 	uint8_t release_reason;
2274 	uint8_t link_id;
2275 	uint32_t entry_global_ts;
2276 	QDF_STATUS status;
2277 	QDF_STATUS temp;
2278 
2279 	qdf_assert_always(reo_context);
2280 	qdf_assert_always(entry);
2281 
2282 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2283 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
2284 
2285 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
2286 
2287 	qdf_assert_always(release_reason != 0);
2288 
2289 	entry->is_delivered = false;
2290 	entry->is_dropped = false;
2291 	entry->is_premature_delivery = false;
2292 	entry->release_reason = release_reason;
2293 
2294 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
2295 		entry->is_premature_delivery = true;
2296 		status = mgmt_rx_reo_handle_potential_premature_delivery(
2297 						reo_context, entry_global_ts);
2298 		if (QDF_IS_STATUS_ERROR(status))
2299 			goto exit;
2300 	}
2301 
2302 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
2303 							      entry);
2304 	if (QDF_IS_STATUS_ERROR(status))
2305 		goto exit;
2306 
2307 	if (deliver) {
2308 		status = wlan_mgmt_txrx_process_rx_frame(entry->pdev,
2309 							 entry->nbuf,
2310 							 entry->rx_params);
2311 		/* Above call frees nbuf and rx_params, make them null */
2312 		entry->nbuf = NULL;
2313 		entry->rx_params = NULL;
2314 
2315 		if (QDF_IS_STATUS_ERROR(status))
2316 			goto exit_log;
2317 
2318 		entry->is_delivered = true;
2319 	} else {
2320 		free_mgmt_rx_event_params(entry->rx_params);
2321 		qdf_nbuf_free(entry->nbuf);
2322 		entry->is_dropped = true;
2323 	}
2324 
2325 	status = QDF_STATUS_SUCCESS;
2326 
2327 exit_log:
2328 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry,
2329 							   link_id);
2330 	if (QDF_IS_STATUS_ERROR(temp))
2331 		status = temp;
2332 exit:
2333 	/**
2334 	 * Release the reference taken when the entry is inserted into
2335 	 * the reorder list
2336 	 */
2337 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2338 
2339 	return status;
2340 }
2341 
2342 /**
2343  * mgmt_rx_reo_is_entry_ready_to_send_up() - API to check whether the
2344  * list entry can be send to upper layers.
2345  * @entry: List entry
2346  *
2347  * Return: QDF_STATUS
2348  */
2349 static bool
2350 mgmt_rx_reo_is_entry_ready_to_send_up(struct mgmt_rx_reo_list_entry *entry)
2351 {
2352 	qdf_assert_always(entry);
2353 
2354 	return LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry) ||
2355 	       LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry) ||
2356 	       !LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry) ||
2357 	       LIST_ENTRY_IS_AGED_OUT(entry) ||
2358 	       LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry) ||
2359 	       LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry);
2360 }
2361 
2362 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2363 /**
2364  * mgmt_rx_reo_scheduler_debug_info_enabled() - API to check whether scheduler
2365  * debug feaure is enabled
2366  * @scheduler_debug_info: Pointer to scheduler debug info object
2367  *
2368  * Return: true or false
2369  */
2370 static bool
2371 mgmt_rx_reo_scheduler_debug_info_enabled
2372 			(struct reo_scheduler_debug_info *scheduler_debug_info)
2373 {
2374 	return scheduler_debug_info->frame_list_size;
2375 }
2376 
2377 /**
2378  * mgmt_rx_reo_log_scheduler_debug_info() - Log the information about a
2379  * frame getting scheduled by mgmt rx reo scheduler
2380  * @reo_ctx: management rx reorder context
2381  * @entry: Pointer to reorder list entry
2382  * @reschedule: Indicates rescheduling
2383  *
2384  * Return: QDF_STATUS of operation
2385  */
2386 static QDF_STATUS
2387 mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
2388 				     struct mgmt_rx_reo_list_entry *entry,
2389 				     bool reschedule)
2390 {
2391 	struct reo_scheduler_debug_info *scheduler_debug_info;
2392 	struct reo_scheduler_debug_frame_info *cur_frame_debug_info;
2393 	struct reo_scheduler_stats *stats;
2394 	uint8_t link_id;
2395 
2396 	if (!reo_ctx || !entry)
2397 		return QDF_STATUS_E_NULL_VALUE;
2398 
2399 	scheduler_debug_info = &reo_ctx->scheduler_debug_info;
2400 
2401 	stats = &scheduler_debug_info->stats;
2402 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2403 	stats->scheduled_count[link_id][entry->ctx_info.context]++;
2404 	if (reschedule)
2405 		stats->rescheduled_count[link_id][entry->ctx_info.context]++;
2406 
2407 	if (!mgmt_rx_reo_scheduler_debug_info_enabled(scheduler_debug_info))
2408 		return QDF_STATUS_SUCCESS;
2409 
2410 	cur_frame_debug_info = &scheduler_debug_info->frame_list
2411 			[scheduler_debug_info->next_index];
2412 
2413 	cur_frame_debug_info->link_id = link_id;
2414 	cur_frame_debug_info->mgmt_pkt_ctr =
2415 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
2416 	cur_frame_debug_info->global_timestamp =
2417 				mgmt_rx_reo_get_global_ts(entry->rx_params);
2418 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
2419 	cur_frame_debug_info->final_wait_count = entry->wait_count;
2420 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
2421 		     entry->shared_snapshots,
2422 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
2423 			     sizeof(entry->shared_snapshots)));
2424 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
2425 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
2426 			     sizeof(entry->host_snapshot)));
2427 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
2428 	cur_frame_debug_info->ingress_list_insertion_ts =
2429 					entry->ingress_list_insertion_ts;
2430 	cur_frame_debug_info->ingress_list_removal_ts =
2431 					entry->ingress_list_removal_ts;
2432 	cur_frame_debug_info->egress_list_insertion_ts =
2433 					entry->egress_list_insertion_ts;
2434 	cur_frame_debug_info->scheduled_ts = qdf_get_log_timestamp();
2435 	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
2436 	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
2437 	cur_frame_debug_info->scheduled_count =
2438 				qdf_atomic_read(&entry->scheduled_count);
2439 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
2440 	cur_frame_debug_info->ctx_info = entry->ctx_info;
2441 
2442 	scheduler_debug_info->next_index++;
2443 	scheduler_debug_info->next_index %=
2444 				scheduler_debug_info->frame_list_size;
2445 	if (scheduler_debug_info->next_index == 0)
2446 		scheduler_debug_info->wrap_aroud = true;
2447 
2448 	return QDF_STATUS_SUCCESS;
2449 }
2450 #else
2451 /**
2452  * mgmt_rx_reo_log_scheduler_debug_info() - Log the information about a
2453  * frame getting scheduled by mgmt rx reo scheduler
2454  * @reo_ctx: management rx reorder context
2455  * @entry: Pointer to reorder list entry
2456  * @context: Current execution context
2457  * @reschedule: Indicates rescheduling
2458  *
2459  * Return: QDF_STATUS of operation
2460  */
2461 static inline QDF_STATUS
2462 mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
2463 				     struct mgmt_rx_reo_list_entry *entry,
2464 				     enum mgmt_rx_reo_execution_context context,
2465 				     bool reschedule)
2466 {
2467 	return QDF_STATUS_SUCCESS;
2468 }
2469 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2470 
2471 /**
2472  * mgmt_rx_reo_defer_delivery() - Helper API to check whether a management
2473  * frame can be delivered in the current context or it has to be scheduled
2474  * for delivery in a different context
2475  * @entry: List entry
2476  * @link_bitmap: Link bitmap
2477  *
2478  * Return: true if frame can't be delivered in the current context and its
2479  * delivery has to be done in a different context
2480  */
2481 bool
2482 mgmt_rx_reo_defer_delivery(struct mgmt_rx_reo_list_entry *entry,
2483 			   uint32_t link_bitmap)
2484 {
2485 	uint8_t link_id;
2486 	uint8_t mlo_grp_id;
2487 	struct wlan_objmgr_pdev *pdev;
2488 
2489 	qdf_assert_always(entry);
2490 
2491 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2492 	mlo_grp_id = entry->rx_params->reo_params->mlo_grp_id;
2493 
2494 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, mlo_grp_id,
2495 					      WLAN_MGMT_RX_REO_ID);
2496 	if (!pdev) {
2497 		mgmt_rx_reo_err("pdev for link %u, group %u is null",
2498 				link_id, mlo_grp_id);
2499 		return false;
2500 	}
2501 
2502 	if (!wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev(pdev)) {
2503 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2504 		return false;
2505 	}
2506 
2507 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2508 
2509 	return !(link_bitmap & (1 << link_id));
2510 }
2511 
/**
 * mgmt_rx_reo_schedule_delivery() - Helper API to schedule the delivery of
 * a management frames.
 * @reo_context: Pointer to reorder context
 * @entry: List entry corresponding to the frame which has to be scheduled
 * for delivery
 *
 * Atomically counts scheduling attempts on @entry. Only the first attempt
 * triggers the target scheduler; subsequent attempts (reschedules) merely
 * record a timestamp, since a delivery is already pending.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
			      struct mgmt_rx_reo_list_entry *entry)
{
	int scheduled_count;
	int8_t link_id;
	uint8_t mlo_grp_id;
	struct wlan_objmgr_pdev *pdev;
	QDF_STATUS status;
	bool reschedule;

	if (!reo_context) {
		mgmt_rx_reo_err("Reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!entry) {
		mgmt_rx_reo_err("List entry is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* First attempt increments the count to 1; any greater value means
	 * a schedule request is already outstanding (reschedule).
	 */
	scheduled_count = qdf_atomic_inc_return(&entry->scheduled_count);
	qdf_assert_always(scheduled_count > 0);

	reschedule = (scheduled_count > 1);
	status = mgmt_rx_reo_log_scheduler_debug_info(reo_context, entry,
						      reschedule);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to log scheduler debug info");
		return status;
	}

	if (reschedule) {
		/* Delivery is already scheduled; just note the time */
		entry->last_scheduled_ts = qdf_get_log_timestamp();
		return QDF_STATUS_SUCCESS;
	}

	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
	qdf_assert_always(link_id >= 0 && link_id < MAX_MLO_LINKS);
	mlo_grp_id = entry->rx_params->reo_params->mlo_grp_id;
	pdev = wlan_get_pdev_from_mlo_link_id(link_id, mlo_grp_id,
					      WLAN_MGMT_RX_REO_ID);
	if (!pdev) {
		mgmt_rx_reo_err("pdev for link %u, group %u is null",
				link_id, mlo_grp_id);
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Kick the target interface scheduler for this psoc */
	entry->first_scheduled_ts = qdf_get_log_timestamp();
	status = tgt_mgmt_rx_reo_schedule_delivery(wlan_pdev_get_psoc(pdev));
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to schedule for link %u, group %u",
				link_id, mlo_grp_id);
		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
		return status;
	}
	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);

	return QDF_STATUS_SUCCESS;
}
2581 
2582 /**
2583  * mgmt_rx_reo_release_egress_list_entries() - Release entries from the
2584  * egress list
2585  * @reo_context: Pointer to management Rx reorder context
2586  * @link_bitmap: Bitmap of links for which frames can be released in the current
2587  * context
2588  * @ctx: Current execution context info
2589  *
2590  * This API releases the entries from the egress list based on the following
2591  * conditions.
2592  *   a) Entries with total wait count equal to 0
2593  *   b) Entries which are timed out or entries with global time stamp <= global
2594  *      time stamp of the latest frame which is timed out. We can only release
2595  *      the entries in the increasing order of the global time stamp.
2596  *      So all the entries with global time stamp <= global time stamp of the
2597  *      latest timed out frame has to be released.
2598  *
2599  * Return: QDF_STATUS
2600  */
2601 static QDF_STATUS
2602 mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context,
2603 					uint32_t link_bitmap,
2604 					struct mgmt_rx_reo_context_info *ctx)
2605 {
2606 	QDF_STATUS status;
2607 	struct mgmt_rx_reo_egress_list *egress_list;
2608 	struct mgmt_rx_reo_list *reo_egress_list;
2609 	qdf_timer_t *egress_inactivity_timer;
2610 
2611 	if (!reo_context) {
2612 		mgmt_rx_reo_err("reo context is null");
2613 		return QDF_STATUS_E_NULL_VALUE;
2614 	}
2615 
2616 	egress_list = &reo_context->egress_list;
2617 	reo_egress_list = &egress_list->reo_list;
2618 	egress_inactivity_timer = &egress_list->egress_inactivity_timer;
2619 
2620 	qdf_spin_lock(&reo_context->frame_release_lock);
2621 
2622 	while (1) {
2623 		struct mgmt_rx_reo_list_entry *first_entry;
2624 		/* TODO yield if release_count > THRESHOLD */
2625 		uint16_t release_count = 0;
2626 		uint32_t first_entry_ts;
2627 		struct mgmt_rx_event_params *rx_params;
2628 		struct mgmt_rx_reo_frame_info *last_released_frame =
2629 					&reo_egress_list->last_released_frame;
2630 		uint32_t last_released_frame_ts;
2631 		bool ready;
2632 		bool defer;
2633 		bool overflow;
2634 
2635 		qdf_spin_lock_bh(&reo_egress_list->list_lock);
2636 
2637 		first_entry = qdf_list_first_entry_or_null(
2638 					&reo_egress_list->list,
2639 					struct mgmt_rx_reo_list_entry, node);
2640 		if (!first_entry) {
2641 			status = QDF_STATUS_SUCCESS;
2642 			goto exit_unlock_egress_list_lock;
2643 		}
2644 
2645 		ready = mgmt_rx_reo_is_entry_ready_to_send_up(first_entry);
2646 		qdf_assert_always(ready);
2647 
2648 		first_entry->ctx_info = *ctx;
2649 		defer = mgmt_rx_reo_defer_delivery(first_entry, link_bitmap);
2650 		overflow =
2651 		 LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(first_entry);
2652 		if (defer && !overflow) {
2653 			status = mgmt_rx_reo_schedule_delivery(reo_context,
2654 							       first_entry);
2655 			if (QDF_IS_STATUS_ERROR(status))
2656 				mgmt_rx_reo_err("Failed to schedule delivery");
2657 			goto exit_unlock_egress_list_lock;
2658 		}
2659 
2660 		first_entry->egress_list_size =
2661 					qdf_list_size(&reo_egress_list->list);
2662 		status = qdf_list_remove_node(&reo_egress_list->list,
2663 					      &first_entry->node);
2664 		if (QDF_IS_STATUS_ERROR(status)) {
2665 			status = QDF_STATUS_E_FAILURE;
2666 			goto exit_unlock_egress_list_lock;
2667 		}
2668 		first_entry->egress_list_removal_ts = qdf_get_log_timestamp();
2669 
2670 		/**
2671 		 * Last released frame global time stamp is invalid means that
2672 		 * current frame is the first frame to be released to the
2673 		 * upper layer from the egress list. Blindly update the last
2674 		 * released frame global time stamp to the current frame's
2675 		 * global time stamp and set the valid to true.
2676 		 * If the last released frame global time stamp is valid and
2677 		 * current frame's global time stamp is >= last released frame
2678 		 * global time stamp, deliver the current frame to upper layer
2679 		 * and update the last released frame global time stamp.
2680 		 */
2681 		rx_params = first_entry->rx_params;
2682 		first_entry_ts = mgmt_rx_reo_get_global_ts(rx_params);
2683 		last_released_frame_ts =
2684 			last_released_frame->reo_params.global_timestamp;
2685 
2686 		if (!last_released_frame->valid ||
2687 		    mgmt_rx_reo_compare_global_timestamps_gte(
2688 			first_entry_ts, last_released_frame_ts)) {
2689 			qdf_timer_sync_cancel(egress_inactivity_timer);
2690 
2691 			last_released_frame->reo_params =
2692 						*rx_params->reo_params;
2693 			last_released_frame->valid = true;
2694 
2695 			qdf_timer_mod(egress_inactivity_timer,
2696 				      MGMT_RX_REO_EGRESS_INACTIVITY_TIMEOUT);
2697 		} else {
2698 			/**
2699 			 * This should never happen. All the frames older than
2700 			 * the last frame released from the reorder list will be
2701 			 * discarded at the entry to reorder algorithm itself.
2702 			 */
2703 			qdf_assert_always(first_entry->is_parallel_rx);
2704 		}
2705 
2706 		qdf_spin_unlock_bh(&reo_egress_list->list_lock);
2707 
2708 		status = mgmt_rx_reo_list_entry_send_up(reo_context,
2709 							first_entry,
2710 							!defer || !overflow);
2711 		if (QDF_IS_STATUS_ERROR(status)) {
2712 			status = QDF_STATUS_E_FAILURE;
2713 			qdf_mem_free(first_entry);
2714 			goto exit_unlock_frame_release_lock;
2715 		}
2716 
2717 		qdf_mem_free(first_entry);
2718 		release_count++;
2719 	}
2720 
2721 	status = QDF_STATUS_SUCCESS;
2722 	goto exit_unlock_frame_release_lock;
2723 
2724 exit_unlock_egress_list_lock:
2725 	qdf_assert_always(qdf_list_size(&reo_egress_list->list) <=
2726 					reo_egress_list->max_list_size);
2727 	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
2728 exit_unlock_frame_release_lock:
2729 	qdf_spin_unlock(&reo_context->frame_release_lock);
2730 
2731 	return status;
2732 }
2733 
2734 QDF_STATUS
2735 mgmt_rx_reo_release_frames(uint8_t mlo_grp_id, uint32_t link_bitmap)
2736 {
2737 	struct mgmt_rx_reo_context *reo_context;
2738 	QDF_STATUS ret;
2739 	struct mgmt_rx_reo_context_info ctx_info = {0};
2740 	uint8_t link;
2741 
2742 	reo_context = mgmt_rx_reo_get_context(mlo_grp_id);
2743 	if (!reo_context) {
2744 		mgmt_rx_reo_err("Mgmt rx reo context is null");
2745 		return QDF_STATUS_E_NULL_VALUE;
2746 	}
2747 
2748 	for (link = 0; link < MAX_MLO_LINKS; link++)
2749 		if (link_bitmap & (1 << link)) {
2750 			struct reo_scheduler_stats *stats;
2751 
2752 			stats = &reo_context->scheduler_debug_info.stats;
2753 			stats->scheduler_cb_count[link]++;
2754 		}
2755 
2756 	ctx_info.context = MGMT_RX_REO_CONTEXT_SCHEDULER_CB;
2757 	ctx_info.context_id = qdf_atomic_inc_return(&reo_context->context_id);
2758 	ret = mgmt_rx_reo_release_egress_list_entries(reo_context, link_bitmap,
2759 						      &ctx_info);
2760 	if (QDF_IS_STATUS_ERROR(ret)) {
2761 		mgmt_rx_reo_err("Failure to release frames grp = %u bm = 0x%x",
2762 				mlo_grp_id, link_bitmap);
2763 		return ret;
2764 	}
2765 
2766 	return QDF_STATUS_SUCCESS;
2767 }
2768 
2769 /**
2770  * mgmt_rx_reo_check_sanity_list() - Check the sanity of reorder list
2771  * @reo_list: Pointer to reorder list
2772  *
2773  * Check the sanity of ingress reorder list or egress reorder list.
2774  * Ingress/Egress reorder list entries should be in the non decreasing order
2775  * of global time stamp.
2776  *
2777  * Return: QDF_STATUS
2778  */
2779 static QDF_STATUS
2780 mgmt_rx_reo_check_sanity_list(struct mgmt_rx_reo_list *reo_list)
2781 {
2782 	struct mgmt_rx_reo_list_entry *first;
2783 	struct mgmt_rx_reo_list_entry *cur;
2784 	uint32_t ts_prev;
2785 	uint32_t ts_cur;
2786 
2787 	qdf_assert_always(reo_list);
2788 
2789 	if (qdf_list_empty(&reo_list->list))
2790 		return QDF_STATUS_SUCCESS;
2791 
2792 	first = qdf_list_first_entry_or_null(&reo_list->list,
2793 					     struct mgmt_rx_reo_list_entry,
2794 					     node);
2795 	qdf_assert_always(first);
2796 
2797 	cur = first;
2798 	ts_prev = mgmt_rx_reo_get_global_ts(first->rx_params);
2799 
2800 	qdf_list_for_each_continue(&reo_list->list, cur, node) {
2801 		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
2802 
2803 		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_cur,
2804 							       ts_prev))
2805 			return QDF_STATUS_E_INVAL;
2806 
2807 		ts_prev = ts_cur;
2808 	}
2809 
2810 	return QDF_STATUS_SUCCESS;
2811 }
2812 
2813 /**
2814  * mgmt_rx_reo_check_sanity_lists() - Check the sanity of ingress and
2815  * egress reorder lists
2816  * @reo_egress_list: Pointer to egress reorder list
2817  * @reo_ingress_list: Pointer to ingress reorder list
2818  *
2819  * Check the sanity of ingress reorder list and egress reorder list.
2820  * This API does the following sanity checks.
2821  *
2822  * 1. Ingress list entries should be in the non decreasing order of global
2823  *    time stamp.
2824  * 2. Egress list entries should be in the non decreasing order of global
2825  *    time stamp.
2826  * 3. All the entries in egress list should have global time stamp less
2827  *    than or equal to all the entries in ingress list.
2828  *
2829  * Return: QDF_STATUS
2830  */
2831 static QDF_STATUS
2832 mgmt_rx_reo_check_sanity_lists(struct mgmt_rx_reo_list *reo_egress_list,
2833 			       struct mgmt_rx_reo_list *reo_ingress_list)
2834 {
2835 	QDF_STATUS status;
2836 	struct mgmt_rx_reo_list_entry *last_entry_egress_list;
2837 	uint32_t ts_egress_last_entry;
2838 	struct mgmt_rx_reo_list_entry *first_entry_ingress_list;
2839 	uint32_t ts_ingress_first_entry;
2840 
2841 	qdf_assert_always(reo_egress_list);
2842 	qdf_assert_always(reo_ingress_list);
2843 
2844 	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
2845 	if (QDF_IS_STATUS_ERROR(status)) {
2846 		mgmt_rx_reo_err("Sanity check of egress list failed");
2847 		return status;
2848 	}
2849 
2850 	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
2851 	if (QDF_IS_STATUS_ERROR(status)) {
2852 		mgmt_rx_reo_err("Sanity check of ingress list failed");
2853 		return status;
2854 	}
2855 
2856 	if (qdf_list_empty(&reo_egress_list->list) ||
2857 	    qdf_list_empty(&reo_ingress_list->list))
2858 		return QDF_STATUS_SUCCESS;
2859 
2860 	last_entry_egress_list =
2861 		qdf_list_last_entry(&reo_egress_list->list,
2862 				    struct mgmt_rx_reo_list_entry, node);
2863 	ts_egress_last_entry =
2864 		mgmt_rx_reo_get_global_ts(last_entry_egress_list->rx_params);
2865 
2866 	first_entry_ingress_list =
2867 		qdf_list_first_entry_or_null(&reo_ingress_list->list,
2868 					     struct mgmt_rx_reo_list_entry,
2869 					     node);
2870 	if (!first_entry_ingress_list) {
2871 		mgmt_rx_reo_err("Ingress list is expected to be non empty");
2872 		return QDF_STATUS_E_INVAL;
2873 	}
2874 
2875 	ts_ingress_first_entry =
2876 		mgmt_rx_reo_get_global_ts(first_entry_ingress_list->rx_params);
2877 
2878 	if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_ingress_first_entry,
2879 						       ts_egress_last_entry))
2880 		return QDF_STATUS_E_INVAL;
2881 
2882 	return QDF_STATUS_SUCCESS;
2883 }
2884 
2885 /**
2886  * mgmt_rx_reo_handle_egress_overflow() - Handle overflow of management
2887  * rx reorder egress list
2888  * @reo_egress_list: Pointer to egress reorder list
2889  *
2890  * API to handle overflow of management rx reorder egress list.
2891  *
2892  * Return: QDF_STATUS
2893  */
2894 static QDF_STATUS
2895 mgmt_rx_reo_handle_egress_overflow(struct mgmt_rx_reo_list *reo_egress_list)
2896 {
2897 	struct mgmt_rx_reo_list_entry *cur_entry;
2898 	uint32_t egress_list_max_size;
2899 	uint32_t egress_list_cur_size;
2900 	uint32_t num_overflow_frames;
2901 
2902 	if (!reo_egress_list) {
2903 		mgmt_rx_reo_err("Egress reorder list is null");
2904 		return QDF_STATUS_E_NULL_VALUE;
2905 	}
2906 
2907 	reo_egress_list->overflow_count++;
2908 	reo_egress_list->last_overflow_ts = qdf_get_log_timestamp();
2909 	mgmt_rx_reo_err_rl("Egress overflow, cnt:%llu size:%u",
2910 			   reo_egress_list->overflow_count,
2911 			   qdf_list_size(&reo_egress_list->list));
2912 
2913 	egress_list_cur_size = qdf_list_size(&reo_egress_list->list);
2914 	egress_list_max_size = reo_egress_list->max_list_size;
2915 	num_overflow_frames = egress_list_cur_size - egress_list_max_size;
2916 
2917 	qdf_list_for_each(&reo_egress_list->list, cur_entry, node) {
2918 		if (num_overflow_frames > 0) {
2919 			cur_entry->status |= STATUS_EGRESS_LIST_OVERFLOW;
2920 			num_overflow_frames--;
2921 		}
2922 	}
2923 
2924 	return QDF_STATUS_SUCCESS;
2925 }
2926 
2927 /**
2928  * mgmt_rx_reo_move_entries_ingress_to_egress_list() - Moves frames in
2929  * the ingress list which are ready to be delivered to the egress list
2930  * @ingress_list: Pointer to ingress list
2931  * @egress_list: Pointer to egress list
2932  *
2933  * This API moves frames in the ingress list which are ready to be delivered
2934  * to the egress list.
2935  *
2936  * Return: QDF_STATUS
2937  */
2938 static QDF_STATUS
2939 mgmt_rx_reo_move_entries_ingress_to_egress_list
2940 		(struct mgmt_rx_reo_ingress_list *ingress_list,
2941 		 struct mgmt_rx_reo_egress_list *egress_list)
2942 {
2943 	struct mgmt_rx_reo_list *reo_ingress_list;
2944 	struct mgmt_rx_reo_list *reo_egress_list;
2945 	QDF_STATUS status;
2946 	struct mgmt_rx_reo_list_entry *ingress_list_entry;
2947 	struct mgmt_rx_reo_list_entry *latest_frame_ready_to_deliver = NULL;
2948 	uint16_t num_frames_ready_to_deliver = 0;
2949 	uint32_t num_overflow_frames = 0;
2950 	uint32_t ingress_list_max_size;
2951 	uint32_t ingress_list_cur_size;
2952 
2953 	if (!ingress_list) {
2954 		mgmt_rx_reo_err("Ingress list is null");
2955 		return QDF_STATUS_E_NULL_VALUE;
2956 	}
2957 	reo_ingress_list = &ingress_list->reo_list;
2958 
2959 	if (!egress_list) {
2960 		mgmt_rx_reo_err("Egress list is null");
2961 		return QDF_STATUS_E_NULL_VALUE;
2962 	}
2963 	reo_egress_list = &egress_list->reo_list;
2964 
2965 	qdf_spin_lock_bh(&reo_ingress_list->list_lock);
2966 
2967 	ingress_list_cur_size = qdf_list_size(&reo_ingress_list->list);
2968 	ingress_list_max_size = reo_ingress_list->max_list_size;
2969 	if (mgmt_rx_reo_list_overflowed(reo_ingress_list))
2970 		num_overflow_frames =
2971 				ingress_list_cur_size - ingress_list_max_size;
2972 
2973 	qdf_list_for_each(&reo_ingress_list->list, ingress_list_entry, node) {
2974 		if (num_overflow_frames > 0) {
2975 			ingress_list_entry->status |=
2976 						STATUS_INGRESS_LIST_OVERFLOW;
2977 			num_overflow_frames--;
2978 		}
2979 
2980 		if (!mgmt_rx_reo_is_entry_ready_to_send_up(ingress_list_entry))
2981 			break;
2982 
2983 		ingress_list_entry->ingress_list_removal_ts =
2984 							qdf_get_log_timestamp();
2985 		ingress_list_entry->egress_list_insertion_ts =
2986 							qdf_get_log_timestamp();
2987 		latest_frame_ready_to_deliver = ingress_list_entry;
2988 		num_frames_ready_to_deliver++;
2989 	}
2990 
2991 	/* Check if ingress list has at least one frame ready to be delivered */
2992 	if (num_frames_ready_to_deliver) {
2993 		qdf_list_t temp_list_frames_ready_to_deliver;
2994 
2995 		qdf_list_create(&temp_list_frames_ready_to_deliver,
2996 				INGRESS_TO_EGRESS_MOVEMENT_TEMP_LIST_MAX_SIZE);
2997 
2998 		status = qdf_list_split(&temp_list_frames_ready_to_deliver,
2999 					&reo_ingress_list->list,
3000 					&latest_frame_ready_to_deliver->node);
3001 		qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
3002 
3003 		qdf_assert_always(num_frames_ready_to_deliver ==
3004 			qdf_list_size(&temp_list_frames_ready_to_deliver));
3005 
3006 		qdf_spin_lock_bh(&reo_egress_list->list_lock);
3007 
3008 		status = qdf_list_join(&reo_egress_list->list,
3009 				       &temp_list_frames_ready_to_deliver);
3010 		qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
3011 
3012 		if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
3013 			status =
3014 			    mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
3015 			if (QDF_IS_STATUS_ERROR(status)) {
3016 				mgmt_rx_reo_err("Failed to handle overflow");
3017 				qdf_assert_always(0);
3018 			}
3019 		}
3020 
3021 		qdf_assert_always(qdf_list_size(&reo_ingress_list->list) <=
3022 				  reo_ingress_list->max_list_size);
3023 
3024 		status = mgmt_rx_reo_check_sanity_lists(reo_egress_list,
3025 							reo_ingress_list);
3026 		if (QDF_IS_STATUS_ERROR(status)) {
3027 			mgmt_rx_reo_err("Sanity check of reo lists failed");
3028 			qdf_assert_always(0);
3029 		}
3030 
3031 		qdf_spin_unlock_bh(&reo_egress_list->list_lock);
3032 
3033 		qdf_list_destroy(&temp_list_frames_ready_to_deliver);
3034 	}
3035 
3036 	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);
3037 
3038 	return QDF_STATUS_SUCCESS;
3039 }
3040 
3041 /**
3042  * mgmt_rx_reo_ageout_entries_ingress_list() - Helper API to ageout entries
3043  * in the ingress list
3044  * @ingress_list: Pointer to the ingress list
3045  * @latest_aged_out_entry: Double pointer to the latest agedout entry in the
3046  * ingress list
3047  *
3048  * Helper API to ageout entries in the ingress list.
3049  *
3050  * Return: QDF_STATUS
3051  */
3052 static QDF_STATUS
3053 mgmt_rx_reo_ageout_entries_ingress_list
3054 			(struct mgmt_rx_reo_ingress_list *ingress_list,
3055 			 struct mgmt_rx_reo_list_entry **latest_aged_out_entry)
3056 {
3057 	struct mgmt_rx_reo_list *reo_ingress_list;
3058 	struct mgmt_rx_reo_list_entry *cur_entry;
3059 	uint64_t cur_ts;
3060 
3061 	qdf_assert_always(ingress_list);
3062 	qdf_assert_always(latest_aged_out_entry);
3063 
3064 	*latest_aged_out_entry = NULL;
3065 	reo_ingress_list = &ingress_list->reo_list;
3066 
3067 	qdf_spin_lock_bh(&reo_ingress_list->list_lock);
3068 
3069 	cur_ts = qdf_get_log_timestamp();
3070 
3071 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
3072 		if (cur_ts - cur_entry->ingress_list_insertion_ts >=
3073 		    ingress_list->list_entry_timeout_us) {
3074 			*latest_aged_out_entry = cur_entry;
3075 			cur_entry->status |= STATUS_AGED_OUT;
3076 		}
3077 	}
3078 
3079 	if (!*latest_aged_out_entry)
3080 		goto exit_release_list_lock;
3081 
3082 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
3083 		if (cur_entry == *latest_aged_out_entry)
3084 			break;
3085 		cur_entry->status |= STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
3086 	}
3087 
3088 exit_release_list_lock:
3089 	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);
3090 
3091 	return QDF_STATUS_SUCCESS;
3092 }
3093 
3094 /**
3095  * mgmt_rx_reo_ingress_list_ageout_timer_handler() - Periodic ageout timer
3096  * handler
3097  * @arg: Argument to timer handler
3098  *
3099  * This is the handler for periodic ageout timer used to timeout entries in the
3100  * ingress list.
3101  *
3102  * Return: void
3103  */
3104 static void
3105 mgmt_rx_reo_ingress_list_ageout_timer_handler(void *arg)
3106 {
3107 	struct mgmt_rx_reo_ingress_list *ingress_list = arg;
3108 	struct mgmt_rx_reo_egress_list *egress_list;
3109 	QDF_STATUS ret;
3110 	struct mgmt_rx_reo_context *reo_ctx;
3111 	/**
3112 	 * Stores the pointer to the entry in ingress list for the latest aged
3113 	 * out frame. Latest aged out frame is the aged out frame in reorder
3114 	 * list which has the largest global time stamp value.
3115 	 */
3116 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
3117 	struct mgmt_rx_reo_context_info ctx_info = {0};
3118 
3119 	qdf_assert_always(ingress_list);
3120 	reo_ctx = mgmt_rx_reo_get_context_from_ingress_list(ingress_list);
3121 	qdf_assert_always(reo_ctx);
3122 	egress_list = &reo_ctx->egress_list;
3123 
3124 	qdf_timer_mod(&ingress_list->ageout_timer,
3125 		      MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
3126 
3127 	ret = mgmt_rx_reo_ageout_entries_ingress_list(ingress_list,
3128 						      &latest_aged_out_entry);
3129 	if (QDF_IS_STATUS_ERROR(ret)) {
3130 		mgmt_rx_reo_err("Failure to ageout entries in ingress list");
3131 		return;
3132 	}
3133 
3134 	if (!latest_aged_out_entry)
3135 		return;
3136 
3137 	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
3138 							      egress_list);
3139 	if (QDF_IS_STATUS_ERROR(ret)) {
3140 		mgmt_rx_reo_err("Ingress to egress list movement failure(%d)",
3141 				ret);
3142 		return;
3143 	}
3144 
3145 	ctx_info.context = MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT;
3146 	ctx_info.context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
3147 	ret = mgmt_rx_reo_release_egress_list_entries(reo_ctx, 0, &ctx_info);
3148 	if (QDF_IS_STATUS_ERROR(ret)) {
3149 		mgmt_rx_reo_err("Failure to release entries, ret = %d", ret);
3150 		return;
3151 	}
3152 }
3153 
3154 /**
3155  * mgmt_rx_reo_egress_inactivity_timer_handler() - Timer handler
3156  * for egress inactivity timer
3157  * @arg: Argument to timer handler
3158  *
3159  * This is the timer handler for tracking management Rx inactivity
3160  * across links.
3161  *
3162  * Return: void
3163  */
3164 static void
3165 mgmt_rx_reo_egress_inactivity_timer_handler(void *arg)
3166 {
3167 	struct mgmt_rx_reo_egress_list *egress_list = arg;
3168 	struct mgmt_rx_reo_list *reo_egress_list;
3169 	struct mgmt_rx_reo_frame_info *last_delivered_frame;
3170 
3171 	qdf_assert_always(egress_list);
3172 
3173 	reo_egress_list = &egress_list->reo_list;
3174 	last_delivered_frame = &reo_egress_list->last_released_frame;
3175 
3176 	qdf_spin_lock(&reo_egress_list->list_lock);
3177 
3178 	qdf_mem_zero(last_delivered_frame, sizeof(*last_delivered_frame));
3179 
3180 	qdf_spin_unlock(&reo_egress_list->list_lock);
3181 }
3182 
3183 /**
3184  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
3185  * frame received.
3186  * @frame_desc: Pointer to the frame descriptor
3187  * @entry: Pointer to the list entry
3188  *
3189  * This API prepares the reorder list entry corresponding to a management frame
3190  * to be consumed by host. This entry would be inserted at the appropriate
3191  * position in the reorder list.
3192  *
3193  * Return: QDF_STATUS
3194  */
3195 static QDF_STATUS
3196 mgmt_rx_reo_prepare_list_entry(
3197 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
3198 		struct mgmt_rx_reo_list_entry **entry)
3199 {
3200 	struct mgmt_rx_reo_list_entry *list_entry;
3201 	struct wlan_objmgr_pdev *pdev;
3202 	uint8_t link_id;
3203 	uint8_t ml_grp_id;
3204 
3205 	if (!frame_desc) {
3206 		mgmt_rx_reo_err("frame descriptor is null");
3207 		return QDF_STATUS_E_NULL_VALUE;
3208 	}
3209 
3210 	if (!entry) {
3211 		mgmt_rx_reo_err("Pointer to list entry is null");
3212 		return QDF_STATUS_E_NULL_VALUE;
3213 	}
3214 
3215 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
3216 	ml_grp_id = mgmt_rx_reo_get_mlo_grp_id(frame_desc->rx_params);
3217 
3218 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, ml_grp_id,
3219 					      WLAN_MGMT_RX_REO_ID);
3220 	if (!pdev) {
3221 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
3222 				link_id);
3223 		return QDF_STATUS_E_NULL_VALUE;
3224 	}
3225 
3226 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
3227 	if (!list_entry) {
3228 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
3229 		mgmt_rx_reo_err("List entry allocation failed");
3230 		return QDF_STATUS_E_NOMEM;
3231 	}
3232 
3233 	list_entry->pdev = pdev;
3234 	list_entry->nbuf = frame_desc->nbuf;
3235 	list_entry->rx_params = frame_desc->rx_params;
3236 	list_entry->wait_count = frame_desc->wait_count;
3237 	list_entry->initial_wait_count = frame_desc->wait_count;
3238 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
3239 		     qdf_min(sizeof(list_entry->shared_snapshots),
3240 			     sizeof(frame_desc->shared_snapshots)));
3241 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
3242 		     qdf_min(sizeof(list_entry->host_snapshot),
3243 			     sizeof(frame_desc->host_snapshot)));
3244 	list_entry->status = 0;
3245 	if (list_entry->wait_count.total_count)
3246 		list_entry->status |= STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
3247 	qdf_atomic_init(&list_entry->scheduled_count);
3248 
3249 	*entry = list_entry;
3250 
3251 	return QDF_STATUS_SUCCESS;
3252 }
3253 
3254 /**
3255  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
3256  * on the wait count of a frame received after that on air.
3257  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
3258  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
3259  *
3260  * This API optimizes the wait count of a frame based on the wait count of
3261  * a frame received after that on air. Old frame refers to the frame received
3262  * first on the air and new frame refers to the frame received after that.
3263  * We use the following fundamental idea. Wait counts for old frames can't be
3264  * more than wait counts for the new frame. Use this to optimize the wait count
3265  * for the old frames. Per link wait count of an old frame is minimum of the
3266  * per link wait count of the old frame and new frame.
3267  *
3268  * Return: QDF_STATUS
3269  */
3270 static QDF_STATUS
3271 mgmt_rx_reo_update_wait_count(
3272 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
3273 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
3274 {
3275 	uint8_t link_id;
3276 
3277 	qdf_assert_always(wait_count_old_frame);
3278 	qdf_assert_always(wait_count_new_frame);
3279 
3280 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3281 		if (wait_count_old_frame->per_link_count[link_id]) {
3282 			uint32_t temp_wait_count;
3283 			uint32_t wait_count_diff;
3284 
3285 			temp_wait_count =
3286 				wait_count_old_frame->per_link_count[link_id];
3287 			wait_count_old_frame->per_link_count[link_id] =
3288 				qdf_min(wait_count_old_frame->
3289 					per_link_count[link_id],
3290 					wait_count_new_frame->
3291 					per_link_count[link_id]);
3292 			wait_count_diff = temp_wait_count -
3293 				wait_count_old_frame->per_link_count[link_id];
3294 
3295 			wait_count_old_frame->total_count -= wait_count_diff;
3296 		}
3297 	}
3298 
3299 	return QDF_STATUS_SUCCESS;
3300 }
3301 
3302 /**
3303  * mgmt_rx_reo_update_ingress_list() - Modify the reorder list when a frame is
3304  * received
3305  * @ingress_list: Pointer to ingress list
3306  * @frame_desc: Pointer to frame descriptor
3307  * @new: pointer to the list entry for the current frame
3308  * @is_queued: Whether this frame is queued in the REO list
3309  *
3310  * API to update the reorder list on every management frame reception.
3311  * This API does the following things.
3312  *   a) Update the wait counts for all the frames in the reorder list with
3313  *      global time stamp <= current frame's global time stamp. We use the
3314  *      following principle for updating the wait count in this case.
3315  *      Let A and B be two management frames with global time stamp of A <=
3316  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
3317  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
3318  *      min(WAi, WBi).
3319  *   b) If the current frame is to be consumed by host, insert it in the
3320  *      reorder list such that the list is always sorted in the increasing order
3321  *      of global time stamp. Update the wait count of the current frame based
3322  *      on the frame next to it in the reorder list (if any).
3323  *   c) Update the wait count of the frames in the reorder list with global
3324  *      time stamp > current frame's global time stamp. Let the current frame
3325  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
3326  *      all the frames in the reorder list with global time stamp > current
3327  *      frame's global time stamp.
3328  *
3329  * Return: QDF_STATUS
3330  */
3331 static QDF_STATUS
3332 mgmt_rx_reo_update_ingress_list(struct mgmt_rx_reo_ingress_list *ingress_list,
3333 				struct mgmt_rx_reo_frame_descriptor *frame_desc,
3334 				struct mgmt_rx_reo_list_entry *new,
3335 				bool *is_queued)
3336 {
3337 	struct mgmt_rx_reo_list *reo_ingress_list;
3338 	struct mgmt_rx_reo_list_entry *cur;
3339 	struct mgmt_rx_reo_list_entry *least_greater = NULL;
3340 	bool least_greater_entry_found = false;
3341 	QDF_STATUS status;
3342 	uint16_t list_insertion_pos = 0;
3343 	uint32_t ts_new;
3344 
3345 	if (!ingress_list) {
3346 		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
3347 		return QDF_STATUS_E_NULL_VALUE;
3348 	}
3349 	reo_ingress_list = &ingress_list->reo_list;
3350 
3351 	if (!frame_desc) {
3352 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
3353 		return QDF_STATUS_E_NULL_VALUE;
3354 	}
3355 
3356 	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
3357 	      frame_desc->reo_required) != !new)
3358 		qdf_assert_always(0);
3359 
3360 	if (!is_queued) {
3361 		mgmt_rx_reo_err("Pointer to queued indication is null");
3362 		return QDF_STATUS_E_NULL_VALUE;
3363 	}
3364 	*is_queued = false;
3365 
3366 	ts_new = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
3367 
3368 	frame_desc->ingress_list_size_rx =
3369 				qdf_list_size(&reo_ingress_list->list);
3370 
3371 	qdf_list_for_each(&reo_ingress_list->list, cur, node) {
3372 		uint32_t ts_cur;
3373 
3374 		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
3375 
3376 		least_greater_entry_found =
3377 		     !mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_cur);
3378 		if (least_greater_entry_found) {
3379 			least_greater = cur;
3380 			break;
3381 		}
3382 
3383 		qdf_assert_always(!frame_desc->is_stale || cur->is_parallel_rx);
3384 
3385 		list_insertion_pos++;
3386 
3387 		status = mgmt_rx_reo_update_wait_count(&cur->wait_count,
3388 						       &frame_desc->wait_count);
3389 		if (QDF_IS_STATUS_ERROR(status))
3390 			return status;
3391 
3392 		if (cur->wait_count.total_count == 0)
3393 			cur->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
3394 	}
3395 
3396 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
3397 	    !frame_desc->is_stale && frame_desc->reo_required &&
3398 	    (frame_desc->queued_list != MGMT_RX_REO_LIST_TYPE_EGRESS)) {
3399 		bool overflow;
3400 
3401 		if (least_greater_entry_found) {
3402 			status = mgmt_rx_reo_update_wait_count(
3403 					&new->wait_count,
3404 					&least_greater->wait_count);
3405 
3406 			if (QDF_IS_STATUS_ERROR(status))
3407 				return status;
3408 
3409 			frame_desc->wait_count = new->wait_count;
3410 
3411 			if (new->wait_count.total_count == 0)
3412 				new->status &=
3413 					~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
3414 		}
3415 
3416 		new->ingress_list_insertion_ts = qdf_get_log_timestamp();
3417 		new->ingress_timestamp = frame_desc->ingress_timestamp;
3418 		new->is_parallel_rx = frame_desc->is_parallel_rx;
3419 		frame_desc->ingress_list_insertion_pos = list_insertion_pos;
3420 
3421 		if (least_greater_entry_found)
3422 			status = qdf_list_insert_before(
3423 					&reo_ingress_list->list, &new->node,
3424 					&least_greater->node);
3425 		else
3426 			status = qdf_list_insert_back(
3427 					&reo_ingress_list->list, &new->node);
3428 
3429 		if (QDF_IS_STATUS_ERROR(status))
3430 			return status;
3431 
3432 		*is_queued = true;
3433 		frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_INGRESS;
3434 
3435 		overflow = (qdf_list_size(&reo_ingress_list->list) >
3436 					  reo_ingress_list->max_list_size);
3437 		if (overflow) {
3438 			qdf_list_t *ingress_list_ptr = &reo_ingress_list->list;
3439 
3440 			reo_ingress_list->overflow_count++;
3441 			reo_ingress_list->last_overflow_ts =
3442 							qdf_get_log_timestamp();
3443 			mgmt_rx_reo_err_rl("Ingress overflow, cnt:%llu size:%u",
3444 					   reo_ingress_list->overflow_count,
3445 					   qdf_list_size(ingress_list_ptr));
3446 		}
3447 
3448 		if (new->wait_count.total_count == 0)
3449 			frame_desc->zero_wait_count_rx = true;
3450 
3451 		if (frame_desc->zero_wait_count_rx &&
3452 		    qdf_list_first_entry_or_null(&reo_ingress_list->list,
3453 						 struct mgmt_rx_reo_list_entry,
3454 						 node) == new)
3455 			frame_desc->immediate_delivery = true;
3456 	}
3457 
3458 	if (least_greater_entry_found) {
3459 		cur = least_greater;
3460 
3461 		qdf_list_for_each_from(&reo_ingress_list->list, cur, node) {
3462 			uint8_t frame_link_id;
3463 			struct mgmt_rx_reo_wait_count *wait_count;
3464 
3465 			frame_link_id =
3466 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
3467 			wait_count = &cur->wait_count;
3468 			if (wait_count->per_link_count[frame_link_id]) {
3469 				uint32_t old_wait_count;
3470 				uint32_t new_wait_count;
3471 				uint32_t wait_count_diff;
3472 				uint16_t pkt_ctr_delta;
3473 
3474 				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
3475 				old_wait_count =
3476 				      wait_count->per_link_count[frame_link_id];
3477 
3478 				if (old_wait_count >= pkt_ctr_delta)
3479 					new_wait_count = old_wait_count -
3480 							 pkt_ctr_delta;
3481 				else
3482 					new_wait_count = 0;
3483 
3484 				wait_count_diff = old_wait_count -
3485 						  new_wait_count;
3486 
3487 				wait_count->per_link_count[frame_link_id] =
3488 								new_wait_count;
3489 				wait_count->total_count -= wait_count_diff;
3490 
3491 				if (wait_count->total_count == 0)
3492 					cur->status &=
3493 					  ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
3494 			}
3495 		}
3496 	}
3497 
3498 	return QDF_STATUS_SUCCESS;
3499 }
3500 
/**
 * mgmt_rx_reo_update_egress_list() - Process an incoming frame against the
 * management rx-reorder egress list
 * @egress_list: Pointer to egress list
 * @frame_desc: Pointer to frame descriptor of the incoming frame
 * @new: Pre-allocated list entry for this frame; NULL when the frame is not
 *       a host-consumed frame requiring reorder
 * @is_queued: Out parameter; set to true only if @new is inserted into the
 *             egress list
 *
 * Checks whether the incoming frame is stale with respect to the last frame
 * released from the egress list and, if not, inserts @new into the egress
 * list in global-timestamp order when it is older than some entry already
 * present there.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_egress_list(struct mgmt_rx_reo_egress_list *egress_list,
			       struct mgmt_rx_reo_frame_descriptor *frame_desc,
			       struct mgmt_rx_reo_list_entry *new,
			       bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_egress_list;
	struct mgmt_rx_reo_list_entry *cur;
	struct mgmt_rx_reo_list_entry *last;
	struct mgmt_rx_reo_list_entry *least_greater = NULL;
	bool least_greater_entry_found = false;
	uint32_t ts_last;
	uint32_t ts_new;
	uint16_t list_insertion_pos = 0;
	QDF_STATUS ret;

	if (!egress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/*
	 * Invariant: @new is non-NULL if and only if the frame is a
	 * host-consumed frame that requires reordering.
	 */
	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	      frame_desc->reo_required) != !new)
		qdf_assert_always(0);

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	ts_new = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
	frame_desc->egress_list_size_rx = qdf_list_size(&reo_egress_list->list);

	ret = mgmt_rx_reo_is_stale_frame(&reo_egress_list->last_released_frame,
					 frame_desc);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	if (frame_desc->is_stale) {
		ret = mgmt_rx_reo_handle_stale_frame(reo_egress_list,
						     frame_desc);
		if (QDF_IS_STATUS_ERROR(ret))
			return ret;

		/*
		 * Any entry already in the egress list which is not newer
		 * than this stale frame must have been a parallel rx;
		 * assert that expectation.
		 */
		qdf_list_for_each(&reo_egress_list->list, cur, node) {
			uint32_t ts_cur;

			ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

			if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
								       ts_cur))
				break;

			qdf_assert_always(cur->is_parallel_rx);
		}

		return QDF_STATUS_SUCCESS;
	}

	/* Nothing to queue for frames which don't need reordering */
	if (!new)
		return QDF_STATUS_SUCCESS;

	if (qdf_list_empty(&reo_egress_list->list))
		return QDF_STATUS_SUCCESS;

	last = qdf_list_last_entry(&reo_egress_list->list,
				   struct mgmt_rx_reo_list_entry, node);
	qdf_assert_always(last);

	ts_last = mgmt_rx_reo_get_global_ts(last->rx_params);

	/*
	 * Frame newer than everything in the egress list belongs in the
	 * ingress list instead; leave it un-queued here.
	 */
	if (mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_last))
		return QDF_STATUS_SUCCESS;

	/* Find the first entry strictly newer than the incoming frame */
	qdf_list_for_each(&reo_egress_list->list, cur, node) {
		uint32_t ts_cur;

		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
							       ts_cur)) {
			least_greater = cur;
			least_greater_entry_found = true;
			break;
		}

		list_insertion_pos++;
	}
	/* Guaranteed by the ts_last check above */
	qdf_assert_always(least_greater_entry_found);

	/* Inherit the wait count from the least greater entry */
	ret = mgmt_rx_reo_update_wait_count(&new->wait_count,
					    &least_greater->wait_count);

	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	frame_desc->wait_count = new->wait_count;

	if (new->wait_count.total_count == 0)
		new->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;

	new->egress_list_insertion_ts = qdf_get_log_timestamp();
	new->ingress_timestamp = frame_desc->ingress_timestamp;
	new->is_parallel_rx = frame_desc->is_parallel_rx;
	new->status |= STATUS_OLDER_THAN_READY_TO_DELIVER_FRAMES;
	frame_desc->egress_list_insertion_pos = list_insertion_pos;

	ret = qdf_list_insert_before(&reo_egress_list->list, &new->node,
				     &least_greater->node);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
		ret = mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
		if (QDF_IS_STATUS_ERROR(ret)) {
			mgmt_rx_reo_err("Failed to handle egress overflow");
			qdf_assert_always(0);
		}
	}

	*is_queued = true;
	frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_EGRESS;

	if (frame_desc->wait_count.total_count == 0)
		frame_desc->zero_wait_count_rx = true;
	/* Frames queued to the egress list are always deliverable now */
	frame_desc->immediate_delivery = true;

	return QDF_STATUS_SUCCESS;
}
3637 
/**
 * mgmt_rx_reo_update_lists() - Process an incoming frame against both the
 * egress and ingress reorder lists
 * @ingress_list: Pointer to ingress list
 * @egress_list: Pointer to egress list
 * @frame_desc: Pointer to frame descriptor of the incoming frame
 * @is_queued: Out parameter; set to true if the frame got queued to either
 *             list
 *
 * Prepares a list entry for host-consumed frames requiring reorder, then
 * updates the egress list followed by the ingress list while holding the
 * respective list locks (ingress lock taken first, egress lock nested
 * inside it). A frame can be queued to at most one of the two lists; the
 * entry is freed here if neither list consumed it.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_lists(struct mgmt_rx_reo_ingress_list *ingress_list,
			 struct mgmt_rx_reo_egress_list *egress_list,
			 struct mgmt_rx_reo_frame_descriptor *frame_desc,
			 bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_ingress_list;
	struct mgmt_rx_reo_list *reo_egress_list;
	bool is_queued_to_ingress_list = false;
	bool is_queued_to_egress_list = false;
	QDF_STATUS status;
	struct mgmt_rx_reo_list_entry *new_entry = NULL;
	enum mgmt_rx_reo_list_type queued_list;

	if (!ingress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_ingress_list = &ingress_list->reo_list;

	if (!egress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	/* Prepare the list entry before acquiring lock */
	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	    frame_desc->reo_required) {
		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to prepare list entry");
			return QDF_STATUS_E_FAILURE;
		}
	}

	/* Lock order: ingress list lock first, egress list lock nested */
	qdf_spin_lock_bh(&reo_ingress_list->list_lock);

	qdf_spin_lock_bh(&reo_egress_list->list_lock);

	status = mgmt_rx_reo_update_egress_list(egress_list, frame_desc,
						new_entry,
						&is_queued_to_egress_list);
	if (QDF_IS_STATUS_ERROR(status))
		goto exit_release_egress_list_lock;

	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of egress list failed");
		qdf_assert_always(0);
	}

	qdf_spin_unlock_bh(&reo_egress_list->list_lock);

	status = mgmt_rx_reo_update_ingress_list(ingress_list, frame_desc,
						 new_entry,
						 &is_queued_to_ingress_list);
	if (QDF_IS_STATUS_ERROR(status))
		goto exit_release_ingress_list_lock;

	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of ingress list failed");
		qdf_assert_always(0);
	}

	status = QDF_STATUS_SUCCESS;
	goto exit_release_ingress_list_lock;

exit_release_egress_list_lock:
	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
exit_release_ingress_list_lock:
	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);

	/* A frame can be queued to at most one of the two lists */
	qdf_assert_always(!is_queued_to_ingress_list ||
			  !is_queued_to_egress_list);

	*is_queued = is_queued_to_ingress_list || is_queued_to_egress_list;

	queued_list = frame_desc->queued_list;
	qdf_assert_always(!(*is_queued &&
			    queued_list == MGMT_RX_REO_LIST_TYPE_INVALID));

	qdf_assert_always(new_entry || !*is_queued);

	/* Cleanup the entry if it is not queued */
	if (new_entry && !*is_queued) {
		/*
		 * New entry created is not inserted to reorder list, free
		 * the entry and release the reference
		 */
		wlan_objmgr_pdev_release_ref(new_entry->pdev,
					     WLAN_MGMT_RX_REO_ID);
		qdf_mem_free(new_entry);
	}

	return status;
}
3747 
3748 /**
3749  * mgmt_rx_reo_ingress_list_init() - Initialize the management rx-reorder
3750  * ingress list
3751  * @ingress_list: Pointer to ingress list
3752  *
3753  * API to initialize the management rx-reorder ingress list.
3754  *
3755  * Return: QDF_STATUS
3756  */
3757 static QDF_STATUS
3758 mgmt_rx_reo_ingress_list_init(struct mgmt_rx_reo_ingress_list *ingress_list)
3759 {
3760 	QDF_STATUS status;
3761 	struct mgmt_rx_reo_list *reo_ingress_list;
3762 
3763 	if (!ingress_list) {
3764 		mgmt_rx_reo_err("Ingress list is null");
3765 		return QDF_STATUS_E_NULL_VALUE;
3766 	}
3767 
3768 	reo_ingress_list = &ingress_list->reo_list;
3769 
3770 	reo_ingress_list->max_list_size = MGMT_RX_REO_INGRESS_LIST_MAX_SIZE;
3771 	qdf_list_create(&reo_ingress_list->list,
3772 			reo_ingress_list->max_list_size);
3773 	qdf_spinlock_create(&reo_ingress_list->list_lock);
3774 	qdf_mem_zero(&reo_ingress_list->last_inserted_frame,
3775 		     sizeof(reo_ingress_list->last_inserted_frame));
3776 	qdf_mem_zero(&reo_ingress_list->last_released_frame,
3777 		     sizeof(reo_ingress_list->last_released_frame));
3778 
3779 	ingress_list->list_entry_timeout_us =
3780 					MGMT_RX_REO_INGRESS_LIST_TIMEOUT_US;
3781 
3782 	status = qdf_timer_init(NULL, &ingress_list->ageout_timer,
3783 				mgmt_rx_reo_ingress_list_ageout_timer_handler,
3784 				ingress_list, QDF_TIMER_TYPE_WAKE_APPS);
3785 	if (QDF_IS_STATUS_ERROR(status)) {
3786 		mgmt_rx_reo_err("Failed to initialize ingress ageout timer");
3787 		return status;
3788 	}
3789 	qdf_timer_start(&ingress_list->ageout_timer,
3790 			MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
3791 
3792 	return QDF_STATUS_SUCCESS;
3793 }
3794 
3795 /**
3796  * mgmt_rx_reo_egress_list_init() - Initialize the management rx-reorder
3797  * egress list
3798  * @egress_list: Pointer to egress list
3799  *
3800  * API to initialize the management rx-reorder egress list.
3801  *
3802  * Return: QDF_STATUS
3803  */
3804 static QDF_STATUS
3805 mgmt_rx_reo_egress_list_init(struct mgmt_rx_reo_egress_list *egress_list)
3806 {
3807 	struct mgmt_rx_reo_list *reo_egress_list;
3808 	QDF_STATUS status;
3809 
3810 	if (!egress_list) {
3811 		mgmt_rx_reo_err("Egress list is null");
3812 		return QDF_STATUS_E_NULL_VALUE;
3813 	}
3814 
3815 	reo_egress_list = &egress_list->reo_list;
3816 
3817 	reo_egress_list->max_list_size = MGMT_RX_REO_EGRESS_LIST_MAX_SIZE;
3818 	qdf_list_create(&reo_egress_list->list, reo_egress_list->max_list_size);
3819 	qdf_spinlock_create(&reo_egress_list->list_lock);
3820 	qdf_mem_zero(&reo_egress_list->last_inserted_frame,
3821 		     sizeof(reo_egress_list->last_inserted_frame));
3822 	qdf_mem_zero(&reo_egress_list->last_released_frame,
3823 		     sizeof(reo_egress_list->last_released_frame));
3824 
3825 	status = qdf_timer_init(NULL, &egress_list->egress_inactivity_timer,
3826 				mgmt_rx_reo_egress_inactivity_timer_handler,
3827 				egress_list, QDF_TIMER_TYPE_WAKE_APPS);
3828 	if (QDF_IS_STATUS_ERROR(status)) {
3829 		mgmt_rx_reo_err("Failed to initialize egress inactivity timer");
3830 		return status;
3831 	}
3832 
3833 	return QDF_STATUS_SUCCESS;
3834 }
3835 
3836 /**
3837  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
3838  * Rx REO parameters.
3839  * @pdev: pdev extracted from the WMI event
3840  * @desc: pointer to frame descriptor
3841  *
3842  * Return: QDF_STATUS of operation
3843  */
3844 static QDF_STATUS
3845 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
3846 				      struct mgmt_rx_reo_frame_descriptor *desc)
3847 {
3848 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
3849 	struct mgmt_rx_reo_snapshot_params *host_ss;
3850 	struct mgmt_rx_reo_params *reo_params;
3851 	int pkt_ctr_delta;
3852 	struct wlan_objmgr_psoc *psoc;
3853 	uint16_t pkt_ctr_delta_thresh;
3854 
3855 	if (!desc) {
3856 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
3857 		return QDF_STATUS_E_NULL_VALUE;
3858 	}
3859 
3860 	if (!desc->rx_params) {
3861 		mgmt_rx_reo_err("Mgmt Rx params null");
3862 		return QDF_STATUS_E_NULL_VALUE;
3863 	}
3864 
3865 	reo_params = desc->rx_params->reo_params;
3866 	if (!reo_params) {
3867 		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
3868 		return QDF_STATUS_E_NULL_VALUE;
3869 	}
3870 
3871 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
3872 	if (!rx_reo_pdev_ctx) {
3873 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
3874 		return QDF_STATUS_E_FAILURE;
3875 	}
3876 
3877 	psoc = wlan_pdev_get_psoc(pdev);
3878 
3879 	/* FW should send valid REO parameters */
3880 	if (!reo_params->valid) {
3881 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
3882 		return QDF_STATUS_E_FAILURE;
3883 	}
3884 
3885 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
3886 
3887 	if (!host_ss->valid) {
3888 		desc->pkt_ctr_delta = 1;
3889 		goto update_host_ss;
3890 	}
3891 
3892 	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
3893 					     reo_params->mgmt_pkt_ctr)) {
3894 		mgmt_rx_reo_err("Cur frame ctr > last frame ctr for link = %u",
3895 				reo_params->link_id);
3896 		goto failure_debug;
3897 	}
3898 
3899 	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
3900 						      host_ss->mgmt_pkt_ctr);
3901 	qdf_assert_always(pkt_ctr_delta > 0);
3902 	desc->pkt_ctr_delta = pkt_ctr_delta;
3903 
3904 	if (pkt_ctr_delta == 1)
3905 		goto update_host_ss;
3906 
3907 	/*
3908 	 * Under back pressure scenarios, FW may drop management Rx frame
3909 	 * WMI events. So holes in the management packet counter is expected.
3910 	 * Add a debug print and optional assert to track the holes.
3911 	 */
3912 	mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta);
3913 	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
3914 			  reo_params->valid, reo_params->mgmt_pkt_ctr,
3915 			  reo_params->global_timestamp);
3916 	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u",
3917 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
3918 			  host_ss->global_timestamp);
3919 
3920 	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);
3921 
3922 	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
3923 		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
3924 				pkt_ctr_delta, pkt_ctr_delta_thresh,
3925 				reo_params->link_id);
3926 		goto failure_debug;
3927 	}
3928 
3929 update_host_ss:
3930 	host_ss->valid = true;
3931 	host_ss->global_timestamp = reo_params->global_timestamp;
3932 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
3933 
3934 	return QDF_STATUS_SUCCESS;
3935 
3936 failure_debug:
3937 	mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
3938 			reo_params->valid, reo_params->mgmt_pkt_ctr,
3939 			reo_params->global_timestamp);
3940 	mgmt_rx_reo_err("Last frame vailid = %u, pkt_ctr = %u, ts =%u",
3941 			host_ss->valid, host_ss->mgmt_pkt_ctr,
3942 			host_ss->global_timestamp);
3943 	qdf_assert_always(0);
3944 
3945 	return QDF_STATUS_E_FAILURE;
3946 }
3947 
3948 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
3949 /**
3950  * mgmt_rx_reo_ingress_frame_debug_info_enabled() - API to check whether ingress
3951  * frame info debug feaure is enabled
3952  * @ingress_frame_debug_info: Pointer to ingress frame debug info object
3953  *
3954  * Return: true or false
3955  */
3956 static bool
3957 mgmt_rx_reo_ingress_frame_debug_info_enabled
3958 		(struct reo_ingress_debug_info *ingress_frame_debug_info)
3959 {
3960 	return ingress_frame_debug_info->frame_list_size;
3961 }
3962 
/**
 * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
 * related to frames going into the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going into the management
 * Rx reorder module. Aggregates the per-link/per-descriptor-type and
 * per-link/per-list counters into row and column totals and prints them
 * as tables.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_ingress_frame_stats *stats;
	uint8_t link_id;
	uint8_t desc_type;
	uint8_t list_type;
	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_ingress_count = 0;
	uint64_t reo_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t reo_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_reo_count = 0;
	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_stale_count = 0;
	uint64_t parallel_rx_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t parallel_rx_per_desc[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_parallel_rx_count = 0;
	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_error_count = 0;
	uint64_t total_missing_count = 0;
	uint64_t total_queued = 0;
	uint64_t queued_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t queued_per_link[MAX_MLO_LINKS] = {0};
	uint64_t total_zero_wait_count_rx = 0;
	uint64_t zero_wait_count_rx_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t zero_wait_count_rx_per_link[MAX_MLO_LINKS] = {0};
	uint64_t total_immediate_delivery = 0;
	uint64_t immediate_delivery_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t immediate_delivery_per_link[MAX_MLO_LINKS] = {0};

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->ingress_frame_debug_info.stats;

	/* Per-link totals across all descriptor types */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
		     desc_type++) {
			ingress_count_per_link[link_id] +=
				stats->ingress_count[link_id][desc_type];
			reo_count_per_link[link_id] +=
				stats->reo_count[link_id][desc_type];
			stale_count_per_link[link_id] +=
					stats->stale_count[link_id][desc_type];
			error_count_per_link[link_id] +=
					stats->error_count[link_id][desc_type];
			parallel_rx_count_per_link[link_id] +=
				   stats->parallel_rx_count[link_id][desc_type];
		}

		total_ingress_count += ingress_count_per_link[link_id];
		total_reo_count += reo_count_per_link[link_id];
		total_stale_count += stale_count_per_link[link_id];
		total_error_count += error_count_per_link[link_id];
		total_parallel_rx_count += parallel_rx_count_per_link[link_id];
		total_missing_count += stats->missing_count[link_id];
	}

	/* Per-descriptor-type totals across all links */
	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
	     desc_type++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			ingress_count_per_desc_type[desc_type] +=
				stats->ingress_count[link_id][desc_type];
			reo_count_per_desc_type[desc_type] +=
				stats->reo_count[link_id][desc_type];
			stale_count_per_desc_type[desc_type] +=
					stats->stale_count[link_id][desc_type];
			error_count_per_desc_type[desc_type] +=
					stats->error_count[link_id][desc_type];
			parallel_rx_per_desc[desc_type] +=
				stats->parallel_rx_count[link_id][desc_type];
		}
	}

	/* Per-link totals across both reorder lists (ingress/egress) */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
		     list_type++) {
			queued_per_link[link_id] +=
				stats->queued_count[link_id][list_type];
			zero_wait_count_rx_per_link[link_id] +=
			    stats->zero_wait_count_rx_count[link_id][list_type];
			immediate_delivery_per_link[link_id] +=
			    stats->immediate_delivery_count[link_id][list_type];
		}

		total_queued += queued_per_link[link_id];
		total_zero_wait_count_rx +=
					zero_wait_count_rx_per_link[link_id];
		total_immediate_delivery +=
					immediate_delivery_per_link[link_id];
	}

	/* Per-list totals across all links */
	for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
	     list_type++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			queued_per_list[list_type] +=
				stats->queued_count[link_id][list_type];
			zero_wait_count_rx_per_list[list_type] +=
			    stats->zero_wait_count_rx_count[link_id][list_type];
			immediate_delivery_per_list[list_type] +=
			    stats->immediate_delivery_count[link_id][list_type];
		}
	}

	mgmt_rx_reo_alert("Ingress Frame Stats:");
	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->ingress_count[link_id][0],
				  stats->ingress_count[link_id][1],
				  stats->ingress_count[link_id][2],
				  ingress_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  ingress_count_per_desc_type[0],
			  ingress_count_per_desc_type[1],
			  ingress_count_per_desc_type[2],
			  total_ingress_count);

	mgmt_rx_reo_alert("\t2) Reo required Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->reo_count[link_id][0],
				  stats->reo_count[link_id][1],
				  stats->reo_count[link_id][2],
				  reo_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  reo_count_per_desc_type[0],
			  reo_count_per_desc_type[1],
			  reo_count_per_desc_type[2],
			  total_reo_count);

	mgmt_rx_reo_alert("\t3) Stale Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->stale_count[link_id][0],
				  stats->stale_count[link_id][1],
				  stats->stale_count[link_id][2],
				  stale_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  stale_count_per_desc_type[0],
			  stale_count_per_desc_type[1],
			  stale_count_per_desc_type[2],
			  total_stale_count);

	mgmt_rx_reo_alert("\t4) Parallel rx Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->parallel_rx_count[link_id][0],
				  stats->parallel_rx_count[link_id][1],
				  stats->parallel_rx_count[link_id][2],
				  parallel_rx_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  parallel_rx_per_desc[0], parallel_rx_per_desc[1],
			  parallel_rx_per_desc[2], total_parallel_rx_count);

	mgmt_rx_reo_alert("\t5) Error Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->error_count[link_id][0],
				  stats->error_count[link_id][1],
				  stats->error_count[link_id][2],
				  error_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  error_count_per_desc_type[0],
			  error_count_per_desc_type[1],
			  error_count_per_desc_type[2],
			  total_error_count);

	mgmt_rx_reo_alert("\t6) Per link stats:");
	mgmt_rx_reo_alert("\t----------------------------");
	mgmt_rx_reo_alert("\t|link id   | Missing frame |");
	mgmt_rx_reo_alert("\t|          |     count     |");
	mgmt_rx_reo_alert("\t----------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%15llu|", link_id,
				  stats->missing_count[link_id]);
		mgmt_rx_reo_alert("\t----------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%15llu|\n\n", "", total_missing_count);

	mgmt_rx_reo_alert("\t7) Host consumed frames related stats:");
	mgmt_rx_reo_alert("\tOverall:");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  queued_per_link[link_id],
				  zero_wait_count_rx_per_link[link_id],
				  immediate_delivery_per_link[link_id]);
		mgmt_rx_reo_alert("\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  total_queued,
			  total_zero_wait_count_rx,
			  total_immediate_delivery);

	/* List index 0 corresponds to the ingress list */
	mgmt_rx_reo_alert("\t\ta) Ingress List:");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  stats->queued_count[link_id][0],
				  stats->zero_wait_count_rx_count[link_id][0],
				  stats->immediate_delivery_count[link_id][0]);
		mgmt_rx_reo_alert("\t\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  queued_per_list[0],
			  zero_wait_count_rx_per_list[0],
			  immediate_delivery_per_list[0]);

	/* List index 1 corresponds to the egress list */
	mgmt_rx_reo_alert("\t\tb) Egress List:");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  stats->queued_count[link_id][1],
				  stats->zero_wait_count_rx_count[link_id][1],
				  stats->immediate_delivery_count[link_id][1]);
		mgmt_rx_reo_alert("\t\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  queued_per_list[1],
			  zero_wait_count_rx_per_list[1],
			  immediate_delivery_per_list[1]);

	mgmt_rx_reo_alert("\t8) Misc stats:");
	mgmt_rx_reo_alert("\t\tIngress list overflow count = %llu\n\n",
			  reo_ctx->ingress_list.reo_list.overflow_count);

	return QDF_STATUS_SUCCESS;
}
4250 
4251 /**
4252  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
4253  * the reorder algorithm.
4254  * @reo_ctx: management rx reorder context
4255  * @desc: Pointer to frame descriptor
4256  * @is_queued: Indicates whether this frame is queued to reorder list
4257  * @is_error: Indicates whether any error occurred during processing this frame
4258  * @context_id: context identifier
4259  *
4260  * Return: QDF_STATUS of operation
4261  */
4262 static QDF_STATUS
4263 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
4264 			      struct mgmt_rx_reo_frame_descriptor *desc,
4265 			      bool is_queued, bool is_error,
4266 			      int32_t context_id)
4267 {
4268 	struct reo_ingress_debug_info *ingress_frame_debug_info;
4269 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
4270 	struct reo_ingress_frame_stats *stats;
4271 	uint8_t link_id;
4272 	enum mgmt_rx_reo_list_type queued_list;
4273 
4274 	if (!reo_ctx || !desc)
4275 		return QDF_STATUS_E_NULL_VALUE;
4276 
4277 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
4278 
4279 	stats = &ingress_frame_debug_info->stats;
4280 	link_id = mgmt_rx_reo_get_link_id(desc->rx_params);
4281 	queued_list = desc->queued_list;
4282 	stats->ingress_count[link_id][desc->type]++;
4283 	if (desc->reo_required)
4284 		stats->reo_count[link_id][desc->type]++;
4285 	if (is_queued)
4286 		stats->queued_count[link_id][queued_list]++;
4287 	if (desc->zero_wait_count_rx)
4288 		stats->zero_wait_count_rx_count[link_id][queued_list]++;
4289 	if (desc->immediate_delivery)
4290 		stats->immediate_delivery_count[link_id][queued_list]++;
4291 	if (is_error)
4292 		stats->error_count[link_id][desc->type]++;
4293 	if (desc->is_stale)
4294 		stats->stale_count[link_id][desc->type]++;
4295 	if (desc->pkt_ctr_delta > 1)
4296 		stats->missing_count[link_id] += desc->pkt_ctr_delta - 1;
4297 	if (desc->is_parallel_rx)
4298 		stats->parallel_rx_count[link_id][desc->type]++;
4299 
4300 	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
4301 						(ingress_frame_debug_info))
4302 		return QDF_STATUS_SUCCESS;
4303 
4304 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
4305 			[ingress_frame_debug_info->next_index];
4306 
4307 	cur_frame_debug_info->link_id = link_id;
4308 	cur_frame_debug_info->mgmt_pkt_ctr =
4309 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
4310 	cur_frame_debug_info->global_timestamp =
4311 				mgmt_rx_reo_get_global_ts(desc->rx_params);
4312 	cur_frame_debug_info->start_timestamp =
4313 				mgmt_rx_reo_get_start_ts(desc->rx_params);
4314 	cur_frame_debug_info->end_timestamp =
4315 				mgmt_rx_reo_get_end_ts(desc->rx_params);
4316 	cur_frame_debug_info->duration_us =
4317 				mgmt_rx_reo_get_duration_us(desc->rx_params);
4318 	cur_frame_debug_info->desc_type = desc->type;
4319 	cur_frame_debug_info->frame_type = desc->frame_type;
4320 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
4321 	cur_frame_debug_info->wait_count = desc->wait_count;
4322 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
4323 		     desc->shared_snapshots,
4324 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
4325 			     sizeof(desc->shared_snapshots)));
4326 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
4327 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
4328 			     sizeof(desc->host_snapshot)));
4329 	cur_frame_debug_info->is_queued = is_queued;
4330 	cur_frame_debug_info->is_stale = desc->is_stale;
4331 	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
4332 	cur_frame_debug_info->queued_list = desc->queued_list;
4333 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
4334 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
4335 	cur_frame_debug_info->is_error = is_error;
4336 	cur_frame_debug_info->last_delivered_frame = desc->last_delivered_frame;
4337 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
4338 	cur_frame_debug_info->ingress_duration =
4339 			qdf_get_log_timestamp() - desc->ingress_timestamp;
4340 	cur_frame_debug_info->ingress_list_size_rx =
4341 					desc->ingress_list_size_rx;
4342 	cur_frame_debug_info->ingress_list_insertion_pos =
4343 					desc->ingress_list_insertion_pos;
4344 	cur_frame_debug_info->egress_list_size_rx =
4345 					desc->egress_list_size_rx;
4346 	cur_frame_debug_info->egress_list_insertion_pos =
4347 					desc->egress_list_insertion_pos;
4348 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
4349 	cur_frame_debug_info->reo_required = desc->reo_required;
4350 	cur_frame_debug_info->context_id = context_id;
4351 
4352 	ingress_frame_debug_info->next_index++;
4353 	ingress_frame_debug_info->next_index %=
4354 				ingress_frame_debug_info->frame_list_size;
4355 	if (ingress_frame_debug_info->next_index == 0)
4356 		ingress_frame_debug_info->wrap_aroud = true;
4357 
4358 	return QDF_STATUS_SUCCESS;
4359 }
4360 
4361 /**
4362  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
4363  * about the latest frames entered the reorder module
4364  * @reo_ctx: management rx reorder context
4365  * @num_frames: Number of frames for which the debug information is to be
4366  * printed. If @num_frames is 0, then debug information about all the frames
4367  * in the ring buffer will be  printed.
4368  *
4369  * Return: QDF_STATUS of operation
4370  */
4371 static QDF_STATUS
4372 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
4373 					   uint16_t num_frames)
4374 {
4375 	struct reo_ingress_debug_info *ingress_frame_debug_info;
4376 	int start_index;
4377 	uint16_t index;
4378 	uint16_t entry;
4379 	uint16_t num_valid_entries;
4380 	uint16_t num_entries_to_print;
4381 	char *boarder;
4382 
4383 	if (!reo_ctx)
4384 		return QDF_STATUS_E_NULL_VALUE;
4385 
4386 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
4387 
4388 	if (ingress_frame_debug_info->wrap_aroud)
4389 		num_valid_entries = ingress_frame_debug_info->frame_list_size;
4390 	else
4391 		num_valid_entries = ingress_frame_debug_info->next_index;
4392 
4393 	if (num_frames == 0) {
4394 		num_entries_to_print = num_valid_entries;
4395 
4396 		if (ingress_frame_debug_info->wrap_aroud)
4397 			start_index = ingress_frame_debug_info->next_index;
4398 		else
4399 			start_index = 0;
4400 	} else {
4401 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
4402 
4403 		start_index = (ingress_frame_debug_info->next_index -
4404 			       num_entries_to_print +
4405 			       ingress_frame_debug_info->frame_list_size)
4406 			      % ingress_frame_debug_info->frame_list_size;
4407 
4408 		qdf_assert_always(start_index >= 0 &&
4409 				  start_index < ingress_frame_debug_info->frame_list_size);
4410 	}
4411 
4412 	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
4413 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
4414 				num_frames,
4415 				ingress_frame_debug_info->wrap_aroud,
4416 				ingress_frame_debug_info->next_index);
4417 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
4418 				start_index, num_entries_to_print);
4419 
4420 	if (!num_entries_to_print)
4421 		return QDF_STATUS_SUCCESS;
4422 
4423 	boarder = ingress_frame_debug_info->boarder;
4424 
4425 	mgmt_rx_reo_alert_no_fl("%s", boarder);
4426 	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%4s|%11s|%6s|%5s|%6s|%5s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
4427 				"Index", "CPU", "D.type", "F.type", "F.subtype",
4428 				"Link", "SeqNo", "Global ts",
4429 				"Start ts", "End ts", "Dur", "Last ts",
4430 				"Ingress ts", "Flags", "List", "Ingress Dur",
4431 				"I Size", "I Pos", "E Size",
4432 				"E Pos", "Wait Count", "Snapshot : link 0",
4433 				"Snapshot : link 1", "Snapshot : link 2",
4434 				"Snapshot : link 3", "Snapshot : link 4",
4435 				"Snapshot : link 5");
4436 	mgmt_rx_reo_alert_no_fl("%s", boarder);
4437 
4438 	index = start_index;
4439 	for (entry = 0; entry < num_entries_to_print; entry++) {
4440 		struct reo_ingress_debug_frame_info *info;
4441 		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
4442 		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
4443 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
4444 		char flag_queued = ' ';
4445 		char flag_stale = ' ';
4446 		char flag_parallel_rx = ' ';
4447 		char flag_error = ' ';
4448 		char flag_zero_wait_count_rx = ' ';
4449 		char flag_immediate_delivery = ' ';
4450 		char flag_reo_required = ' ';
4451 		int64_t ts_last_delivered_frame = -1;
4452 		uint8_t link;
4453 
4454 		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];
4455 
4456 		if (info->last_delivered_frame.valid) {
4457 			struct mgmt_rx_reo_params *reo_params;
4458 
4459 			reo_params = &info->last_delivered_frame.reo_params;
4460 			ts_last_delivered_frame = reo_params->global_timestamp;
4461 		}
4462 
4463 		if (info->is_queued)
4464 			flag_queued = 'Q';
4465 
4466 		if (info->is_stale)
4467 			flag_stale = 'S';
4468 
4469 		if (info->is_parallel_rx)
4470 			flag_parallel_rx = 'P';
4471 
4472 		if (info->is_error)
4473 			flag_error = 'E';
4474 
4475 		if (info->zero_wait_count_rx)
4476 			flag_zero_wait_count_rx = 'Z';
4477 
4478 		if (info->immediate_delivery)
4479 			flag_immediate_delivery = 'I';
4480 
4481 		if (!info->reo_required)
4482 			flag_reo_required = 'N';
4483 
4484 		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c",flag_error,
4485 			 flag_stale, flag_parallel_rx, flag_queued,
4486 			 flag_zero_wait_count_rx, flag_immediate_delivery,
4487 			 flag_reo_required);
4488 		snprintf(wait_count, sizeof(wait_count),
4489 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
4490 			 info->wait_count.total_count,
4491 			 info->wait_count.per_link_count[0],
4492 			 info->wait_count.per_link_count[1],
4493 			 info->wait_count.per_link_count[2],
4494 			 info->wait_count.per_link_count[3],
4495 			 info->wait_count.per_link_count[4],
4496 			 info->wait_count.per_link_count[5]);
4497 
4498 		for (link = 0; link < MAX_MLO_LINKS; link++) {
4499 			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
4500 			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
4501 			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
4502 			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
4503 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
4504 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
4505 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
4506 			struct mgmt_rx_reo_snapshot_params *host_ss;
4507 
4508 			mac_hw_ss = &info->shared_snapshots
4509 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
4510 			fw_consumed_ss = &info->shared_snapshots
4511 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
4512 			fw_forwarded_ss = &info->shared_snapshots
4513 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
4514 			host_ss = &info->host_snapshot[link];
4515 
4516 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
4517 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
4518 				 mac_hw_ss->global_timestamp);
4519 			snprintf(fw_consumed, sizeof(fw_consumed),
4520 				 "(%1u, %5u, %10u)",
4521 				 fw_consumed_ss->valid,
4522 				 fw_consumed_ss->mgmt_pkt_ctr,
4523 				 fw_consumed_ss->global_timestamp);
4524 			snprintf(fw_forwarded, sizeof(fw_forwarded),
4525 				 "(%1u, %5u, %10u)",
4526 				 fw_forwarded_ss->valid,
4527 				 fw_forwarded_ss->mgmt_pkt_ctr,
4528 				 fw_forwarded_ss->global_timestamp);
4529 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
4530 				 host_ss->valid,
4531 				 host_ss->mgmt_pkt_ctr,
4532 				 host_ss->global_timestamp);
4533 			snprintf(snapshots[link], sizeof(snapshots[link]),
4534 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
4535 				 fw_forwarded, host);
4536 		}
4537 
4538 		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%11s|%4u|%11llu|%6d|%5d|%6d|%5d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
4539 					entry, info->cpu_id, info->desc_type,
4540 					info->frame_type, info->frame_subtype,
4541 					info->link_id,
4542 					info->mgmt_pkt_ctr,
4543 					info->global_timestamp,
4544 					info->start_timestamp,
4545 					info->end_timestamp,
4546 					info->duration_us,
4547 					ts_last_delivered_frame,
4548 					info->ingress_timestamp, flags,
4549 					info->queued_list,
4550 					info->ingress_duration,
4551 					info->ingress_list_size_rx,
4552 					info->ingress_list_insertion_pos,
4553 					info->egress_list_size_rx,
4554 					info->egress_list_insertion_pos,
4555 					wait_count,
4556 					snapshots[0], snapshots[1],
4557 					snapshots[2], snapshots[3],
4558 					snapshots[4], snapshots[5]);
4559 		mgmt_rx_reo_alert_no_fl("%s", boarder);
4560 
4561 		index++;
4562 		index %= ingress_frame_debug_info->frame_list_size;
4563 	}
4564 
4565 	return QDF_STATUS_SUCCESS;
4566 }
4567 #else
4568 /**
4569  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
4570  * related to frames going into the reorder module
4571  * @reo_ctx: Pointer to reorder context
4572  *
4573  * API to print the stats related to frames going into the management
4574  * Rx reorder module.
4575  *
4576  * Return: QDF_STATUS
4577  */
4578 static QDF_STATUS
4579 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
4580 {
4581 	return QDF_STATUS_SUCCESS;
4582 }
4583 
4584 /**
4585  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
4586  * the reorder algorithm.
4587  * @reo_ctx: management rx reorder context
4588  * @desc: Pointer to frame descriptor
4589  * @is_queued: Indicates whether this frame is queued to reorder list
4590  * @is_error: Indicates whether any error occurred during processing this frame
4591  * @context_id: context identifier
4592  *
4593  * Return: QDF_STATUS of operation
4594  */
4595 static QDF_STATUS
4596 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
4597 			      struct mgmt_rx_reo_frame_descriptor *desc,
4598 			      bool is_queued, bool is_error,
4599 			      int32_t context_id)
4600 {
4601 	return QDF_STATUS_SUCCESS;
4602 }
4603 
4604 /**
4605  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
4606  * the latest frames entering the reorder module
4607  * @reo_ctx: management rx reorder context
4608  *
4609  * Return: QDF_STATUS of operation
4610  */
4611 static QDF_STATUS
4612 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
4613 {
4614 	return QDF_STATUS_SUCCESS;
4615 }
4616 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
4617 
/*
 * wlan_mgmt_rx_reo_algo_entry() - Entry point of the management Rx reorder
 * algorithm for a received frame/event. Updates the host snapshot, computes
 * the wait count, inserts the frame into the ingress/egress lists under the
 * reorder entry lock, and finally releases any egress entries that became
 * ready. On success *is_queued tells the caller whether the frame was
 * queued to the reorder list (i.e. ownership was taken by the reorder
 * module) or can be delivered directly.
 */
QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	struct mgmt_rx_reo_ingress_list *ingress_list;
	struct mgmt_rx_reo_egress_list *egress_list;
	QDF_STATUS ret;
	int16_t cur_link;
	struct mgmt_rx_reo_context_info ctx_info = {0};
	int32_t context_id = 0;

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}
	ingress_list = &reo_ctx->ingress_list;
	egress_list = &reo_ctx->egress_list;

	/**
	 * Critical Section = Host snapshot update + Calculation of wait
	 * counts + Update reorder list. Following section describes the
	 * motivation for making this a critical section.
	 * Lets take an example of 2 links (Link A & B) and each has received
	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
	 * MLO global time stamp of B1. Host is concurrently executing
	 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs.
	 *
	 * A lock less version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is
	 * as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     Host snapshot update
	 *     Calculation of wait counts
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * We may run into race conditions under the following sequence of
	 * operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1
	 * 2. Host snapshot update for link B in context of frame B1
	 * 3. Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 6. Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This leads to incorrect behaviour as B1 goes to upper layer before
	 * A1.
	 *
	 * To prevent this lets make Host snapshot update + Calculate wait count
	 * a critical section by adding locks. The updated version of the API
	 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *     UNLOCK
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * With this API also We may run into race conditions under the
	 * following sequence of operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1 +
	 *    Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 2. Host snapshot update for link B in context of frame B1 +
	 *    Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This also leads to incorrect behaviour as B1 goes to upper layer
	 * before A1.
	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count + Update reorder list a critical section by adding locks.
	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
	 * is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *         Update reorder list
	 *     UNLOCK
	 *     Release to upper layer
	 * }
	 */
	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);

	/* Sanity: reo params must be valid and this must be a mgmt frame */
	cur_link = mgmt_rx_reo_get_link_id(desc->rx_params);
	qdf_assert_always(desc->rx_params->reo_params->valid);
	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);

	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
	    desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME)
		qdf_assert_always(desc->rx_params->reo_params->duration_us);

	/* Update the Host snapshot */
	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Compute wait count for this frame/event */
	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	ctx_info.in_reo_params = *desc->rx_params->reo_params;
	/* Update ingress and egress list */
	ret = mgmt_rx_reo_update_lists(ingress_list, egress_list, desc,
				       is_queued);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, false,
					    context_id);
	if (QDF_IS_STATUS_ERROR(ret)) {
		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
		return ret;
	}

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	/* Outside the critical section: age entries into the egress list */
	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
							      egress_list);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	ctx_info.context = MGMT_RX_REO_CONTEXT_MGMT_RX;
	ctx_info.context_id = context_id;

	/* Finally, release the entries for which pending frame is received */
	return mgmt_rx_reo_release_egress_list_entries(reo_ctx, 1 << cur_link,
						       &ctx_info);

failure:
	/**
	 * Ignore the return value of this function call, return
	 * the actual reason for failure.
	 * Note: context_id is still 0 here since the failure happened
	 * before a context id was allocated.
	 */
	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true,
				      context_id);

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	return ret;
}
4799 
4800 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
4801 /**
4802  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
4803  * context.
4804  * @reo_context: Pointer to reo context
4805  *
4806  * Return: QDF_STATUS of operation
4807  */
4808 static inline QDF_STATUS
4809 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
4810 {
4811 	return QDF_STATUS_SUCCESS;
4812 }
4813 
4814 /**
4815  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
4816  * context.
4817  * @reo_context: Pointer to reo context
4818  *
4819  * Return: QDF_STATUS of operation
4820  */
4821 static inline QDF_STATUS
4822 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
4823 {
4824 	return QDF_STATUS_SUCCESS;
4825 }
4826 
/* No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
4832 
/* No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
4838 #else
4839 /**
4840  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
4841  * master management frame list
4842  * @master_frame_list: pointer to master management frame list
4843  * @frame: pointer to management frame parameters
4844  *
4845  * This API removes frames from the master management frame list. This API is
4846  * used in case of FW consumed management frames or management frames which
4847  * are dropped at host due to any error.
4848  *
4849  * Return: QDF_STATUS of operation
4850  */
4851 static QDF_STATUS
4852 mgmt_rx_reo_sim_remove_frame_from_master_list(
4853 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
4854 		const struct mgmt_rx_frame_params *frame)
4855 {
4856 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
4857 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
4858 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
4859 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
4860 	QDF_STATUS status;
4861 
4862 	if (!master_frame_list) {
4863 		mgmt_rx_reo_err("Mgmt master frame list is null");
4864 		return QDF_STATUS_E_NULL_VALUE;
4865 	}
4866 
4867 	if (!frame) {
4868 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
4869 		return QDF_STATUS_E_NULL_VALUE;
4870 	}
4871 
4872 	qdf_spin_lock(&master_frame_list->lock);
4873 
4874 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
4875 			  node) {
4876 		if (pending_entry->params.link_id == frame->link_id &&
4877 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4878 		    pending_entry->params.global_timestamp ==
4879 		    frame->global_timestamp) {
4880 			matching_pend_entry = pending_entry;
4881 			break;
4882 		}
4883 	}
4884 
4885 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
4886 		if (stale_entry->params.link_id == frame->link_id &&
4887 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4888 		    stale_entry->params.global_timestamp ==
4889 		    frame->global_timestamp) {
4890 			matching_stale_entry = stale_entry;
4891 			break;
4892 		}
4893 	}
4894 
4895 	/* Found in pending and stale list. Duplicate entries, assert */
4896 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
4897 
4898 	if (!matching_pend_entry && !matching_stale_entry) {
4899 		qdf_spin_unlock(&master_frame_list->lock);
4900 		mgmt_rx_reo_err("No matching frame in pend/stale list");
4901 		return QDF_STATUS_E_FAILURE;
4902 	}
4903 
4904 	if (matching_pend_entry) {
4905 		status = qdf_list_remove_node(&master_frame_list->pending_list,
4906 					      &matching_pend_entry->node);
4907 		if (QDF_IS_STATUS_ERROR(status)) {
4908 			qdf_spin_unlock(&master_frame_list->lock);
4909 			mgmt_rx_reo_err("Failed to remove the matching entry");
4910 			return status;
4911 		}
4912 
4913 		qdf_mem_free(matching_pend_entry);
4914 	}
4915 
4916 	if (matching_stale_entry) {
4917 		status = qdf_list_remove_node(&master_frame_list->stale_list,
4918 					      &matching_stale_entry->node);
4919 		if (QDF_IS_STATUS_ERROR(status)) {
4920 			qdf_spin_unlock(&master_frame_list->lock);
4921 			mgmt_rx_reo_err("Failed to remove the matching entry");
4922 			return status;
4923 		}
4924 
4925 		qdf_mem_free(matching_stale_entry);
4926 	}
4927 
4928 	qdf_spin_unlock(&master_frame_list->lock);
4929 
4930 	return QDF_STATUS_SUCCESS;
4931 }
4932 
4933 /**
4934  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
4935  * pending management frame list
4936  * @master_frame_list: pointer to master management frame list
4937  * @frame: pointer to management frame parameters
4938  *
4939  * This API removes frames from the pending management frame list. This API is
4940  * used in case of FW consumed management frames or management frames which
4941  * are dropped at host due to any error.
4942  *
4943  * Return: QDF_STATUS of operation
4944  */
4945 static QDF_STATUS
4946 mgmt_rx_reo_sim_remove_frame_from_pending_list(
4947 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
4948 		const struct mgmt_rx_frame_params *frame)
4949 {
4950 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
4951 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
4952 	QDF_STATUS status;
4953 
4954 	if (!master_frame_list) {
4955 		mgmt_rx_reo_err("Mgmt master frame list is null");
4956 		return QDF_STATUS_E_NULL_VALUE;
4957 	}
4958 
4959 	if (!frame) {
4960 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
4961 		return QDF_STATUS_E_NULL_VALUE;
4962 	}
4963 
4964 	qdf_spin_lock(&master_frame_list->lock);
4965 
4966 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
4967 		if (cur_entry->params.link_id == frame->link_id &&
4968 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4969 		    cur_entry->params.global_timestamp ==
4970 		    frame->global_timestamp) {
4971 			matching_entry = cur_entry;
4972 			break;
4973 		}
4974 	}
4975 
4976 	if (!matching_entry) {
4977 		qdf_spin_unlock(&master_frame_list->lock);
4978 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
4979 		return QDF_STATUS_E_FAILURE;
4980 	}
4981 
4982 	status = qdf_list_remove_node(&master_frame_list->pending_list,
4983 				      &matching_entry->node);
4984 	if (QDF_IS_STATUS_ERROR(status)) {
4985 		qdf_spin_unlock(&master_frame_list->lock);
4986 		mgmt_rx_reo_err("Failed to remove the matching entry");
4987 		return status;
4988 	}
4989 
4990 	qdf_mem_free(matching_entry);
4991 
4992 	qdf_spin_unlock(&master_frame_list->lock);
4993 
4994 
4995 	return QDF_STATUS_SUCCESS;
4996 }
4997 
4998 /**
4999  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
5000  * pending management frame list
5001  * @master_frame_list: pointer to master management frame list
5002  * @frame: pointer to management frame parameters
5003  *
5004  * This API inserts frames to the pending management frame list. This API is
5005  * used to insert frames generated by the MAC HW to the pending frame list.
5006  *
5007  * Return: QDF_STATUS of operation
5008  */
5009 static QDF_STATUS
5010 mgmt_rx_reo_sim_add_frame_to_pending_list(
5011 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
5012 		const struct mgmt_rx_frame_params *frame)
5013 {
5014 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
5015 	QDF_STATUS status;
5016 
5017 	if (!master_frame_list) {
5018 		mgmt_rx_reo_err("Mgmt master frame list is null");
5019 		return QDF_STATUS_E_NULL_VALUE;
5020 	}
5021 
5022 	if (!frame) {
5023 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
5024 		return QDF_STATUS_E_NULL_VALUE;
5025 	}
5026 
5027 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
5028 	if (!new_entry) {
5029 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
5030 		return QDF_STATUS_E_NOMEM;
5031 	}
5032 
5033 	new_entry->params = *frame;
5034 
5035 	qdf_spin_lock(&master_frame_list->lock);
5036 
5037 	status = qdf_list_insert_back(&master_frame_list->pending_list,
5038 				      &new_entry->node);
5039 
5040 	qdf_spin_unlock(&master_frame_list->lock);
5041 
5042 	if (QDF_IS_STATUS_ERROR(status)) {
5043 		mgmt_rx_reo_err("Failed to add frame to pending list");
5044 		qdf_mem_free(new_entry);
5045 		return status;
5046 	}
5047 
5048 	return QDF_STATUS_SUCCESS;
5049 }
5050 
/*
 * Simulation-only Rx handler: verifies that frames reach the upper layer in
 * non-decreasing order of MLO global time stamp by matching the delivered
 * frame against the head of the pending master frame list, and removes the
 * matching entry on success. Any ordering violation is treated as fatal.
 */
QDF_STATUS
mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
				 struct mgmt_rx_event_params *mgmt_rx_params)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	QDF_STATUS status;
	struct mgmt_rx_reo_params *reo_params;

	if (!mgmt_rx_params) {
		mgmt_rx_reo_err("Mgmt rx params null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* NOTE(review): reo_params is not NULL-checked before use below;
	 * presumably the caller guarantees it is valid — confirm.
	 */
	reo_params = mgmt_rx_params->reo_params;

	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
	if (!reo_context) {
		mgmt_rx_reo_err("Mgmt reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	qdf_spin_lock(&sim_context->master_frame_list.lock);

	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
		/* A delivered frame with no pending entry is a reorder bug */
		qdf_spin_unlock(&sim_context->master_frame_list.lock);
		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
		qdf_assert_always(0);
	} else {
		struct mgmt_rx_frame_params *cur_entry_params;
		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;

		/**
		 * Make sure the frames delivered to upper layer are in the
		 * increasing order of global time stamp. For that the frame
		 * which is being delivered should be present at the head of the
		 * pending frame list. There could be multiple frames with the
		 * same global time stamp in the pending frame list. Search
		 * among all the frames at the head of the list which has the
		 * same global time stamp as the frame which is being delivered.
		 * To find matching frame, check whether packet counter,
		 * global time stamp and link id are same.
		 */
		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
				  cur_entry, node) {
			cur_entry_params = &cur_entry->params;

			if (cur_entry_params->global_timestamp !=
			    reo_params->global_timestamp)
				break;

			if (cur_entry_params->link_id == reo_params->link_id &&
			    cur_entry_params->mgmt_pkt_ctr ==
			    reo_params->mgmt_pkt_ctr) {
				matching_entry = cur_entry;
				break;
			}
		}

		if (!matching_entry) {
			/*
			 * NOTE(review): if qdf_assert_always() is non-fatal
			 * in this build, execution continues with
			 * matching_entry == NULL and the remove/free below
			 * would dereference a NULL pointer — confirm the
			 * assert always halts here.
			 */
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("reo sim failure: mismatch");
			qdf_assert_always(0);
		}

		status = qdf_list_remove_node(
				&sim_context->master_frame_list.pending_list,
				&matching_entry->node);
		/* Entry is freed even if the unlink reported a failure */
		qdf_mem_free(matching_entry);

		if (QDF_IS_STATUS_ERROR(status)) {
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("Failed to remove matching entry");
			return status;
		}
	}

	qdf_spin_unlock(&sim_context->master_frame_list.lock);

	mgmt_rx_reo_debug("Successfully processed mgmt frame");
	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
			  reo_params->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
5140 
5141 /**
5142  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
5143  * @percentage_true: probability (in percentage) of true
5144  *
5145  * API to generate true with probability @percentage_true % and false with
5146  * probability (100 - @percentage_true) %.
5147  *
5148  * Return: true with probability @percentage_true % and false with probability
5149  * (100 - @percentage_true) %
5150  */
5151 static bool
5152 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
5153 {
5154 	uint32_t rand;
5155 
5156 	if (percentage_true > 100) {
5157 		mgmt_rx_reo_err("Invalid probability value for true, %u",
5158 				percentage_true);
5159 		return -EINVAL;
5160 	}
5161 
5162 	get_random_bytes(&rand, sizeof(rand));
5163 
5164 	return ((rand % 100) < percentage_true);
5165 }
5166 
5167 /**
5168  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
5169  * value in the range [0, max)
5170  * @max: upper limit for the output
5171  *
5172  * API to generate random unsigned integer value in the range [0, max).
5173  *
5174  * Return: unsigned integer value in the range [0, max)
5175  */
5176 static uint32_t
5177 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
5178 {
5179 	uint32_t rand;
5180 
5181 	get_random_bytes(&rand, sizeof(rand));
5182 
5183 	return (rand % max);
5184 }
5185 
5186 /**
5187  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
5188  * @sleeptime_us: Sleep time in micro seconds
5189  *
5190  * This API uses msleep() internally. So the granularity is limited to
5191  * milliseconds.
5192  *
5193  * Return: none
5194  */
5195 static void
5196 mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
5197 {
5198 	msleep(sleeptime_us / USEC_PER_MSEC);
5199 }
5200 
5201 /**
5202  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
5203  * layer
5204  * @arg: Argument
5205  *
5206  * This API handles the management frame at the host layer. This is applicable
5207  * for simulation alone.
5208  *
5209  * Return: none
5210  */
5211 static void
5212 mgmt_rx_reo_sim_frame_handler_host(void *arg)
5213 {
5214 	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
5215 	uint32_t fw_to_host_delay_us;
5216 	bool is_error_frame = false;
5217 	int8_t link_id = -1;
5218 	struct mgmt_rx_event_params *rx_params;
5219 	QDF_STATUS status;
5220 	struct mgmt_rx_reo_sim_context *sim_context;
5221 	struct wlan_objmgr_pdev *pdev;
5222 	uint8_t ml_grp_id;
5223 
5224 	if (!frame_fw) {
5225 		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
5226 				link_id);
5227 		goto error_print;
5228 	}
5229 
5230 	link_id = frame_fw->params.link_id;
5231 
5232 	sim_context = frame_fw->sim_context;
5233 	if (!sim_context) {
5234 		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
5235 				link_id);
5236 		goto error_free_fw_frame;
5237 	}
5238 
5239 	ml_grp_id = sim_context->mlo_grp_id;
5240 
5241 	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
5242 			      mgmt_rx_reo_sim_get_random_unsigned_int(
5243 			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);
5244 
5245 	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);
5246 
5247 	if (!frame_fw->is_consumed_by_fw) {
5248 		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
5249 				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);
5250 
5251 		/**
5252 		 * This frame should be present in pending/stale list of the
5253 		 * master frame list. Error frames need not be reordered
5254 		 * by reorder algorithm. It is just used for book
5255 		 * keeping purposes. Hence remove it from the master list.
5256 		 */
5257 		if (is_error_frame) {
5258 			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
5259 					&sim_context->master_frame_list,
5260 					&frame_fw->params);
5261 
5262 			if (QDF_IS_STATUS_ERROR(status)) {
5263 				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
5264 						link_id);
5265 				qdf_assert_always(0);
5266 			}
5267 		}
5268 	}
5269 
5270 	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
5271 			  link_id, frame_fw->params.global_timestamp,
5272 			  frame_fw->params.mgmt_pkt_ctr,
5273 			  frame_fw->is_consumed_by_fw, is_error_frame);
5274 
5275 	rx_params = alloc_mgmt_rx_event_params();
5276 	if (!rx_params) {
5277 		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
5278 				link_id);
5279 		goto error_free_fw_frame;
5280 	}
5281 
5282 	rx_params->reo_params->link_id = frame_fw->params.link_id;
5283 	rx_params->reo_params->global_timestamp =
5284 					frame_fw->params.global_timestamp;
5285 	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
5286 	rx_params->reo_params->valid = true;
5287 
5288 	pdev = wlan_get_pdev_from_mlo_link_id(
5289 			link_id, ml_grp_id, WLAN_MGMT_RX_REO_SIM_ID);
5290 	if (!pdev) {
5291 		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
5292 		goto error_free_mgmt_rx_event_params;
5293 	}
5294 
5295 	if (is_error_frame) {
5296 		status = tgt_mgmt_rx_reo_host_drop_handler(
5297 						pdev, rx_params->reo_params);
5298 		free_mgmt_rx_event_params(rx_params);
5299 	} else if (frame_fw->is_consumed_by_fw) {
5300 		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
5301 						pdev, rx_params->reo_params);
5302 		free_mgmt_rx_event_params(rx_params);
5303 	} else {
5304 		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
5305 	}
5306 
5307 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
5308 
5309 	if (QDF_IS_STATUS_ERROR(status)) {
5310 		mgmt_rx_reo_err("Failed to execute reo algorithm");
5311 		goto error_free_fw_frame;
5312 	}
5313 
5314 	qdf_mem_free(frame_fw);
5315 
5316 	return;
5317 
5318 error_free_mgmt_rx_event_params:
5319 	free_mgmt_rx_event_params(rx_params);
5320 error_free_fw_frame:
5321 	qdf_mem_free(frame_fw);
5322 error_print:
5323 	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
5324 			link_id);
5325 }
5326 
5327 /**
5328  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
5329  * frame reordering
5330  * @link_id: link id
5331  * @id: snapshot id
5332  * @value: snapshot value
5333  * @ml_grp_id: MLO group id which it belongs to
5334  *
5335  * This API writes the snapshots used for management frame reordering. MAC HW
5336  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
5337  * snapshots.
5338  *
5339  * Return: QDF_STATUS
5340  */
5341 static QDF_STATUS
5342 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id, uint8_t ml_grp_id,
5343 			       enum mgmt_rx_reo_shared_snapshot_id id,
5344 			       struct mgmt_rx_reo_shared_snapshot value)
5345 {
5346 	struct wlan_objmgr_pdev *pdev;
5347 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
5348 	QDF_STATUS status;
5349 
5350 	pdev = wlan_get_pdev_from_mlo_link_id(
5351 			link_id, ml_grp_id,
5352 			WLAN_MGMT_RX_REO_SIM_ID);
5353 
5354 	if (!pdev) {
5355 		mgmt_rx_reo_err("pdev is null");
5356 		return QDF_STATUS_E_NULL_VALUE;
5357 	}
5358 
5359 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
5360 						      &snapshot_address);
5361 
5362 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
5363 
5364 	if (QDF_IS_STATUS_ERROR(status)) {
5365 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
5366 				id, pdev);
5367 		return QDF_STATUS_E_FAILURE;
5368 	}
5369 
5370 	snapshot_address->mgmt_rx_reo_snapshot_low =
5371 						value.mgmt_rx_reo_snapshot_low;
5372 	snapshot_address->mgmt_rx_reo_snapshot_high =
5373 						value.mgmt_rx_reo_snapshot_high;
5374 
5375 	return QDF_STATUS_SUCCESS;
5376 }
5377 
5378 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
5379 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
5380 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
5381 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
5382 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
5383 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)
5384 
5385 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
5386 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
5387 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
5388 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
5389 
5390 /**
5391  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
5392  * management frame
5393  * @global_timestamp: global time stamp
5394  * @mgmt_pkt_ctr: management packet counter
5395  *
5396  * This API gets the snapshot value for a frame with time stamp
5397  * @global_timestamp and sequence number @mgmt_pkt_ctr.
5398  *
5399  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
5400  */
5401 static struct mgmt_rx_reo_shared_snapshot
5402 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
5403 				   uint16_t mgmt_pkt_ctr)
5404 {
5405 	struct mgmt_rx_reo_shared_snapshot snapshot = {0};
5406 
5407 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
5408 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
5409 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
5410 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
5411 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
5412 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
5413 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
5414 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
5415 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
5416 		     global_timestamp);
5417 
5418 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
5419 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
5420 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
5421 		     global_timestamp >> 15);
5422 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
5423 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
5424 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
5425 		     mgmt_pkt_ctr);
5426 
5427 	return snapshot;
5428 }
5429 
5430 /**
5431  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
5432  * @arg: Argument
5433  *
5434  * This API handles the management frame at the fw layer. This is applicable
5435  * for simulation alone.
5436  *
5437  * Return: none
5438  */
5439 static void
5440 mgmt_rx_reo_sim_frame_handler_fw(void *arg)
5441 {
5442 	struct mgmt_rx_frame_mac_hw *frame_hw =
5443 					(struct mgmt_rx_frame_mac_hw *)arg;
5444 	uint32_t mac_hw_to_fw_delay_us;
5445 	bool is_consumed_by_fw;
5446 	struct  mgmt_rx_frame_fw *frame_fw;
5447 	int8_t link_id = -1;
5448 	QDF_STATUS status;
5449 	struct mgmt_rx_reo_sim_context *sim_context;
5450 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5451 	struct mgmt_rx_reo_shared_snapshot snapshot_value;
5452 	bool ret;
5453 	uint8_t ml_grp_id;
5454 
5455 	if (!frame_hw) {
5456 		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
5457 				link_id);
5458 		qdf_assert_always(0);
5459 	}
5460 
5461 	link_id = frame_hw->params.link_id;
5462 
5463 	sim_context = frame_hw->sim_context;
5464 	if (!sim_context) {
5465 		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
5466 				link_id);
5467 		goto error_free_mac_hw_frame;
5468 	}
5469 
5470 	ml_grp_id = sim_context->mlo_grp_id;
5471 
5472 	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
5473 			mgmt_rx_reo_sim_get_random_unsigned_int(
5474 			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
5475 	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);
5476 
5477 	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
5478 			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);
5479 
5480 	if (is_consumed_by_fw) {
5481 		/**
5482 		 * This frame should be present in pending/stale list of the
5483 		 * master frame list. FW consumed frames need not be reordered
5484 		 * by reorder algorithm. It is just used for book
5485 		 * keeping purposes. Hence remove it from the master list.
5486 		 */
5487 		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
5488 					&sim_context->master_frame_list,
5489 					&frame_hw->params);
5490 
5491 		if (QDF_IS_STATUS_ERROR(status)) {
5492 			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
5493 					link_id);
5494 			qdf_assert_always(0);
5495 		}
5496 	}
5497 
5498 	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
5499 			  link_id, frame_hw->params.global_timestamp,
5500 			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);
5501 
5502 	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
5503 	if (!frame_fw) {
5504 		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
5505 				link_id);
5506 		goto error_free_mac_hw_frame;
5507 	}
5508 
5509 	frame_fw->params = frame_hw->params;
5510 	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
5511 	frame_fw->sim_context = frame_hw->sim_context;
5512 
5513 	snapshot_id = is_consumed_by_fw ?
5514 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
5515 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED;
5516 
5517 	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
5518 					frame_hw->params.global_timestamp,
5519 					frame_hw->params.mgmt_pkt_ctr);
5520 
5521 	status = mgmt_rx_reo_sim_write_snapshot(
5522 			link_id, ml_grp_id,
5523 			snapshot_id, snapshot_value);
5524 
5525 	if (QDF_IS_STATUS_ERROR(status)) {
5526 		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
5527 				link_id, snapshot_id);
5528 		goto error_free_fw_frame;
5529 	}
5530 
5531 	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
5532 				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
5533 	if (QDF_IS_STATUS_ERROR(status)) {
5534 		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
5535 		goto error_free_fw_frame;
5536 	}
5537 
5538 	ret = qdf_queue_work(
5539 			NULL, sim_context->host_mgmt_frame_handler[link_id],
5540 			&frame_fw->frame_handler_host);
5541 	if (!ret) {
5542 		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
5543 				link_id);
5544 		goto error_free_fw_frame;
5545 	}
5546 
5547 	qdf_mem_free(frame_hw);
5548 
5549 	return;
5550 
5551 error_free_fw_frame:
5552 	qdf_mem_free(frame_fw);
5553 error_free_mac_hw_frame:
5554 	qdf_mem_free(frame_hw);
5555 
5556 	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
5557 			link_id);
5558 }
5559 
5560 /**
5561  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
5562  * from the index to the valid link list
5563  * @valid_link_list_index: Index to list of valid links
5564  *
5565  * Return: link id
5566  */
5567 static int8_t
5568 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
5569 {
5570 	struct mgmt_rx_reo_sim_context *sim_context;
5571 
5572 	if (valid_link_list_index >= MAX_MLO_LINKS) {
5573 		mgmt_rx_reo_err("Invalid index %u to valid link list",
5574 				valid_link_list_index);
5575 		return MGMT_RX_REO_INVALID_LINK;
5576 	}
5577 
5578 	sim_context = mgmt_rx_reo_sim_get_context();
5579 	if (!sim_context) {
5580 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
5581 		return MGMT_RX_REO_INVALID_LINK;
5582 	}
5583 
5584 	return sim_context->link_id_to_pdev_map.valid_link_list
5585 						[valid_link_list_index];
5586 }
5587 
5588 /**
5589  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
5590  * the air
5591  * @mac_hw: pointer to structure representing MAC HW
5592  * @num_mlo_links: number of MLO HW links
5593  * @frame: pointer to management frame parameters
5594  *
5595  * This API simulates the management frame reception from air.
5596  *
5597  * Return: QDF_STATUS
5598  */
5599 static QDF_STATUS
5600 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
5601 				 uint8_t num_mlo_links,
5602 				 struct mgmt_rx_frame_params *frame)
5603 {
5604 	uint8_t valid_link_list_index;
5605 	int8_t link_id;
5606 
5607 	if (!mac_hw) {
5608 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
5609 		return QDF_STATUS_E_NULL_VALUE;
5610 	}
5611 
5612 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
5613 		mgmt_rx_reo_err("Invalid number of MLO links %u",
5614 				num_mlo_links);
5615 		return QDF_STATUS_E_INVAL;
5616 	}
5617 
5618 	if (!frame) {
5619 		mgmt_rx_reo_err("pointer to frame parameters is null");
5620 		return QDF_STATUS_E_NULL_VALUE;
5621 	}
5622 
5623 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
5624 							num_mlo_links);
5625 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
5626 	qdf_assert_always(link_id >= 0);
5627 	qdf_assert_always(link_id < MAX_MLO_LINKS);
5628 
5629 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
5630 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
5631 	frame->link_id = link_id;
5632 
5633 	return QDF_STATUS_SUCCESS;
5634 }
5635 
5636 /**
5637  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
5638  * HW in case of any Rx error.
5639  * @mac_hw: pointer to structure representing MAC HW
5640  * @frame: pointer to management frame parameters
5641  *
5642  * Return: QDF_STATUS
5643  */
5644 static QDF_STATUS
5645 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
5646 				      struct mgmt_rx_frame_params *frame)
5647 {
5648 	if (!mac_hw) {
5649 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
5650 		return QDF_STATUS_E_NULL_VALUE;
5651 	}
5652 
5653 	if (!frame) {
5654 		mgmt_rx_reo_err("pointer to frame parameters is null");
5655 		return QDF_STATUS_E_NULL_VALUE;
5656 	}
5657 
5658 	if (frame->link_id >= MAX_MLO_LINKS) {
5659 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
5660 		return QDF_STATUS_E_INVAL;
5661 	}
5662 
5663 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
5664 
5665 	return QDF_STATUS_SUCCESS;
5666 }
5667 
5668 /**
5669  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
5670  * @data: pointer to data input
5671  *
5672  * kthread handler to simulate MAC HW.
5673  *
5674  * Return: 0 for success, else failure
5675  */
5676 static int
5677 mgmt_rx_reo_sim_mac_hw_thread(void *data)
5678 {
5679 	struct mgmt_rx_reo_sim_context *sim_context = data;
5680 	struct mgmt_rx_reo_sim_mac_hw *mac_hw;
5681 
5682 	if (!sim_context) {
5683 		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
5684 		return -EINVAL;
5685 	}
5686 
5687 	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;
5688 
5689 	while (!qdf_thread_should_stop()) {
5690 		uint32_t inter_frame_delay_us;
5691 		struct mgmt_rx_frame_params frame;
5692 		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
5693 		int8_t link_id = -1;
5694 		QDF_STATUS status;
5695 		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5696 		struct mgmt_rx_reo_shared_snapshot snapshot_value;
5697 		int8_t num_mlo_links;
5698 		bool ret;
5699 		uint8_t ml_grp_id;
5700 
5701 		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
5702 		if (num_mlo_links < 0 ||
5703 		    num_mlo_links > MAX_MLO_LINKS) {
5704 			mgmt_rx_reo_err("Invalid number of MLO links %d",
5705 					num_mlo_links);
5706 			qdf_assert_always(0);
5707 		}
5708 
5709 		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
5710 							  &frame);
5711 		if (QDF_IS_STATUS_ERROR(status)) {
5712 			mgmt_rx_reo_err("Receive from the air failed");
5713 			/**
5714 			 * Frame reception failed and we are not sure about the
5715 			 * link id. Without link id there is no way to restore
5716 			 * the mac hw state. Hence assert unconditionally.
5717 			 */
5718 			qdf_assert_always(0);
5719 		}
5720 		link_id = frame.link_id;
5721 
5722 		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
5723 				  link_id, frame.global_timestamp,
5724 				  frame.mgmt_pkt_ctr);
5725 
5726 		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
5727 		if (!frame_mac_hw) {
5728 			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
5729 					link_id);
5730 
5731 			/* Cleanup */
5732 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5733 								mac_hw, &frame);
5734 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5735 
5736 			continue;
5737 		}
5738 
5739 		frame_mac_hw->params = frame;
5740 		frame_mac_hw->sim_context = sim_context;
5741 		ml_grp_id = sim_context->ml_grp_id;
5742 
5743 		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
5744 				&sim_context->master_frame_list, &frame);
5745 		if (QDF_IS_STATUS_ERROR(status)) {
5746 			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
5747 					link_id);
5748 
5749 			/* Cleanup */
5750 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5751 								mac_hw, &frame);
5752 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5753 
5754 			qdf_mem_free(frame_mac_hw);
5755 
5756 			continue;
5757 		}
5758 
5759 		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
5760 		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
5761 						frame.global_timestamp,
5762 						frame.mgmt_pkt_ctr);
5763 
5764 		status = mgmt_rx_reo_sim_write_snapshot(
5765 				link_id, ml_grp_id
5766 				snapshot_id, snapshot_value);
5767 		if (QDF_IS_STATUS_ERROR(status)) {
5768 			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
5769 					link_id, snapshot_id);
5770 
5771 			/* Cleanup */
5772 			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
5773 				&sim_context->master_frame_list, &frame);
5774 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5775 
5776 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5777 								mac_hw, &frame);
5778 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5779 
5780 			qdf_mem_free(frame_mac_hw);
5781 
5782 			continue;
5783 		}
5784 
5785 		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
5786 					 mgmt_rx_reo_sim_frame_handler_fw,
5787 					 frame_mac_hw);
5788 		if (QDF_IS_STATUS_ERROR(status)) {
5789 			mgmt_rx_reo_err("HW-%d : Failed to create work",
5790 					link_id);
5791 			qdf_assert_always(0);
5792 		}
5793 
5794 		ret = qdf_queue_work(
5795 			NULL, sim_context->fw_mgmt_frame_handler[link_id],
5796 			&frame_mac_hw->frame_handler_fw);
5797 		if (!ret) {
5798 			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
5799 					link_id);
5800 			qdf_assert_always(0);
5801 		}
5802 
5803 		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
5804 			mgmt_rx_reo_sim_get_random_unsigned_int(
5805 			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);
5806 
5807 		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
5808 	}
5809 
5810 	return 0;
5811 }
5812 
5813 /**
5814  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
5815  * management frame list
5816  * @master_frame_list: Pointer to master frame list
5817  *
5818  * This API initializes the master management frame list
5819  *
5820  * Return: QDF_STATUS
5821  */
5822 static QDF_STATUS
5823 mgmt_rx_reo_sim_init_master_frame_list(
5824 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
5825 {
5826 	qdf_spinlock_create(&master_frame_list->lock);
5827 
5828 	qdf_list_create(&master_frame_list->pending_list,
5829 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
5830 	qdf_list_create(&master_frame_list->stale_list,
5831 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
5832 
5833 	return QDF_STATUS_SUCCESS;
5834 }
5835 
5836 /**
5837  * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
5838  * management frame list
5839  * @master_frame_list: Pointer to master frame list
5840  *
5841  * This API de initializes the master management frame list
5842  *
5843  * Return: QDF_STATUS
5844  */
5845 static QDF_STATUS
5846 mgmt_rx_reo_sim_deinit_master_frame_list(
5847 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
5848 {
5849 	qdf_spin_lock(&master_frame_list->lock);
5850 	qdf_list_destroy(&master_frame_list->stale_list);
5851 	qdf_list_destroy(&master_frame_list->pending_list);
5852 	qdf_spin_unlock(&master_frame_list->lock);
5853 
5854 	qdf_spinlock_destroy(&master_frame_list->lock);
5855 
5856 	return QDF_STATUS_SUCCESS;
5857 }
5858 
5859 /**
5860  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
5861  * unique link id values
5862  * @link_id_to_pdev_map: pointer to link id to pdev map
5863  * @link_id: Pointer to unique link id
5864  *
5865  * This API generates unique link id values for each pdev. This API should be
5866  * called after acquiring the spin lock protecting link id to pdev map.
5867  *
5868  * Return: QDF_STATUS
5869  */
5870 static QDF_STATUS
5871 mgmt_rx_reo_sim_generate_unique_link_id(
5872 		struct wlan_objmgr_pdev **link_id_to_pdev_map, uint8_t *link_id)
5873 {
5874 	uint8_t random_link_id;
5875 	uint8_t link;
5876 
5877 	if (!link_id_to_pdev_map || !link_id)
5878 		return QDF_STATUS_E_NULL_VALUE;
5879 
5880 	for (link = 0; link < MAX_MLO_LINKS; link++)
5881 		if (!link_id_to_pdev_map[link])
5882 			break;
5883 
5884 	if (link == MAX_MLO_LINKS) {
5885 		mgmt_rx_reo_err("All link ids are already allocated");
5886 		return QDF_STATUS_E_FAILURE;
5887 	}
5888 
5889 	while (1) {
5890 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
5891 							MAX_MLO_LINKS);
5892 
5893 		if (!link_id_to_pdev_map[random_link_id])
5894 			break;
5895 	}
5896 
5897 	*link_id = random_link_id;
5898 
5899 	return QDF_STATUS_SUCCESS;
5900 }
5901 
5902 /**
5903  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
5904  * to pdev map
5905  * @link_id_to_pdev_map: pointer to link id to pdev map
5906  * @pdev: pointer to pdev object
5907  *
5908  * This API incrementally builds the MLO HW link id to pdev map. This API is
5909  * used only for simulation.
5910  *
5911  * Return: QDF_STATUS
5912  */
5913 static QDF_STATUS
5914 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
5915 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
5916 		struct wlan_objmgr_pdev *pdev)
5917 {
5918 	uint8_t link_id;
5919 	QDF_STATUS status;
5920 
5921 	if (!link_id_to_pdev_map) {
5922 		mgmt_rx_reo_err("Link id to pdev map is null");
5923 		return QDF_STATUS_E_NULL_VALUE;
5924 	}
5925 
5926 	if (!pdev) {
5927 		mgmt_rx_reo_err("pdev is null");
5928 		return QDF_STATUS_E_NULL_VALUE;
5929 	}
5930 
5931 	qdf_spin_lock(&link_id_to_pdev_map->lock);
5932 
5933 	status = mgmt_rx_reo_sim_generate_unique_link_id(
5934 					link_id_to_pdev_map->map, &link_id);
5935 	if (QDF_IS_STATUS_ERROR(status)) {
5936 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
5937 		return QDF_STATUS_E_FAILURE;
5938 	}
5939 	qdf_assert_always(link_id < MAX_MLO_LINKS);
5940 
5941 	link_id_to_pdev_map->map[link_id] = pdev;
5942 	link_id_to_pdev_map->valid_link_list
5943 			[link_id_to_pdev_map->num_mlo_links] = link_id;
5944 	link_id_to_pdev_map->num_mlo_links++;
5945 
5946 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
5947 
5948 	return QDF_STATUS_SUCCESS;
5949 }
5950 
5951 /**
5952  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
5953  * id to pdev map
5954  * @link_id_to_pdev_map: pointer to link id to pdev map
5955  * @pdev: pointer to pdev object
5956  *
5957  * This API incrementally destroys the MLO HW link id to pdev map. This API is
5958  * used only for simulation.
5959  *
5960  * Return: QDF_STATUS
5961  */
5962 static QDF_STATUS
5963 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
5964 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
5965 		struct wlan_objmgr_pdev *pdev)
5966 {
5967 	uint8_t link_id;
5968 
5969 	if (!link_id_to_pdev_map) {
5970 		mgmt_rx_reo_err("Link id to pdev map is null");
5971 		return QDF_STATUS_E_NULL_VALUE;
5972 	}
5973 
5974 	if (!pdev) {
5975 		mgmt_rx_reo_err("pdev is null");
5976 		return QDF_STATUS_E_NULL_VALUE;
5977 	}
5978 
5979 	qdf_spin_lock(&link_id_to_pdev_map->lock);
5980 
5981 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
5982 		if (link_id_to_pdev_map->map[link_id] == pdev) {
5983 			link_id_to_pdev_map->map[link_id] = NULL;
5984 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
5985 
5986 			return QDF_STATUS_SUCCESS;
5987 		}
5988 	}
5989 
5990 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
5991 
5992 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
5993 
5994 	return QDF_STATUS_E_FAILURE;
5995 }
5996 
5997 QDF_STATUS
5998 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
5999 {
6000 	struct mgmt_rx_reo_sim_context *sim_context;
6001 	QDF_STATUS status;
6002 
6003 	sim_context = mgmt_rx_reo_sim_get_context();
6004 	if (!sim_context) {
6005 		mgmt_rx_reo_err("Mgmt simulation context is null");
6006 		return QDF_STATUS_E_NULL_VALUE;
6007 	}
6008 
6009 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
6010 				&sim_context->link_id_to_pdev_map, pdev);
6011 
6012 	if (QDF_IS_STATUS_ERROR(status)) {
6013 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
6014 		return status;
6015 	}
6016 
6017 	return QDF_STATUS_SUCCESS;
6018 }
6019 
6020 QDF_STATUS
6021 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
6022 {
6023 	struct mgmt_rx_reo_sim_context *sim_context;
6024 	QDF_STATUS status;
6025 
6026 	sim_context = mgmt_rx_reo_sim_get_context();
6027 	if (!sim_context) {
6028 		mgmt_rx_reo_err("Mgmt simulation context is null");
6029 		return QDF_STATUS_E_NULL_VALUE;
6030 	}
6031 
6032 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
6033 				&sim_context->link_id_to_pdev_map, pdev);
6034 
6035 	if (QDF_IS_STATUS_ERROR(status)) {
6036 		mgmt_rx_reo_err("Failed to remove pdev from the map");
6037 		return status;
6038 	}
6039 
6040 	return QDF_STATUS_SUCCESS;
6041 }
6042 
6043 QDF_STATUS
6044 mgmt_rx_reo_sim_start(uint8_t ml_grp_id)
6045 {
6046 	struct mgmt_rx_reo_context *reo_context;
6047 	struct mgmt_rx_reo_sim_context *sim_context;
6048 	qdf_thread_t *mac_hw_thread;
6049 	uint8_t link_id;
6050 	uint8_t id;
6051 	QDF_STATUS status;
6052 
6053 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6054 	if (!reo_context) {
6055 		mgmt_rx_reo_err("reo context is null");
6056 		return QDF_STATUS_E_NULL_VALUE;
6057 	}
6058 
6059 	reo_context->simulation_in_progress = true;
6060 
6061 	sim_context = &reo_context->sim_context;
6062 
6063 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
6064 		struct workqueue_struct *wq;
6065 
6066 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
6067 					     link_id);
6068 		if (!wq) {
6069 			mgmt_rx_reo_err("Host workqueue creation failed");
6070 			status = QDF_STATUS_E_FAILURE;
6071 			goto error_destroy_fw_and_host_work_queues_till_last_link;
6072 		}
6073 		sim_context->host_mgmt_frame_handler[link_id] = wq;
6074 
6075 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
6076 					     link_id);
6077 		if (!wq) {
6078 			mgmt_rx_reo_err("FW workqueue creation failed");
6079 			status = QDF_STATUS_E_FAILURE;
6080 			goto error_destroy_host_work_queue_of_last_link;
6081 		}
6082 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
6083 	}
6084 
6085 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
6086 					  sim_context, "MAC_HW_thread");
6087 	if (!mac_hw_thread) {
6088 		mgmt_rx_reo_err("MAC HW thread creation failed");
6089 		status = QDF_STATUS_E_FAILURE;
6090 		goto error_destroy_fw_and_host_work_queues_of_last_link;
6091 	}
6092 
6093 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
6094 
6095 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
6096 
6097 	return QDF_STATUS_SUCCESS;
6098 
6099 error_destroy_fw_and_host_work_queues_of_last_link:
6100 	drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
6101 	destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
6102 
6103 error_destroy_host_work_queue_of_last_link:
6104 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
6105 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
6106 
6107 error_destroy_fw_and_host_work_queues_till_last_link:
6108 	for (id = 0; id < link_id; id++) {
6109 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
6110 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
6111 
6112 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
6113 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
6114 	}
6115 
6116 	return status;
6117 }
6118 
/**
 * mgmt_rx_reo_sim_stop() - Stop the management Rx reorder simulation
 * @ml_grp_id: MLO group id whose reo context runs the simulation
 *
 * Tears down the simulation in dependency order: first the MAC HW
 * simulation thread is joined (so no new frames are generated), then the
 * per-link FW and host work queues are drained and destroyed, and finally
 * the master frame list is checked to be empty. A non-empty pending/stale
 * list indicates a reorder failure and triggers an assert.
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS
mgmt_rx_reo_sim_stop(uint8_t ml_grp_id)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;
	uint8_t link_id;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/* Stop the frame generator first so the work queues stop filling */
	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to stop the thread");
		return status;
	}

	sim_context->mac_hw_sim.mac_hw_thread = NULL;

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		/* Wait for all the pending frames to be processed by FW */
		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);

		/* Wait for all the pending frames to be processed by host */
		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
		destroy_workqueue(
				sim_context->host_mgmt_frame_handler[link_id]);
	}

	/* Dump the ingress/egress history for post-mortem analysis */
	status = mgmt_rx_reo_print_ingress_frame_info
			(MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame debug info");
		return status;
	}

	status = mgmt_rx_reo_print_egress_frame_info
			(MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame debug info");
		return status;
	}

	/*
	 * At this point every frame must have been delivered; anything left
	 * on the pending/stale lists means reordering failed.
	 */
	master_frame_list = &sim_context->master_frame_list;
	if (!qdf_list_empty(&master_frame_list->pending_list) ||
	    !qdf_list_empty(&master_frame_list->stale_list)) {
		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");

		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to print reorder list");
			return status;
		}

		/* Deliberate assert: simulation failure is fatal */
		qdf_assert_always(0);
	} else {
		mgmt_rx_reo_err("reo sim passed");
	}

	reo_context->simulation_in_progress = false;

	return QDF_STATUS_SUCCESS;
}
6189 
6190 /**
6191  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
6192  * context.
6193  * @reo_context: Pointer to reo context
6194  *
6195  * Return: QDF_STATUS of operation
6196  */
6197 static QDF_STATUS
6198 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
6199 {
6200 	QDF_STATUS status;
6201 	struct mgmt_rx_reo_sim_context *sim_context;
6202 	uint8_t link_id;
6203 
6204 	if (!reo_context) {
6205 		mgmt_rx_reo_err("reo context is null");
6206 		return QDF_STATUS_E_NULL_VALUE;
6207 	}
6208 
6209 	sim_context = &reo_context->sim_context;
6210 
6211 	qdf_mem_zero(sim_context, sizeof(*sim_context));
6212 	sim_context->mlo_grp_id = reo_context->mlo_grp_id;
6213 
6214 	status = mgmt_rx_reo_sim_init_master_frame_list(
6215 					&sim_context->master_frame_list);
6216 	if (QDF_IS_STATUS_ERROR(status)) {
6217 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
6218 		return status;
6219 	}
6220 
6221 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
6222 
6223 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
6224 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
6225 					MGMT_RX_REO_INVALID_LINK;
6226 
6227 	return QDF_STATUS_SUCCESS;
6228 }
6229 
6230 /**
6231  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
6232  * context.
6233  * @reo_context: Pointer to reo context
6234  *
6235  * Return: QDF_STATUS of operation
6236  */
6237 static QDF_STATUS
6238 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
6239 {
6240 	QDF_STATUS status;
6241 	struct mgmt_rx_reo_sim_context *sim_context;
6242 
6243 	if (!reo_context) {
6244 		mgmt_rx_reo_err("reo context is null");
6245 		return QDF_STATUS_E_NULL_VALUE;
6246 	}
6247 
6248 	sim_context = &reo_context->sim_context;
6249 
6250 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
6251 
6252 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
6253 					&sim_context->master_frame_list);
6254 	if (QDF_IS_STATUS_ERROR(status)) {
6255 		mgmt_rx_reo_err("Failed to destroy master frame list");
6256 		return status;
6257 	}
6258 
6259 	return QDF_STATUS_SUCCESS;
6260 }
6261 
6262 QDF_STATUS
6263 mgmt_rx_reo_sim_get_snapshot_address(
6264 			struct wlan_objmgr_pdev *pdev,
6265 			enum mgmt_rx_reo_shared_snapshot_id id,
6266 			struct mgmt_rx_reo_shared_snapshot **address)
6267 {
6268 	int8_t link_id;
6269 	struct mgmt_rx_reo_sim_context *sim_context;
6270 
6271 	sim_context = mgmt_rx_reo_sim_get_context();
6272 	if (!sim_context) {
6273 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
6274 		return QDF_STATUS_E_NULL_VALUE;
6275 	}
6276 
6277 	if (!pdev) {
6278 		mgmt_rx_reo_err("pdev is NULL");
6279 		return QDF_STATUS_E_NULL_VALUE;
6280 	}
6281 
6282 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
6283 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
6284 		return QDF_STATUS_E_INVAL;
6285 	}
6286 
6287 	if (!address) {
6288 		mgmt_rx_reo_err("Pointer to snapshot address is null");
6289 		return QDF_STATUS_E_NULL_VALUE;
6290 	}
6291 
6292 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
6293 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
6294 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
6295 				pdev);
6296 		return QDF_STATUS_E_INVAL;
6297 	}
6298 
6299 	*address = &sim_context->snapshot[link_id][id];
6300 
6301 	return QDF_STATUS_SUCCESS;
6302 }
6303 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
6304 
6305 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
6306 /**
6307  * mgmt_rx_reo_ingress_debug_info_init() - Initialize the management rx-reorder
6308  * ingress frame debug info
6309  * @psoc: Pointer to psoc
6310  * @ingress_debug_info_init_count: Initialization count
6311  * @ingress_frame_debug_info: Ingress frame debug info object
6312  *
6313  * API to initialize the management rx-reorder ingress frame debug info.
6314  *
6315  * Return: QDF_STATUS
6316  */
6317 static QDF_STATUS
6318 mgmt_rx_reo_ingress_debug_info_init
6319 		(struct wlan_objmgr_psoc *psoc,
6320 		 qdf_atomic_t *ingress_debug_info_init_count,
6321 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
6322 {
6323 	if (!psoc) {
6324 		mgmt_rx_reo_err("psoc is null");
6325 		return QDF_STATUS_E_NULL_VALUE;
6326 	}
6327 
6328 	if (!ingress_frame_debug_info) {
6329 		mgmt_rx_reo_err("Ingress frame debug info is null");
6330 		return QDF_STATUS_E_NULL_VALUE;
6331 	}
6332 
6333 	/* We need to initialize only for the first invocation */
6334 	if (qdf_atomic_read(ingress_debug_info_init_count))
6335 		goto success;
6336 
6337 	ingress_frame_debug_info->frame_list_size =
6338 		wlan_mgmt_rx_reo_get_ingress_frame_debug_list_size(psoc);
6339 
6340 	if (ingress_frame_debug_info->frame_list_size) {
6341 		ingress_frame_debug_info->frame_list = qdf_mem_malloc
6342 			(ingress_frame_debug_info->frame_list_size *
6343 			 sizeof(*ingress_frame_debug_info->frame_list));
6344 
6345 		if (!ingress_frame_debug_info->frame_list) {
6346 			mgmt_rx_reo_err("Failed to allocate debug info");
6347 			return QDF_STATUS_E_NOMEM;
6348 		}
6349 	}
6350 
6351 	/* Initialize the string for storing the debug info table boarder */
6352 	qdf_mem_set(ingress_frame_debug_info->boarder,
6353 		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
6354 
6355 success:
6356 	qdf_atomic_inc(ingress_debug_info_init_count);
6357 	return QDF_STATUS_SUCCESS;
6358 }
6359 
6360 /**
6361  * mgmt_rx_reo_egress_debug_info_init() - Initialize the management rx-reorder
6362  * egress frame debug info
6363  * @psoc: Pointer to psoc
6364  * @egress_debug_info_init_count: Initialization count
6365  * @egress_frame_debug_info: Egress frame debug info object
6366  *
6367  * API to initialize the management rx-reorder egress frame debug info.
6368  *
6369  * Return: QDF_STATUS
6370  */
6371 static QDF_STATUS
6372 mgmt_rx_reo_egress_debug_info_init
6373 		(struct wlan_objmgr_psoc *psoc,
6374 		 qdf_atomic_t *egress_debug_info_init_count,
6375 		 struct reo_egress_debug_info *egress_frame_debug_info)
6376 {
6377 	if (!psoc) {
6378 		mgmt_rx_reo_err("psoc is null");
6379 		return QDF_STATUS_E_NULL_VALUE;
6380 	}
6381 
6382 	if (!egress_frame_debug_info) {
6383 		mgmt_rx_reo_err("Egress frame debug info is null");
6384 		return QDF_STATUS_E_NULL_VALUE;
6385 	}
6386 
6387 	/* We need to initialize only for the first invocation */
6388 	if (qdf_atomic_read(egress_debug_info_init_count))
6389 		goto success;
6390 
6391 	egress_frame_debug_info->frame_list_size =
6392 		wlan_mgmt_rx_reo_get_egress_frame_debug_list_size(psoc);
6393 
6394 	if (egress_frame_debug_info->frame_list_size) {
6395 		egress_frame_debug_info->frame_list = qdf_mem_malloc
6396 				(egress_frame_debug_info->frame_list_size *
6397 				 sizeof(*egress_frame_debug_info->frame_list));
6398 
6399 		if (!egress_frame_debug_info->frame_list) {
6400 			mgmt_rx_reo_err("Failed to allocate debug info");
6401 			return QDF_STATUS_E_NOMEM;
6402 		}
6403 	}
6404 
6405 	/* Initialize the string for storing the debug info table boarder */
6406 	qdf_mem_set(egress_frame_debug_info->boarder,
6407 		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
6408 
6409 success:
6410 	qdf_atomic_inc(egress_debug_info_init_count);
6411 	return QDF_STATUS_SUCCESS;
6412 }
6413 
6414 /**
6415  * mgmt_rx_reo_scheduler_debug_info_init() - Initialize the management
6416  * rx-reorder scheduler debug info
6417  * @psoc: Pointer to psoc
6418  * @scheduler_debug_info_init_count: Initialization count
6419  * @scheduler_debug_info: Scheduler debug info object
6420  *
6421  * API to initialize the management rx-reorder Scheduler debug info.
6422  *
6423  * Return: QDF_STATUS
6424  */
6425 static QDF_STATUS
6426 mgmt_rx_reo_scheduler_debug_info_init
6427 		(struct wlan_objmgr_psoc *psoc,
6428 		 qdf_atomic_t *scheduler_debug_info_init_count,
6429 		 struct reo_scheduler_debug_info *scheduler_debug_info)
6430 {
6431 	if (!psoc) {
6432 		mgmt_rx_reo_err("psoc is null");
6433 		return QDF_STATUS_E_NULL_VALUE;
6434 	}
6435 
6436 	if (!scheduler_debug_info) {
6437 		mgmt_rx_reo_err("scheduler debug info is null");
6438 		return QDF_STATUS_E_NULL_VALUE;
6439 	}
6440 
6441 	/* We need to initialize only for the first invocation */
6442 	if (qdf_atomic_read(scheduler_debug_info_init_count))
6443 		goto success;
6444 
6445 	scheduler_debug_info->frame_list_size =
6446 		wlan_mgmt_rx_reo_get_scheduler_debug_list_size(psoc);
6447 
6448 	if (scheduler_debug_info->frame_list_size) {
6449 		scheduler_debug_info->frame_list = qdf_mem_malloc
6450 			(scheduler_debug_info->frame_list_size *
6451 			 sizeof(*scheduler_debug_info->frame_list));
6452 
6453 		if (!scheduler_debug_info->frame_list) {
6454 			mgmt_rx_reo_err("Failed to allocate debug info");
6455 			return QDF_STATUS_E_NOMEM;
6456 		}
6457 	}
6458 
6459 success:
6460 	qdf_atomic_inc(scheduler_debug_info_init_count);
6461 	return QDF_STATUS_SUCCESS;
6462 }
6463 
6464 /**
6465  * mgmt_rx_reo_debug_info_init() - Initialize the management rx-reorder debug
6466  * info
6467  * @pdev: pointer to pdev object
6468  *
6469  * API to initialize the management rx-reorder debug info.
6470  *
6471  * Return: QDF_STATUS
6472  */
6473 static QDF_STATUS
6474 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
6475 {
6476 	struct mgmt_rx_reo_context *reo_context;
6477 	QDF_STATUS status;
6478 	struct wlan_objmgr_psoc *psoc;
6479 
6480 	psoc = wlan_pdev_get_psoc(pdev);
6481 
6482 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
6483 		return QDF_STATUS_SUCCESS;
6484 
6485 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
6486 	if (!reo_context) {
6487 		mgmt_rx_reo_err("reo context is null");
6488 		return QDF_STATUS_E_NULL_VALUE;
6489 	}
6490 
6491 	status = mgmt_rx_reo_ingress_debug_info_init
6492 			(psoc, &reo_context->ingress_debug_info_init_count,
6493 			 &reo_context->ingress_frame_debug_info);
6494 	if (QDF_IS_STATUS_ERROR(status)) {
6495 		mgmt_rx_reo_err("Failed to initialize ingress debug info");
6496 		return QDF_STATUS_E_FAILURE;
6497 	}
6498 
6499 	status = mgmt_rx_reo_egress_debug_info_init
6500 			(psoc, &reo_context->egress_debug_info_init_count,
6501 			 &reo_context->egress_frame_debug_info);
6502 	if (QDF_IS_STATUS_ERROR(status)) {
6503 		mgmt_rx_reo_err("Failed to initialize egress debug info");
6504 		return QDF_STATUS_E_FAILURE;
6505 	}
6506 
6507 	status = mgmt_rx_reo_scheduler_debug_info_init
6508 			(psoc, &reo_context->scheduler_debug_info_init_count,
6509 			 &reo_context->scheduler_debug_info);
6510 	if (QDF_IS_STATUS_ERROR(status)) {
6511 		mgmt_rx_reo_err("Failed to initialize scheduler debug info");
6512 		return QDF_STATUS_E_FAILURE;
6513 	}
6514 
6515 	return QDF_STATUS_SUCCESS;
6516 }
6517 
6518 /**
6519  * mgmt_rx_reo_ingress_debug_info_deinit() - De initialize the management
6520  * rx-reorder ingress frame debug info
6521  * @psoc: Pointer to psoc
6522  * @ingress_debug_info_init_count: Initialization count
6523  * @ingress_frame_debug_info: Ingress frame debug info object
6524  *
6525  * API to de initialize the management rx-reorder ingress frame debug info.
6526  *
6527  * Return: QDF_STATUS
6528  */
6529 static QDF_STATUS
6530 mgmt_rx_reo_ingress_debug_info_deinit
6531 		(struct wlan_objmgr_psoc *psoc,
6532 		 qdf_atomic_t *ingress_debug_info_init_count,
6533 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
6534 {
6535 	if (!psoc) {
6536 		mgmt_rx_reo_err("psoc is null");
6537 		return QDF_STATUS_E_NULL_VALUE;
6538 	}
6539 
6540 	if (!ingress_frame_debug_info) {
6541 		mgmt_rx_reo_err("Ingress frame debug info is null");
6542 		return QDF_STATUS_E_NULL_VALUE;
6543 	}
6544 
6545 	if (!qdf_atomic_read(ingress_debug_info_init_count)) {
6546 		mgmt_rx_reo_err("Ingress debug info ref cnt is 0");
6547 		return QDF_STATUS_E_FAILURE;
6548 	}
6549 
6550 	/* We need to de-initialize only for the last invocation */
6551 	if (qdf_atomic_dec_and_test(ingress_debug_info_init_count))
6552 		goto success;
6553 
6554 	if (ingress_frame_debug_info->frame_list) {
6555 		qdf_mem_free(ingress_frame_debug_info->frame_list);
6556 		ingress_frame_debug_info->frame_list = NULL;
6557 	}
6558 	ingress_frame_debug_info->frame_list_size = 0;
6559 
6560 	qdf_mem_zero(ingress_frame_debug_info->boarder,
6561 		     MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
6562 
6563 success:
6564 	return QDF_STATUS_SUCCESS;
6565 }
6566 
6567 /**
6568  * mgmt_rx_reo_egress_debug_info_deinit() - De initialize the management
6569  * rx-reorder egress frame debug info
6570  * @psoc: Pointer to psoc
6571  * @egress_debug_info_init_count: Initialization count
6572  * @egress_frame_debug_info: Egress frame debug info object
6573  *
6574  * API to de initialize the management rx-reorder egress frame debug info.
6575  *
6576  * Return: QDF_STATUS
6577  */
6578 static QDF_STATUS
6579 mgmt_rx_reo_egress_debug_info_deinit
6580 		(struct wlan_objmgr_psoc *psoc,
6581 		 qdf_atomic_t *egress_debug_info_init_count,
6582 		 struct reo_egress_debug_info *egress_frame_debug_info)
6583 {
6584 	if (!psoc) {
6585 		mgmt_rx_reo_err("psoc is null");
6586 		return QDF_STATUS_E_NULL_VALUE;
6587 	}
6588 
6589 	if (!egress_frame_debug_info) {
6590 		mgmt_rx_reo_err("Egress frame debug info is null");
6591 		return QDF_STATUS_E_NULL_VALUE;
6592 	}
6593 
6594 	if (!qdf_atomic_read(egress_debug_info_init_count)) {
6595 		mgmt_rx_reo_err("Egress debug info ref cnt is 0");
6596 		return QDF_STATUS_E_FAILURE;
6597 	}
6598 
6599 	/* We need to de-initialize only for the last invocation */
6600 	if (qdf_atomic_dec_and_test(egress_debug_info_init_count))
6601 		goto success;
6602 
6603 	if (egress_frame_debug_info->frame_list) {
6604 		qdf_mem_free(egress_frame_debug_info->frame_list);
6605 		egress_frame_debug_info->frame_list = NULL;
6606 	}
6607 	egress_frame_debug_info->frame_list_size = 0;
6608 
6609 	qdf_mem_zero(egress_frame_debug_info->boarder,
6610 		     MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
6611 
6612 success:
6613 	return QDF_STATUS_SUCCESS;
6614 }
6615 
6616 /**
6617  * mgmt_rx_reo_scheduler_debug_info_deinit() - De initialize the management
6618  * rx-reorder scheduler debug info
6619  * @psoc: Pointer to psoc
6620  * @scheduler_debug_info_init_count: Initialization count
6621  * @scheduler_debug_info: Scheduler debug info object
6622  *
6623  * API to de initialize the management rx-reorder scheduler debug info.
6624  *
6625  * Return: QDF_STATUS
6626  */
6627 static QDF_STATUS
6628 mgmt_rx_reo_scheduler_debug_info_deinit
6629 		(struct wlan_objmgr_psoc *psoc,
6630 		 qdf_atomic_t *scheduler_debug_info_init_count,
6631 		 struct reo_scheduler_debug_info *scheduler_debug_info)
6632 {
6633 	if (!psoc) {
6634 		mgmt_rx_reo_err("psoc is null");
6635 		return QDF_STATUS_E_NULL_VALUE;
6636 	}
6637 
6638 	if (!scheduler_debug_info) {
6639 		mgmt_rx_reo_err("Scheduler debug info is null");
6640 		return QDF_STATUS_E_NULL_VALUE;
6641 	}
6642 
6643 	if (!qdf_atomic_read(scheduler_debug_info_init_count)) {
6644 		mgmt_rx_reo_err("Scheduler debug info ref cnt is 0");
6645 		return QDF_STATUS_E_FAILURE;
6646 	}
6647 
6648 	/* We need to de-initialize only for the last invocation */
6649 	if (qdf_atomic_dec_and_test(scheduler_debug_info_init_count))
6650 		goto success;
6651 
6652 	if (scheduler_debug_info->frame_list) {
6653 		qdf_mem_free(scheduler_debug_info->frame_list);
6654 		scheduler_debug_info->frame_list = NULL;
6655 	}
6656 	scheduler_debug_info->frame_list_size = 0;
6657 
6658 success:
6659 	return QDF_STATUS_SUCCESS;
6660 }
6661 
6662 /**
6663  * mgmt_rx_reo_debug_info_deinit() - De initialize the management rx-reorder
6664  * debug info
6665  * @pdev: Pointer to pdev object
6666  *
6667  * API to de initialize the management rx-reorder debug info.
6668  *
6669  * Return: QDF_STATUS
6670  */
6671 static QDF_STATUS
6672 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
6673 {
6674 	struct mgmt_rx_reo_context *reo_context;
6675 	QDF_STATUS status;
6676 	struct wlan_objmgr_psoc *psoc;
6677 
6678 	psoc = wlan_pdev_get_psoc(pdev);
6679 
6680 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
6681 		return QDF_STATUS_SUCCESS;
6682 
6683 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
6684 	if (!reo_context) {
6685 		mgmt_rx_reo_err("reo context is null");
6686 		return QDF_STATUS_E_NULL_VALUE;
6687 	}
6688 
6689 	status = mgmt_rx_reo_ingress_debug_info_deinit
6690 			(psoc, &reo_context->ingress_debug_info_init_count,
6691 			 &reo_context->ingress_frame_debug_info);
6692 	if (QDF_IS_STATUS_ERROR(status)) {
6693 		mgmt_rx_reo_err("Failed to deinitialize ingress debug info");
6694 		return QDF_STATUS_E_FAILURE;
6695 	}
6696 
6697 	status = mgmt_rx_reo_egress_debug_info_deinit
6698 			(psoc, &reo_context->egress_debug_info_init_count,
6699 			 &reo_context->egress_frame_debug_info);
6700 	if (QDF_IS_STATUS_ERROR(status)) {
6701 		mgmt_rx_reo_err("Failed to deinitialize egress debug info");
6702 		return QDF_STATUS_E_FAILURE;
6703 	}
6704 
6705 	status = mgmt_rx_reo_scheduler_debug_info_deinit
6706 			(psoc, &reo_context->scheduler_debug_info_init_count,
6707 			 &reo_context->scheduler_debug_info);
6708 	if (QDF_IS_STATUS_ERROR(status)) {
6709 		mgmt_rx_reo_err("Failed to deinitialize scheduler debug info");
6710 		return QDF_STATUS_E_FAILURE;
6711 	}
6712 
6713 	return QDF_STATUS_SUCCESS;
6714 }
6715 #else
6716 static QDF_STATUS
6717 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_psoc *psoc)
6718 {
6719 	return QDF_STATUS_SUCCESS;
6720 }
6721 
6722 static QDF_STATUS
6723 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_psoc *psoc)
6724 {
6725 	return QDF_STATUS_SUCCESS;
6726 }
6727 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
6728 
6729 /**
6730  * mgmt_rx_reo_flush_list() - Flush all entries in the reorder list
6731  * @reo_list: Pointer to reorder list
6732  *
6733  * API to flush all the entries of the reorder list. This API would acquire
6734  * the lock protecting the list.
6735  *
6736  * Return: QDF_STATUS
6737  */
6738 static QDF_STATUS
6739 mgmt_rx_reo_flush_list(struct mgmt_rx_reo_list *reo_list)
6740 {
6741 	struct mgmt_rx_reo_list_entry *cur_entry;
6742 	struct mgmt_rx_reo_list_entry *temp;
6743 
6744 	if (!reo_list) {
6745 		mgmt_rx_reo_err("reorder list is null");
6746 		return QDF_STATUS_E_NULL_VALUE;
6747 	}
6748 
6749 	qdf_spin_lock_bh(&reo_list->list_lock);
6750 
6751 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
6752 		free_mgmt_rx_event_params(cur_entry->rx_params);
6753 
6754 		/**
6755 		 * Release the reference taken when the entry is inserted into
6756 		 * the reorder list.
6757 		 */
6758 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
6759 					     WLAN_MGMT_RX_REO_ID);
6760 
6761 		qdf_mem_free(cur_entry);
6762 	}
6763 
6764 	qdf_spin_unlock_bh(&reo_list->list_lock);
6765 
6766 	return QDF_STATUS_SUCCESS;
6767 }
6768 
6769 /**
6770  * mgmt_rx_reo_ingress_list_deinit() - De initialize the management rx-reorder
6771  * ingress list
6772  * @ingress_list: Pointer to ingress reorder list
6773  *
6774  * API to de initialize the management rx-reorder ingress list.
6775  *
6776  * Return: QDF_STATUS
6777  */
6778 static QDF_STATUS
6779 mgmt_rx_reo_ingress_list_deinit(struct mgmt_rx_reo_ingress_list *ingress_list)
6780 {
6781 	QDF_STATUS status;
6782 	struct mgmt_rx_reo_list *reo_ingress_list;
6783 
6784 	if (!ingress_list) {
6785 		mgmt_rx_reo_err("Ingress list is null");
6786 		return QDF_STATUS_E_NULL_VALUE;
6787 	}
6788 	reo_ingress_list = &ingress_list->reo_list;
6789 
6790 	qdf_timer_sync_cancel(&ingress_list->ageout_timer);
6791 	qdf_timer_free(&ingress_list->ageout_timer);
6792 
6793 	status = mgmt_rx_reo_flush_list(reo_ingress_list);
6794 	if (QDF_IS_STATUS_ERROR(status)) {
6795 		mgmt_rx_reo_err("Failed to flush the ingress list");
6796 		return status;
6797 	}
6798 	qdf_spinlock_destroy(&reo_ingress_list->list_lock);
6799 	qdf_list_destroy(&reo_ingress_list->list);
6800 
6801 	return QDF_STATUS_SUCCESS;
6802 }
6803 
6804 /**
6805  * mgmt_rx_reo_egress_list_deinit() - De initialize the management rx-reorder
6806  * egress list
6807  * @egress_list: Pointer to egress reorder list
6808  *
6809  * API to de initialize the management rx-reorder egress list.
6810  *
6811  * Return: QDF_STATUS
6812  */
6813 static QDF_STATUS
6814 mgmt_rx_reo_egress_list_deinit(struct mgmt_rx_reo_egress_list *egress_list)
6815 {
6816 	QDF_STATUS status;
6817 	struct mgmt_rx_reo_list *reo_egress_list;
6818 
6819 	if (!egress_list) {
6820 		mgmt_rx_reo_err("Egress list is null");
6821 		return QDF_STATUS_E_NULL_VALUE;
6822 	}
6823 	reo_egress_list = &egress_list->reo_list;
6824 
6825 	qdf_timer_sync_cancel(&egress_list->egress_inactivity_timer);
6826 	qdf_timer_free(&egress_list->egress_inactivity_timer);
6827 
6828 	status = mgmt_rx_reo_flush_list(reo_egress_list);
6829 	if (QDF_IS_STATUS_ERROR(status)) {
6830 		mgmt_rx_reo_err("Failed to flush the egress list");
6831 		return QDF_STATUS_E_FAILURE;
6832 	}
6833 	qdf_spinlock_destroy(&reo_egress_list->list_lock);
6834 	qdf_list_destroy(&reo_egress_list->list);
6835 
6836 	return QDF_STATUS_SUCCESS;
6837 }
6838 
6839 QDF_STATUS
6840 mgmt_rx_reo_deinit_context(uint8_t ml_grp_id)
6841 {
6842 	QDF_STATUS status;
6843 	struct mgmt_rx_reo_context *reo_context;
6844 
6845 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6846 	if (!reo_context) {
6847 		mgmt_rx_reo_err("reo context is null");
6848 		return QDF_STATUS_E_NULL_VALUE;
6849 	}
6850 
6851 	qdf_spinlock_destroy(&reo_context->frame_release_lock);
6852 	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);
6853 
6854 	status = mgmt_rx_reo_sim_deinit(reo_context);
6855 	if (QDF_IS_STATUS_ERROR(status)) {
6856 		mgmt_rx_reo_err("Failed to de initialize reo sim context");
6857 		qdf_mem_free(reo_context);
6858 		return QDF_STATUS_E_FAILURE;
6859 	}
6860 
6861 	status = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
6862 	if (QDF_IS_STATUS_ERROR(status)) {
6863 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
6864 		qdf_mem_free(reo_context);
6865 		return status;
6866 	}
6867 
6868 	status = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
6869 	if (QDF_IS_STATUS_ERROR(status)) {
6870 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
6871 		qdf_mem_free(reo_context);
6872 		return status;
6873 	}
6874 
6875 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
6876 	qdf_mem_free(reo_context);
6877 
6878 	return QDF_STATUS_SUCCESS;
6879 }
6880 
6881 QDF_STATUS
6882 mgmt_rx_reo_init_context(uint8_t ml_grp_id)
6883 {
6884 	QDF_STATUS status;
6885 	QDF_STATUS temp;
6886 	struct mgmt_rx_reo_context *reo_context;
6887 
6888 	reo_context = qdf_mem_malloc(sizeof(struct mgmt_rx_reo_context));
6889 	if (!reo_context) {
6890 		mgmt_rx_reo_err("Failed to allocate reo context");
6891 		return QDF_STATUS_E_NULL_VALUE;
6892 	}
6893 	reo_context->mlo_grp_id = ml_grp_id;
6894 
6895 	mgmt_rx_reo_set_context(ml_grp_id, reo_context);
6896 
6897 	status = mgmt_rx_reo_ingress_list_init(&reo_context->ingress_list);
6898 	if (QDF_IS_STATUS_ERROR(status)) {
6899 		mgmt_rx_reo_err("Failed to initialize Rx reo ingress list");
6900 		goto free_reo_context;
6901 	}
6902 
6903 	status = mgmt_rx_reo_egress_list_init(&reo_context->egress_list);
6904 	if (QDF_IS_STATUS_ERROR(status)) {
6905 		mgmt_rx_reo_err("Failed to initialize Rx reo egress list");
6906 		goto deinit_reo_ingress_list;
6907 	}
6908 
6909 	status = mgmt_rx_reo_sim_init(reo_context);
6910 	if (QDF_IS_STATUS_ERROR(status)) {
6911 		mgmt_rx_reo_err("Failed to initialize reo simulation context");
6912 		goto deinit_reo_egress_list;
6913 	}
6914 
6915 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
6916 	qdf_spinlock_create(&reo_context->frame_release_lock);
6917 	qdf_atomic_init(&reo_context->context_id);
6918 
6919 	return QDF_STATUS_SUCCESS;
6920 
6921 deinit_reo_egress_list:
6922 	temp = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
6923 	if (QDF_IS_STATUS_ERROR(temp)) {
6924 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
6925 		return temp;
6926 	}
6927 deinit_reo_ingress_list:
6928 	temp = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
6929 	if (QDF_IS_STATUS_ERROR(temp)) {
6930 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
6931 		return temp;
6932 	}
6933 free_reo_context:
6934 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
6935 	qdf_mem_free(reo_context);
6936 
6937 	return status;
6938 }
6939 
6940 /**
6941  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
6942  * params object
6943  * @snapshot_params: Pointer to snapshot params object
6944  *
6945  * Return: void
6946  */
6947 static void
6948 wlan_mgmt_rx_reo_initialize_snapshot_params(
6949 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
6950 {
6951 	snapshot_params->valid = false;
6952 	snapshot_params->mgmt_pkt_ctr = 0;
6953 	snapshot_params->global_timestamp = 0;
6954 }
6955 
6956 /**
6957  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
6958  * snapshot addresses for a given pdev
6959  * @pdev: pointer to pdev object
6960  *
6961  * Return: QDF_STATUS
6962  */
6963 static QDF_STATUS
6964 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
6965 {
6966 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
6967 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
6968 	QDF_STATUS status;
6969 
6970 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
6971 	if (!mgmt_rx_reo_pdev_ctx) {
6972 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
6973 		return QDF_STATUS_E_NULL_VALUE;
6974 	}
6975 
6976 	snapshot_id = 0;
6977 
6978 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
6979 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
6980 
6981 		snapshot_info =
6982 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
6983 			[snapshot_id];
6984 		status = wlan_mgmt_rx_reo_get_snapshot_info
6985 					(pdev, snapshot_id, snapshot_info);
6986 		if (QDF_IS_STATUS_ERROR(status)) {
6987 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
6988 					snapshot_id);
6989 			return status;
6990 		}
6991 
6992 		snapshot_id++;
6993 	}
6994 
6995 	return QDF_STATUS_SUCCESS;
6996 }
6997 
6998 /**
6999  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
7000  * snapshot values for a given pdev
7001  * @pdev: pointer to pdev object
7002  *
7003  * Return: QDF_STATUS
7004  */
7005 static QDF_STATUS
7006 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
7007 {
7008 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
7009 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7010 
7011 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7012 	if (!mgmt_rx_reo_pdev_ctx) {
7013 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7014 		return QDF_STATUS_E_NULL_VALUE;
7015 	}
7016 
7017 	snapshot_id = 0;
7018 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
7019 		wlan_mgmt_rx_reo_initialize_snapshot_params
7020 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
7021 			 [snapshot_id]);
7022 		snapshot_id++;
7023 	}
7024 
7025 	/* Initialize Host snapshot params */
7026 	wlan_mgmt_rx_reo_initialize_snapshot_params
7027 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
7028 
7029 	return QDF_STATUS_SUCCESS;
7030 }
7031 
7032 /**
7033  * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
7034  * for management Rx REO pdev component private object
7035  * @pdev: pointer to pdev object
7036  *
7037  * Return: QDF_STATUS
7038  */
7039 static QDF_STATUS
7040 mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
7041 {
7042 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7043 
7044 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7045 	if (!mgmt_rx_reo_pdev_ctx) {
7046 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7047 		return QDF_STATUS_E_NULL_VALUE;
7048 	}
7049 
7050 	mgmt_rx_reo_pdev_ctx->init_complete = true;
7051 
7052 	return QDF_STATUS_SUCCESS;
7053 }
7054 
7055 /**
7056  * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
7057  * for management Rx REO pdev component private object
7058  * @pdev: pointer to pdev object
7059  *
7060  * Return: QDF_STATUS
7061  */
7062 static QDF_STATUS
7063 mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
7064 {
7065 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7066 
7067 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7068 	if (!mgmt_rx_reo_pdev_ctx) {
7069 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7070 		return QDF_STATUS_E_NULL_VALUE;
7071 	}
7072 
7073 	mgmt_rx_reo_pdev_ctx->init_complete = false;
7074 
7075 	return QDF_STATUS_SUCCESS;
7076 }
7077 
7078 /**
7079  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
7080  * snapshot related data structures for a given pdev
7081  * @pdev: pointer to pdev object
7082  *
7083  * Return: QDF_STATUS
7084  */
7085 static QDF_STATUS
7086 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
7087 {
7088 	QDF_STATUS status;
7089 
7090 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
7091 	if (QDF_IS_STATUS_ERROR(status)) {
7092 		mgmt_rx_reo_err("Failed to initialize snapshot value");
7093 		return status;
7094 	}
7095 
7096 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
7097 	if (QDF_IS_STATUS_ERROR(status)) {
7098 		mgmt_rx_reo_err("Failed to initialize snapshot address");
7099 		return status;
7100 	}
7101 
7102 	return QDF_STATUS_SUCCESS;
7103 }
7104 
7105 /**
7106  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
7107  * data structures for a given pdev
7108  * @pdev: pointer to pdev object
7109  *
7110  * Return: QDF_STATUS
7111  */
7112 static QDF_STATUS
7113 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
7114 {
7115 	QDF_STATUS status;
7116 
7117 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
7118 	if (QDF_IS_STATUS_ERROR(status)) {
7119 		mgmt_rx_reo_err("Failed to initialize snapshot value");
7120 		return status;
7121 	}
7122 
7123 	return QDF_STATUS_SUCCESS;
7124 }
7125 
7126 QDF_STATUS
7127 mgmt_rx_reo_pdev_attach(struct wlan_objmgr_pdev *pdev)
7128 {
7129 	QDF_STATUS status;
7130 
7131 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7132 		return QDF_STATUS_SUCCESS;
7133 
7134 	status = mgmt_rx_reo_initialize_snapshots(pdev);
7135 	if (QDF_IS_STATUS_ERROR(status)) {
7136 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
7137 		return status;
7138 	}
7139 
7140 	status = mgmt_rx_reo_set_initialization_complete(pdev);
7141 	if (QDF_IS_STATUS_ERROR(status)) {
7142 		mgmt_rx_reo_err("Failed to set initialization complete");
7143 		return status;
7144 	}
7145 
7146 	return QDF_STATUS_SUCCESS;
7147 }
7148 
7149 QDF_STATUS
7150 mgmt_rx_reo_psoc_attach(struct wlan_objmgr_psoc *psoc)
7151 {
7152 	return QDF_STATUS_SUCCESS;
7153 }
7154 
7155 QDF_STATUS
7156 mgmt_rx_reo_pdev_detach(struct wlan_objmgr_pdev *pdev)
7157 {
7158 	QDF_STATUS status;
7159 
7160 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7161 		return QDF_STATUS_SUCCESS;
7162 
7163 	status = mgmt_rx_reo_clear_initialization_complete(pdev);
7164 	if (QDF_IS_STATUS_ERROR(status)) {
7165 		mgmt_rx_reo_err("Failed to clear initialization complete");
7166 		return status;
7167 	}
7168 
7169 	status = mgmt_rx_reo_clear_snapshots(pdev);
7170 	if (QDF_IS_STATUS_ERROR(status)) {
7171 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
7172 		return status;
7173 	}
7174 
7175 	return QDF_STATUS_SUCCESS;
7176 }
7177 
7178 QDF_STATUS
7179 mgmt_rx_reo_psoc_detach(struct wlan_objmgr_psoc *psoc)
7180 {
7181 	return QDF_STATUS_SUCCESS;
7182 }
7183 
7184 QDF_STATUS
7185 mgmt_rx_reo_pdev_obj_create_notification(
7186 	struct wlan_objmgr_pdev *pdev,
7187 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
7188 {
7189 	QDF_STATUS status;
7190 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
7191 
7192 	if (!pdev) {
7193 		mgmt_rx_reo_err("pdev is null");
7194 		status = QDF_STATUS_E_NULL_VALUE;
7195 		goto failure;
7196 	}
7197 
7198 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
7199 		status = QDF_STATUS_SUCCESS;
7200 		goto failure;
7201 	}
7202 
7203 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
7204 	if (QDF_IS_STATUS_ERROR(status)) {
7205 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
7206 		goto failure;
7207 	}
7208 
7209 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
7210 	if (!mgmt_rx_reo_pdev_ctx) {
7211 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
7212 		status = QDF_STATUS_E_NOMEM;
7213 		goto failure;
7214 	}
7215 
7216 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
7217 
7218 	status = mgmt_rx_reo_debug_info_init(pdev);
7219 	if (QDF_IS_STATUS_ERROR(status)) {
7220 		mgmt_rx_reo_err("Failed to initialize debug info");
7221 		status = QDF_STATUS_E_NOMEM;
7222 		goto failure;
7223 	}
7224 
7225 	return QDF_STATUS_SUCCESS;
7226 
7227 failure:
7228 	if (mgmt_rx_reo_pdev_ctx)
7229 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
7230 
7231 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
7232 
7233 	return status;
7234 }
7235 
7236 QDF_STATUS
7237 mgmt_rx_reo_pdev_obj_destroy_notification(
7238 	struct wlan_objmgr_pdev *pdev,
7239 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
7240 {
7241 	QDF_STATUS status;
7242 
7243 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7244 		return QDF_STATUS_SUCCESS;
7245 
7246 	status = mgmt_rx_reo_debug_info_deinit(pdev);
7247 	if (QDF_IS_STATUS_ERROR(status)) {
7248 		mgmt_rx_reo_err("Failed to de-initialize debug info");
7249 		return status;
7250 	}
7251 
7252 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
7253 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
7254 
7255 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
7256 	if (QDF_IS_STATUS_ERROR(status)) {
7257 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
7258 		return status;
7259 	}
7260 
7261 	return QDF_STATUS_SUCCESS;
7262 }
7263 
7264 QDF_STATUS
7265 mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc)
7266 {
7267 	return QDF_STATUS_SUCCESS;
7268 }
7269 
7270 QDF_STATUS
7271 mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc)
7272 {
7273 	return QDF_STATUS_SUCCESS;
7274 }
7275 
7276 bool
7277 mgmt_rx_reo_is_simulation_in_progress(uint8_t ml_grp_id)
7278 {
7279 	struct mgmt_rx_reo_context *reo_context;
7280 
7281 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7282 	if (!reo_context) {
7283 		mgmt_rx_reo_err("reo context is null");
7284 		return false;
7285 	}
7286 
7287 	return reo_context->simulation_in_progress;
7288 }
7289 
7290 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
7291 QDF_STATUS
7292 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
7293 {
7294 	struct mgmt_rx_reo_context *reo_context;
7295 	QDF_STATUS status;
7296 
7297 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7298 	if (!reo_context) {
7299 		mgmt_rx_reo_err("reo context is null");
7300 		return QDF_STATUS_E_NULL_VALUE;
7301 	}
7302 
7303 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
7304 	if (QDF_IS_STATUS_ERROR(status)) {
7305 		mgmt_rx_reo_err("Failed to print ingress frame stats");
7306 		return status;
7307 	}
7308 
7309 	return QDF_STATUS_SUCCESS;
7310 }
7311 
7312 QDF_STATUS
7313 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7314 {
7315 	struct mgmt_rx_reo_context *reo_context;
7316 	QDF_STATUS status;
7317 
7318 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7319 	if (!reo_context) {
7320 		mgmt_rx_reo_err("reo context is null");
7321 		return QDF_STATUS_E_NULL_VALUE;
7322 	}
7323 
7324 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
7325 							    num_frames);
7326 	if (QDF_IS_STATUS_ERROR(status)) {
7327 		mgmt_rx_reo_err("Failed to print ingress frame info");
7328 		return status;
7329 	}
7330 
7331 	return QDF_STATUS_SUCCESS;
7332 }
7333 
7334 QDF_STATUS
7335 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
7336 {
7337 	struct mgmt_rx_reo_context *reo_context;
7338 	QDF_STATUS status;
7339 
7340 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7341 	if (!reo_context) {
7342 		mgmt_rx_reo_err("reo context is null");
7343 		return QDF_STATUS_E_NULL_VALUE;
7344 	}
7345 
7346 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
7347 	if (QDF_IS_STATUS_ERROR(status)) {
7348 		mgmt_rx_reo_err("Failed to print egress frame stats");
7349 		return status;
7350 	}
7351 
7352 	return QDF_STATUS_SUCCESS;
7353 }
7354 
7355 QDF_STATUS
7356 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7357 {
7358 	struct mgmt_rx_reo_context *reo_context;
7359 	QDF_STATUS status;
7360 
7361 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7362 	if (!reo_context) {
7363 		mgmt_rx_reo_err("reo context is null");
7364 		return QDF_STATUS_E_NULL_VALUE;
7365 	}
7366 
7367 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
7368 							   num_frames);
7369 	if (QDF_IS_STATUS_ERROR(status)) {
7370 		mgmt_rx_reo_err("Failed to print egress frame info");
7371 		return status;
7372 	}
7373 
7374 	return QDF_STATUS_SUCCESS;
7375 }
7376 #else
7377 QDF_STATUS
7378 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
7379 {
7380 	return QDF_STATUS_SUCCESS;
7381 }
7382 
7383 QDF_STATUS
7384 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7385 {
7386 	return QDF_STATUS_SUCCESS;
7387 }
7388 
7389 QDF_STATUS
7390 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
7391 {
7392 	return QDF_STATUS_SUCCESS;
7393 }
7394 
7395 QDF_STATUS
7396 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7397 {
7398 	return QDF_STATUS_SUCCESS;
7399 }
7400 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
7401