xref: /wlan-dirver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_mlo_mgr_cmn.h>
28 
29 static struct mgmt_rx_reo_context g_rx_reo_ctx;
30 
31 #define mgmt_rx_reo_get_context()        (&g_rx_reo_ctx)
32 
33 #define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
34 #define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
35 
36 /**
37  * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
38  * @ctr1: Management packet counter1
39  * @ctr2: Management packet counter2
40  *
41  * We can't directly use the comparison operator here because the counters can
42  * overflow. But these counters have a property that the difference between
43  * them can never be greater than half the range of the data type.
44  * We can make use of this condition to detect which one is actually greater.
45  *
46  * Return: true if @ctr1 is greater than or equal to @ctr2, else false
47  */
48 static inline bool
49 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
50 {
51 	uint16_t delta = ctr1 - ctr2;
52 
53 	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
54 }
55 
56 /**
57  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
58  * @ctr1: Management packet counter1
59  * @ctr2: Management packet counter2
60  *
61  * We can't directly use the subtract operator here because the counters can
62  * overflow. But these counters have a property that the difference between
63  * them can never be greater than half the range of the data type.
64  * We can make use of this condition to detect which one is actually greater and
65  * return the difference accordingly.
66  *
67  * Return: Difference between @ctr1 and @ctr2
68  */
69 static inline int
70 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
71 {
72 	uint16_t delta = ctr1 - ctr2;
73 
74 	/**
75 	 * if delta is greater than half the range (i.e, ctr1 is actually
76 	 * smaller than ctr2), then the result should be a negative number.
77 	 * subtracting the entire range should give the correct value.
78 	 */
79 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
80 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
81 
82 	return delta;
83 }
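
/*
 * Editor-added illustrative sketch: shows how the wrap-around safe packet
 * counter helpers above behave across a 16-bit counter overflow. The guard
 * macro MGMT_RX_REO_EXAMPLES and the function name are hypothetical and not
 * part of the driver; the block is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_pkt_ctr_wrap_example(void)
{
	uint16_t older = 0xFFFE;	/* counter just before wrap-around */
	uint16_t newer = 0x0001;	/* counter just after wrap-around */

	/* newer is logically ahead of older even though 0x0001 < 0xFFFE */
	qdf_assert_always(mgmt_rx_reo_compare_pkt_ctrs_gte(newer, older));

	/* signed difference stays small and keeps the right sign */
	qdf_assert_always(mgmt_rx_reo_subtract_pkt_ctrs(newer, older) == 3);
	qdf_assert_always(mgmt_rx_reo_subtract_pkt_ctrs(older, newer) == -3);
}
#endif /* MGMT_RX_REO_EXAMPLES */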
84 
85 #define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
86 /**
87  * mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global timestamps
88  * @ts1: Global timestamp1
89  * @ts2: Global timestamp2
90  *
91  * We can't directly use the comparison operator here because the timestamps can
92  * overflow. But these timestamps have a property that the difference between
93  * them can never be greater than half the range of the data type.
94  * We can make use of this condition to detect which one is actually greater.
95  *
96  * Return: true if @ts1 is greater than or equal to @ts2, else false
97  */
98 static inline bool
99 mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
100 {
101 	uint32_t delta = ts1 - ts2;
102 
103 	return delta <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
104 }
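
/*
 * Editor-added illustrative sketch: the same half-range trick applied to the
 * 32-bit global timestamps. MGMT_RX_REO_EXAMPLES and the function name are
 * hypothetical; the block is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_global_ts_wrap_example(void)
{
	uint32_t older_ts = 0xFFFFFFF0;	/* just before the 32-bit wrap */
	uint32_t newer_ts = 0x00000010;	/* just after the 32-bit wrap */

	/* newer_ts is logically later despite the numeric wrap */
	qdf_assert_always(
		mgmt_rx_reo_compare_global_timestamps_gte(newer_ts, older_ts));
	qdf_assert_always(
		!mgmt_rx_reo_compare_global_timestamps_gte(older_ts, newer_ts));
}
#endif /* MGMT_RX_REO_EXAMPLES */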
105 
106 /**
107  * mgmt_rx_reo_is_stale_frame() - API to check whether the given management frame
108  * is stale
109  * @ts_last_released_frame: pointer to global time stamp of the last frame
110  * removed from the reorder list
111  * @frame_desc: pointer to frame descriptor
112  *
113  * This API checks whether the current management frame under processing is
114  * stale. Any frame older than the last frame delivered to upper layer is a
115  * stale frame. This could happen when we have to deliver frames out of order
116  * due to time out or list size limit. The frames which arrive late at host and
117  * with time stamp lesser than the last delivered frame are stale frames and
118  * they need to be handled differently.
119  *
120  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
121  * @frame_desc will be filled with proper values.
122  */
123 static QDF_STATUS
124 mgmt_rx_reo_is_stale_frame(
125 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame,
126 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
127 {
128 	uint32_t cur_frame_start_ts;
129 	uint32_t cur_frame_end_ts;
130 
131 	if (!ts_last_released_frame) {
132 		mgmt_rx_reo_err("Last released frame time stamp info is null");
133 		return QDF_STATUS_E_NULL_VALUE;
134 	}
135 
136 	if (!frame_desc) {
137 		mgmt_rx_reo_err("Frame descriptor is null");
138 		return QDF_STATUS_E_NULL_VALUE;
139 	}
140 
141 	frame_desc->is_stale = false;
142 	frame_desc->is_parallel_rx = false;
143 
144 	if (!ts_last_released_frame->valid)
145 		return QDF_STATUS_SUCCESS;
146 
147 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
148 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
149 
150 	frame_desc->is_stale =
151 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
152 					ts_last_released_frame->start_ts);
153 
154 	if (mgmt_rx_reo_compare_global_timestamps_gte
155 		(ts_last_released_frame->start_ts, cur_frame_start_ts) &&
156 	    mgmt_rx_reo_compare_global_timestamps_gte
157 		(cur_frame_end_ts, ts_last_released_frame->end_ts)) {
158 		frame_desc->is_parallel_rx = true;
159 		frame_desc->is_stale = false;
160 	}
161 
162 	return QDF_STATUS_SUCCESS;
163 }
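
/*
 * Editor-added illustrative sketch: the timestamp relations that the stale and
 * parallel-rx classification above relies on, using arbitrary numbers. The
 * MGMT_RX_REO_EXAMPLES guard and the function name are hypothetical; the block
 * is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_stale_frame_example(void)
{
	uint32_t last_released_start_ts = 1000;
	uint32_t last_released_end_ts = 1200;
	uint32_t stale_start_ts = 900;		/* started before 1000 */
	uint32_t parallel_start_ts = 950;	/* started before 1000 ... */
	uint32_t parallel_end_ts = 1300;	/* ... but ended after 1200 */

	/* Started before the last released frame => candidate stale frame */
	qdf_assert_always(!mgmt_rx_reo_compare_global_timestamps_gte
			  (stale_start_ts, last_released_start_ts));

	/*
	 * Reception overlaps the last released frame on both ends, so such a
	 * frame is treated as a parallel rx and not marked stale.
	 */
	qdf_assert_always(mgmt_rx_reo_compare_global_timestamps_gte
			  (last_released_start_ts, parallel_start_ts) &&
			  mgmt_rx_reo_compare_global_timestamps_gte
			  (parallel_end_ts, last_released_end_ts));
}
#endif /* MGMT_RX_REO_EXAMPLES */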
164 
165 QDF_STATUS
166 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
167 {
168 	uint16_t valid_link_bitmap_shmem;
169 	uint16_t valid_link_bitmap;
170 	int8_t num_active_links_shmem;
171 	int8_t num_active_links;
172 	QDF_STATUS status;
173 
174 	if (!psoc) {
175 		mgmt_rx_reo_err("psoc is null");
176 		return QDF_STATUS_E_NULL_VALUE;
177 	}
178 
179 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
180 		return QDF_STATUS_SUCCESS;
181 
182 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
183 							 &num_active_links_shmem);
184 	if (QDF_IS_STATUS_ERROR(status)) {
185 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
186 		return QDF_STATUS_E_FAILURE;
187 	}
188 	qdf_assert_always(num_active_links_shmem > 0);
189 
190 	num_active_links = wlan_mlo_get_num_active_links();
191 	qdf_assert_always(num_active_links > 0);
192 
193 	qdf_assert_always(num_active_links_shmem == num_active_links);
194 
195 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
196 							  &valid_link_bitmap_shmem);
197 	if (QDF_IS_STATUS_ERROR(status)) {
198 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
199 		return QDF_STATUS_E_INVAL;
200 	}
201 	qdf_assert_always(valid_link_bitmap_shmem != 0);
202 
203 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap();
204 	qdf_assert_always(valid_link_bitmap != 0);
205 
206 	qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap);
207 
208 	return QDF_STATUS_SUCCESS;
209 }
210 
211 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
212 /**
213  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
214  * @link_id: HW link id to be checked
 *
215  * Return: true if @link_id is a valid link else false
216  */
217 static bool
218 mgmt_rx_reo_is_valid_link(uint8_t link_id)
219 {
220 	uint16_t valid_hw_link_bitmap;
221 
222 	if (link_id >= MAX_MLO_LINKS) {
223 		mgmt_rx_reo_err("Invalid link id %u", link_id);
224 		return false;
225 	}
226 
227 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap();
228 	qdf_assert_always(valid_hw_link_bitmap);
229 
230 	return (valid_hw_link_bitmap & (1 << link_id));
231 }
232 
233 /**
234  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
235  * system
236  * @reo_context: Pointer to reo context object
237  *
238  * Return: On success returns number of active MLO HW links. On failure
239  * returns WLAN_MLO_INVALID_NUM_LINKS.
240  */
241 static int8_t
242 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
243 	if (!reo_context) {
244 		mgmt_rx_reo_err("Mgmt reo context is null");
245 		return WLAN_MLO_INVALID_NUM_LINKS;
246 	}
247 
248 	return wlan_mlo_get_num_active_links();
249 }
250 
251 static QDF_STATUS
252 mgmt_rx_reo_handle_potential_premature_delivery(
253 				struct mgmt_rx_reo_context *reo_context,
254 				uint32_t global_timestamp)
255 {
256 	return QDF_STATUS_SUCCESS;
257 }
258 
259 static QDF_STATUS
260 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
261 			       struct mgmt_rx_reo_frame_descriptor *desc)
262 {
263 	return QDF_STATUS_SUCCESS;
264 }
265 #else
266 /**
267  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
268  * @sim_context: Pointer to management rx reorder simulation context
 * @link_id: HW link id to be checked
 *
269  * Return: true if @link_id is a valid link, else false
270  */
271 static bool
272 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
273 			      uint8_t link_id)
274 {
275 	bool is_valid_link = false;
276 
277 	if (!sim_context) {
278 		mgmt_rx_reo_err("Mgmt reo sim context is null");
279 		return false;
280 	}
281 
282 	if (link_id >= MAX_MLO_LINKS) {
283 		mgmt_rx_reo_err("Invalid link id %u", link_id);
284 		return false;
285 	}
286 
287 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
288 
289 	if (sim_context->link_id_to_pdev_map.map[link_id])
290 		is_valid_link = true;
291 
292 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
293 
294 	return is_valid_link;
295 }
296 
297 /**
298  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
299  * @link_id: HW link id to be checked
 *
300  * Return: true if @link_id is a valid link else false
301  */
302 static bool
303 mgmt_rx_reo_is_valid_link(uint8_t link_id)
304 {
305 	struct mgmt_rx_reo_context *reo_context;
306 
307 	reo_context = mgmt_rx_reo_get_context();
308 
309 	if (!reo_context) {
310 		mgmt_rx_reo_err("Mgmt reo context is null");
311 		return false;
312 	}
313 
314 	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
315 					     link_id);
316 }
317 
318 /**
319  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
320  * simulation context object
321  * @sim_context: Pointer to reo simulation context object
322  *
323  * Number of MLO links will be equal to number of pdevs in the
324  * system. In case of simulation all the pdevs are assumed
325  * to have MLO capability.
326  *
327  * Return: On success returns number of MLO HW links. On failure
328  * returns WLAN_MLO_INVALID_NUM_LINKS.
329  */
330 static int8_t
331 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
332 {
333 	uint8_t num_mlo_links;
334 
335 	if (!sim_context) {
336 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
337 		return WLAN_MLO_INVALID_NUM_LINKS;
338 	}
339 
340 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
341 
342 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
343 
344 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
345 
346 	return num_mlo_links;
347 }
348 
349 /**
350  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
351  * context object
352  * @reo_context: Pointer to reo context object
353  *
354  * Return: On success returns number of MLO HW links. On failure
355  * returns WLAN_MLO_INVALID_NUM_LINKS.
356  */
357 static int8_t
358 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
359 	if (!reo_context) {
360 		mgmt_rx_reo_err("Mgmt reo context is null");
361 		return WLAN_MLO_INVALID_NUM_LINKS;
362 	}
363 
364 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
365 }
366 
367 /**
368  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
369  * rx reorder simulation context
370  *
371  * Return: On success returns the pointer to management rx reorder
372  * simulation context. On failure returns NULL.
373  */
374 static struct mgmt_rx_reo_sim_context *
375 mgmt_rx_reo_sim_get_context(void)
376 {
377 	struct mgmt_rx_reo_context *reo_context;
378 
379 	reo_context = mgmt_rx_reo_get_context();
380 	if (!reo_context) {
381 		mgmt_rx_reo_err("Mgmt reo context is null");
382 		return NULL;
383 	}
384 
385 	return &reo_context->sim_context;
386 }
387 
388 int8_t
389 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
390 {
391 	struct mgmt_rx_reo_sim_context *sim_context;
392 	int8_t link_id;
393 
394 	sim_context = mgmt_rx_reo_sim_get_context();
395 	if (!sim_context) {
396 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
397 		return MGMT_RX_REO_INVALID_LINK_ID;
398 	}
399 
400 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
401 
402 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
403 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
404 			break;
405 
406 	/* pdev is not found in map */
407 	if (link_id == MAX_MLO_LINKS)
408 		link_id = MGMT_RX_REO_INVALID_LINK_ID;
409 
410 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
411 
412 	return link_id;
413 }
414 
415 struct wlan_objmgr_pdev *
416 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
417 					  wlan_objmgr_ref_dbgid refdbgid)
418 {
419 	struct mgmt_rx_reo_sim_context *sim_context;
420 	struct wlan_objmgr_pdev *pdev;
421 	QDF_STATUS status;
422 
423 	sim_context = mgmt_rx_reo_sim_get_context();
424 	if (!sim_context) {
425 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
426 		return NULL;
427 	}
428 
429 	if (mlo_link_id >= MAX_MLO_LINKS) {
430 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
431 		return NULL;
432 	}
433 
434 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
435 
436 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
437 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
438 	if (QDF_IS_STATUS_ERROR(status)) {
439 		mgmt_rx_reo_err("Failed to get pdev reference");
		qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
440 		return NULL;
441 	}
442 
443 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
444 
445 	return pdev;
446 }
447 
448 /**
449  * mgmt_rx_reo_handle_potential_premature_delivery() - Helper API to handle
450  * premature delivery.
451  * @reo_context: Pointer to management rx reorder context
452  * @global_timestamp: Global time stamp of the current management frame
453  *
454  * Sometimes we have to deliver a management frame to the upper layers even
455  * before its wait count reaching zero. This is called premature delivery.
456  * Premature delivery could happen due to time out or reorder list overflow.
457  *
458  * Return: QDF_STATUS
459  */
460 static QDF_STATUS
461 mgmt_rx_reo_handle_potential_premature_delivery(
462 				struct mgmt_rx_reo_context *reo_context,
463 				uint32_t global_timestamp)
464 {
465 	qdf_list_t stale_frame_list_temp;
466 	QDF_STATUS status;
467 	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
468 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
469 	struct mgmt_rx_reo_sim_context *sim_context;
470 	struct mgmt_rx_reo_master_frame_list *master_frame_list;
471 
472 	if (!reo_context)
473 		return QDF_STATUS_E_NULL_VALUE;
474 
475 	sim_context = &reo_context->sim_context;
476 	master_frame_list = &sim_context->master_frame_list;
477 
478 	qdf_spin_lock(&master_frame_list->lock);
479 
480 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
481 		if (cur_entry->params.global_timestamp == global_timestamp)
482 			break;
483 
484 		latest_stale_frame = cur_entry;
485 	}
486 
487 	if (latest_stale_frame) {
488 		qdf_list_create(&stale_frame_list_temp,
489 				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);
490 
491 		status = qdf_list_split(&stale_frame_list_temp,
492 					&master_frame_list->pending_list,
493 					&latest_stale_frame->node);
494 		if (QDF_IS_STATUS_ERROR(status))
495 			goto exit_unlock_master_frame_list;
496 
497 		status = qdf_list_join(&master_frame_list->stale_list,
498 				       &stale_frame_list_temp);
499 		if (QDF_IS_STATUS_ERROR(status))
500 			goto exit_unlock_master_frame_list;
501 	}
502 
503 	status = QDF_STATUS_SUCCESS;
504 
505 exit_unlock_master_frame_list:
506 	qdf_spin_unlock(&master_frame_list->lock);
507 
508 	return status;
509 }
510 
511 /**
512  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
513  * stale management frame list
514  * @master_frame_list: pointer to master management frame list
515  * @reo_params: pointer to reo params
516  *
517  * This API removes frames from the stale management frame list.
518  *
519  * Return: QDF_STATUS of operation
520  */
521 static QDF_STATUS
522 mgmt_rx_reo_sim_remove_frame_from_stale_list(
523 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
524 		const struct mgmt_rx_reo_params *reo_params)
525 {
526 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
527 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
528 	QDF_STATUS status;
529 
530 	if (!master_frame_list || !reo_params)
531 		return QDF_STATUS_E_NULL_VALUE;
532 
533 	qdf_spin_lock(&master_frame_list->lock);
534 
535 	/**
536 	 * Stale frames can come in any order at host. Do a linear search and
537 	 * remove the matching entry.
538 	 */
539 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
540 		if (cur_entry->params.link_id == reo_params->link_id &&
541 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
542 		    cur_entry->params.global_timestamp ==
543 		    reo_params->global_timestamp) {
544 			matching_entry = cur_entry;
545 			break;
546 		}
547 	}
548 
549 	if (!matching_entry) {
550 		qdf_spin_unlock(&master_frame_list->lock);
551 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
552 		qdf_assert_always(0);
		return QDF_STATUS_E_FAILURE;
553 	}
554 
555 	status = qdf_list_remove_node(&master_frame_list->stale_list,
556 				      &matching_entry->node);
557 
558 	if (QDF_IS_STATUS_ERROR(status)) {
559 		qdf_spin_unlock(&master_frame_list->lock);
560 		return status;
561 	}
562 
563 	qdf_mem_free(matching_entry);
564 
565 	qdf_spin_unlock(&master_frame_list->lock);
566 
567 	return QDF_STATUS_SUCCESS;
568 }
569 
570 /**
571  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
572  * @reo_list: Pointer to reorder list
573  * @desc: Pointer to frame descriptor
574  *
575  * Return: QDF_STATUS of operation
576  */
577 static QDF_STATUS
578 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
579 			       struct mgmt_rx_reo_frame_descriptor *desc)
580 {
581 	QDF_STATUS status;
582 	struct mgmt_rx_reo_context *reo_context;
583 	struct mgmt_rx_reo_sim_context *sim_context;
584 	struct mgmt_rx_reo_params *reo_params;
585 
586 	if (!reo_list || !desc)
587 		return QDF_STATUS_E_NULL_VALUE;
588 
589 	/* FW consumed/Error frames are already removed */
590 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
591 		return QDF_STATUS_SUCCESS;
592 
593 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
594 	if (!reo_context)
595 		return QDF_STATUS_E_NULL_VALUE;
596 
597 	sim_context = &reo_context->sim_context;
598 
599 	reo_params = desc->rx_params->reo_params;
600 	if (!reo_params)
601 		return QDF_STATUS_E_NULL_VALUE;
602 
603 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
604 				&sim_context->master_frame_list, reo_params);
605 
606 	return status;
607 }
608 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
609 
610 /**
611  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
612  * whether the current frame getting delivered to upper layer is a premature
613  * delivery
614  * @release_reason: release reason
615  *
616  * Return: true for a premature delivery
617  */
618 static bool
619 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
620 {
621 	return !(release_reason &
622 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
623 }
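
/*
 * Editor-added illustrative sketch: a release caused only by ageout (or any
 * reason other than a zero wait count) counts as a potential premature
 * delivery, while a zero-wait-count release does not. MGMT_RX_REO_EXAMPLES and
 * the function name are hypothetical; the block is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_premature_delivery_example(void)
{
	qdf_assert_always(mgmt_rx_reo_is_potential_premature_delivery
			  (MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT));
	qdf_assert_always(!mgmt_rx_reo_is_potential_premature_delivery
			  (MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT));
}
#endif /* MGMT_RX_REO_EXAMPLES */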
624 
625 /**
626  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
627  * MGMT Rx REO module
628  * @pdev: pointer to pdev object
629  *
630  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
631  * else NULL
632  */
633 static struct mgmt_rx_reo_pdev_info *
634 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
635 {
636 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
637 
638 	if (!pdev) {
639 		mgmt_rx_reo_err("pdev is null");
640 		return NULL;
641 	}
642 
643 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
644 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
645 						      WLAN_UMAC_COMP_MGMT_TXRX);
646 
647 	if (!mgmt_txrx_pdev_ctx) {
648 		mgmt_rx_reo_err("mgmt txrx context is NULL");
649 		return NULL;
650 	}
651 
652 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
653 }
654 
655 /**
656  * mgmt_rx_reo_print_snapshots() - Print all snapshots related
657  * to management Rx reorder module
658  * @mac_hw_ss: MAC HW snapshot
659  * @fw_forwarded_ss: FW forwarded snapshot
660  * @fw_consumed_ss: FW consumed snapshot
661  * @host_ss: Host snapshot
662  *
663  * Return: QDF_STATUS
664  */
665 static QDF_STATUS
666 mgmt_rx_reo_print_snapshots
667 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
668 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
669 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
670 			 struct mgmt_rx_reo_snapshot_params *host_ss)
671 {
672 	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
673 			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
674 			  mac_hw_ss->global_timestamp);
675 	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
676 			  fw_forwarded_ss->valid,
677 			  fw_forwarded_ss->mgmt_pkt_ctr,
678 			  fw_forwarded_ss->global_timestamp);
679 	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
680 			  fw_consumed_ss->valid,
681 			  fw_consumed_ss->mgmt_pkt_ctr,
682 			  fw_consumed_ss->global_timestamp);
683 	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
684 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
685 			  host_ss->global_timestamp);
686 
687 	return QDF_STATUS_SUCCESS;
688 }
689 
690 /**
691  * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
692  * Rx REO snapshots
693  * @mac_hw_ss: MAC HW snapshot
694  * @fw_forwarded_ss: FW forwarded snapshot
695  * @fw_consumed_ss: FW consumed snapshot
696  * @host_ss: Host snapshot
697  * @link: link ID
698  *
699  * Return: QDF_STATUS
700  */
701 static QDF_STATUS
702 mgmt_rx_reo_invalidate_stale_snapshots
703 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
704 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
705 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
706 			 struct mgmt_rx_reo_snapshot_params *host_ss,
707 			 uint8_t link)
708 {
709 	if (!mac_hw_ss->valid)
710 		return QDF_STATUS_SUCCESS;
711 
712 	if (fw_forwarded_ss->valid) {
713 		if (!mgmt_rx_reo_compare_global_timestamps_gte
714 					(mac_hw_ss->global_timestamp,
715 					 fw_forwarded_ss->global_timestamp) ||
716 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
717 					(mac_hw_ss->mgmt_pkt_ctr,
718 					 fw_forwarded_ss->mgmt_pkt_ctr)) {
719 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
720 						    fw_consumed_ss, host_ss);
721 			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
722 					  link);
723 			fw_forwarded_ss->valid = false;
724 		}
725 	}
726 
727 	if (fw_consumed_ss->valid) {
728 		if (!mgmt_rx_reo_compare_global_timestamps_gte
729 					(mac_hw_ss->global_timestamp,
730 					 fw_consumed_ss->global_timestamp) ||
731 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
732 					(mac_hw_ss->mgmt_pkt_ctr,
733 					 fw_consumed_ss->mgmt_pkt_ctr)) {
734 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
735 						    fw_consumed_ss, host_ss);
736 			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
737 					  link);
738 			fw_consumed_ss->valid = false;
739 		}
740 	}
741 
742 	if (host_ss->valid) {
743 		if (!mgmt_rx_reo_compare_global_timestamps_gte
744 					(mac_hw_ss->global_timestamp,
745 					 host_ss->global_timestamp) ||
746 		    !mgmt_rx_reo_compare_pkt_ctrs_gte
747 					(mac_hw_ss->mgmt_pkt_ctr,
748 					 host_ss->mgmt_pkt_ctr)) {
749 			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
750 						    fw_consumed_ss, host_ss);
751 			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
752 					  link);
753 			host_ss->valid = false;
754 		}
755 	}
756 
757 	return QDF_STATUS_SUCCESS;
758 }
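
/*
 * Editor-added illustrative sketch: an FW forwarded snapshot that claims to be
 * ahead of the MAC HW snapshot is inconsistent and gets invalidated by the
 * helper above. Field values are arbitrary; MGMT_RX_REO_EXAMPLES and the
 * function name are hypothetical and the block is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_invalidate_stale_snapshot_example(void)
{
	struct mgmt_rx_reo_snapshot_params mac_hw = {0};
	struct mgmt_rx_reo_snapshot_params fw_forwarded = {0};
	struct mgmt_rx_reo_snapshot_params fw_consumed = {0};
	struct mgmt_rx_reo_snapshot_params host = {0};

	mac_hw.valid = true;
	mac_hw.mgmt_pkt_ctr = 100;
	mac_hw.global_timestamp = 5000;

	/* FW forwarded snapshot is "newer" than what MAC HW has seen */
	fw_forwarded.valid = true;
	fw_forwarded.mgmt_pkt_ctr = 101;
	fw_forwarded.global_timestamp = 5100;

	mgmt_rx_reo_invalidate_stale_snapshots(&mac_hw, &fw_forwarded,
					       &fw_consumed, &host, 0);

	/* The inconsistent FW forwarded snapshot is now marked invalid */
	qdf_assert_always(!fw_forwarded.valid);
}
#endif /* MGMT_RX_REO_EXAMPLES */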
759 
760 /**
761  * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
762  * Rx REO snapshots
763  * @mac_hw_ss: MAC HW snapshot
764  * @fw_forwarded_ss: FW forwarded snapshot
765  * @fw_consumed_ss: FW consumed snapshot
766  * @host_ss: Host snapshot
767  *
768  * Return: QDF_STATUS
769  */
770 static QDF_STATUS
771 mgmt_rx_reo_snapshots_check_sanity
772 			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
773 			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
774 			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
775 			 struct mgmt_rx_reo_snapshot_params *host_ss)
776 {
777 	QDF_STATUS status;
778 
779 	if (!mac_hw_ss->valid) {
780 		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
781 		    host_ss->valid) {
782 			mgmt_rx_reo_err("MAC HW SS is invalid");
783 			status = QDF_STATUS_E_INVAL;
784 			goto fail;
785 		}
786 
787 		return QDF_STATUS_SUCCESS;
788 	}
789 
790 	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
791 		if (host_ss->valid) {
792 			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
793 			status = QDF_STATUS_E_INVAL;
794 			goto fail;
795 		}
796 
797 		return QDF_STATUS_SUCCESS;
798 	}
799 
800 	if (fw_forwarded_ss->valid) {
801 		if (!mgmt_rx_reo_compare_global_timestamps_gte
802 					(mac_hw_ss->global_timestamp,
803 					 fw_forwarded_ss->global_timestamp)) {
804 			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
805 			status = QDF_STATUS_E_INVAL;
806 			goto fail;
807 		}
808 
809 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
810 					(mac_hw_ss->mgmt_pkt_ctr,
811 					 fw_forwarded_ss->mgmt_pkt_ctr)) {
812 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
813 			status = QDF_STATUS_E_INVAL;
814 			goto fail;
815 		}
816 	}
817 
818 	if (fw_consumed_ss->valid) {
819 		if (!mgmt_rx_reo_compare_global_timestamps_gte
820 					(mac_hw_ss->global_timestamp,
821 					 fw_consumed_ss->global_timestamp)) {
822 			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
823 			status = QDF_STATUS_E_INVAL;
824 			goto fail;
825 		}
826 
827 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
828 					(mac_hw_ss->mgmt_pkt_ctr,
829 					 fw_consumed_ss->mgmt_pkt_ctr)) {
830 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
831 			status = QDF_STATUS_E_INVAL;
832 			goto fail;
833 		}
834 	}
835 
836 	if (host_ss->valid) {
837 		if (!mgmt_rx_reo_compare_global_timestamps_gte
838 					(mac_hw_ss->global_timestamp,
839 					 host_ss->global_timestamp)) {
840 			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
841 			status = QDF_STATUS_E_INVAL;
842 			goto fail;
843 		}
844 
845 		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
846 					(mac_hw_ss->mgmt_pkt_ctr,
847 					 host_ss->mgmt_pkt_ctr)) {
848 			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
849 			status = QDF_STATUS_E_INVAL;
850 			goto fail;
851 		}
852 
853 		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
854 			if (!mgmt_rx_reo_compare_global_timestamps_gte
855 					(fw_forwarded_ss->global_timestamp,
856 					 host_ss->global_timestamp)) {
857 				mgmt_rx_reo_err("TS: FW forwarded < host SS");
858 				status = QDF_STATUS_E_INVAL;
859 				goto fail;
860 			}
861 
862 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
863 					(fw_forwarded_ss->mgmt_pkt_ctr,
864 					 host_ss->mgmt_pkt_ctr)) {
865 				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
866 				status = QDF_STATUS_E_INVAL;
867 				goto fail;
868 			}
869 		}
870 
871 		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
872 			if (!mgmt_rx_reo_compare_global_timestamps_gte
873 					(fw_consumed_ss->global_timestamp,
874 					 host_ss->global_timestamp)) {
875 				mgmt_rx_reo_err("TS: FW consumed < host SS");
876 				status = QDF_STATUS_E_INVAL;
877 				goto fail;
878 			}
879 
880 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
881 					(fw_consumed_ss->mgmt_pkt_ctr,
882 					 host_ss->mgmt_pkt_ctr)) {
883 				mgmt_rx_reo_err("CTR: FW consumed < host SS");
884 				status = QDF_STATUS_E_INVAL;
885 				goto fail;
886 			}
887 		}
888 
889 		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
890 			if (!mgmt_rx_reo_compare_global_timestamps_gte
891 					(fw_consumed_ss->global_timestamp,
892 					 host_ss->global_timestamp) &&
893 			    !mgmt_rx_reo_compare_global_timestamps_gte
894 					(fw_forwarded_ss->global_timestamp,
895 					 host_ss->global_timestamp)) {
896 				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
897 				status = QDF_STATUS_E_INVAL;
898 				goto fail;
899 			}
900 
901 			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
902 					(fw_consumed_ss->mgmt_pkt_ctr,
903 					 host_ss->mgmt_pkt_ctr) &&
904 			    !mgmt_rx_reo_compare_pkt_ctrs_gte
905 					(fw_forwarded_ss->mgmt_pkt_ctr,
906 					 host_ss->mgmt_pkt_ctr)) {
907 				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
908 				status = QDF_STATUS_E_INVAL;
909 				goto fail;
910 			}
911 		}
912 	}
913 
914 	return QDF_STATUS_SUCCESS;
915 
916 fail:
917 	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
918 			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
919 			  mac_hw_ss->global_timestamp);
920 	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
921 			  fw_forwarded_ss->valid,
922 			  fw_forwarded_ss->mgmt_pkt_ctr,
923 			  fw_forwarded_ss->global_timestamp);
924 	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
925 			  fw_consumed_ss->valid,
926 			  fw_consumed_ss->mgmt_pkt_ctr,
927 			  fw_consumed_ss->global_timestamp);
928 	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
929 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
930 			  host_ss->global_timestamp);
931 
932 	return status;
933 }
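
/*
 * Editor-added illustrative sketch: a snapshot set that follows the expected
 * ordering (MAC HW >= FW forwarded >= Host, in both packet counter and global
 * timestamp) passes the sanity check above. Values are arbitrary;
 * MGMT_RX_REO_EXAMPLES and the function name are hypothetical and the block is
 * never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_snapshot_sanity_example(void)
{
	struct mgmt_rx_reo_snapshot_params mac_hw = {0};
	struct mgmt_rx_reo_snapshot_params fw_forwarded = {0};
	struct mgmt_rx_reo_snapshot_params fw_consumed = {0};
	struct mgmt_rx_reo_snapshot_params host = {0};

	mac_hw.valid = true;
	mac_hw.mgmt_pkt_ctr = 30;
	mac_hw.global_timestamp = 3000;

	fw_forwarded.valid = true;
	fw_forwarded.mgmt_pkt_ctr = 20;
	fw_forwarded.global_timestamp = 2000;

	host.valid = true;
	host.mgmt_pkt_ctr = 10;
	host.global_timestamp = 1000;

	qdf_assert_always(QDF_IS_STATUS_SUCCESS
			  (mgmt_rx_reo_snapshots_check_sanity
			   (&mac_hw, &fw_forwarded, &fw_consumed, &host)));
}
#endif /* MGMT_RX_REO_EXAMPLES */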
934 
935 /**
936  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
937  * frames an incoming frame should wait for before it gets delivered.
938  * @in_frame_pdev: pdev on which this frame is received
939  * @desc: frame descriptor
940  *
941  * Each frame carries a MGMT pkt number which is local to that link, and a
942  * timestamp which is global across all the links. MAC HW and FW also capture
943  * the same details of the last frame that they have seen. Host also maintains
944  * the details of the last frame it has seen. In total, there are 4 snapshots.
945  * 1. MAC HW snapshot - latest frame seen at MAC HW
946  * 2. FW forwarded snapshot - latest frame forwarded to the Host
947  * 3. FW consumed snapshot - latest frame consumed by the FW
948  * 4. Host/FW consumed snapshot - latest frame seen by the Host
949  * By using all these snapshots, this function tries to compute the wait count
950  * for a given incoming frame on all links.
951  *
952  * Return: QDF_STATUS of operation
953  */
954 static QDF_STATUS
955 wlan_mgmt_rx_reo_algo_calculate_wait_count(
956 		struct wlan_objmgr_pdev *in_frame_pdev,
957 		struct mgmt_rx_reo_frame_descriptor *desc)
958 {
959 	QDF_STATUS status;
960 	uint8_t link;
961 	int8_t in_frame_link;
962 	int frames_pending, delta_fwd_host;
963 	uint8_t snapshot_id;
964 	struct wlan_objmgr_pdev *pdev;
965 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
966 	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
967 	struct mgmt_rx_reo_snapshot_info *snapshot_info;
968 	struct mgmt_rx_reo_snapshot_params snapshot_params
969 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
970 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
971 					    *fw_consumed_ss, *host_ss;
972 	struct mgmt_rx_reo_params *in_frame_params;
973 	struct mgmt_rx_reo_wait_count *wait_count;
974 
975 	if (!in_frame_pdev) {
976 		mgmt_rx_reo_err("pdev is null");
977 		return QDF_STATUS_E_NULL_VALUE;
978 	}
979 
980 	if (!desc) {
981 		mgmt_rx_reo_err("Frame descriptor is null");
982 		return QDF_STATUS_E_NULL_VALUE;
983 	}
984 
985 	if (!desc->rx_params) {
986 		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
987 		return QDF_STATUS_E_NULL_VALUE;
988 	}
989 
990 	in_frame_params = desc->rx_params->reo_params;
991 	if (!in_frame_params) {
992 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
993 		return QDF_STATUS_E_NULL_VALUE;
994 	}
995 
996 	wait_count = &desc->wait_count;
997 
998 	/* Get the MLO link ID of incoming frame */
999 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
1000 	qdf_assert_always(in_frame_link >= 0);
1001 	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
1002 	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link));
1003 
1004 	in_frame_rx_reo_pdev_ctx =
1005 			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
1006 	if (!in_frame_rx_reo_pdev_ctx) {
1007 		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
1008 		return QDF_STATUS_E_FAILURE;
1009 	}
1010 	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
1011 		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
1012 
1013 	/* Iterate over all the valid MLO links */
1014 	for (link = 0; link < MAX_MLO_LINKS; link++) {
1015 		/* No need to wait for any frames on an invalid link */
1016 		if (!mgmt_rx_reo_is_valid_link(link)) {
1017 			frames_pending = 0;
1018 			goto update_pending_frames;
1019 		}
1020 
1021 		pdev = wlan_get_pdev_from_mlo_link_id(link,
1022 						      WLAN_MGMT_RX_REO_ID);
1023 
1024 		/* No need to wait for any frames if the pdev is not found */
1025 		if (!pdev) {
1026 			mgmt_rx_reo_debug("pdev is null for link %d", link);
1027 			frames_pending = 0;
1028 			goto update_pending_frames;
1029 		}
1030 
1031 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
1032 		if (!rx_reo_pdev_ctx) {
1033 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
1034 					pdev);
1035 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1036 			return QDF_STATUS_E_FAILURE;
1037 		}
1038 
1039 		if (!rx_reo_pdev_ctx->init_complete) {
1040 			mgmt_rx_reo_debug("REO init in progress for link %d",
1041 					  link);
1042 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1043 			frames_pending = 0;
1044 			goto update_pending_frames;
1045 		}
1046 
1047 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
1048 		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;
1049 
1050 		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
1051 				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
1052 				 host_ss->global_timestamp);
1053 
1054 		snapshot_id = 0;
1055 		/* Read all the shared snapshots */
1056 		while (snapshot_id <
1057 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
1058 			snapshot_info = &rx_reo_pdev_ctx->
1059 				host_target_shared_snapshot_info[snapshot_id];
1060 
1061 			qdf_mem_zero(&snapshot_params[snapshot_id],
1062 				     sizeof(snapshot_params[snapshot_id]));
1063 
1064 			status = tgt_mgmt_rx_reo_read_snapshot(
1065 					pdev, snapshot_info, snapshot_id,
1066 					&snapshot_params[snapshot_id],
1067 					in_frame_rx_reo_pdev_ctx->raw_snapshots
1068 					[link][snapshot_id]);
1069 
1070 			/* Read operation shouldn't fail */
1071 			if (QDF_IS_STATUS_ERROR(status)) {
1072 				mgmt_rx_reo_err("snapshot(%d) read failed on "
1073 						"link (%d)", snapshot_id, link);
1074 				wlan_objmgr_pdev_release_ref(
1075 						pdev, WLAN_MGMT_RX_REO_ID);
1076 				return status;
1077 			}
1078 
1079 			/* If snapshot is valid, save it in the pdev context */
1080 			if (snapshot_params[snapshot_id].valid) {
1081 				rx_reo_pdev_ctx->
1082 				   last_valid_shared_snapshot[snapshot_id] =
1083 				   snapshot_params[snapshot_id];
1084 			}
1085 			desc->shared_snapshots[link][snapshot_id] =
1086 						snapshot_params[snapshot_id];
1087 
1088 			snapshot_id++;
1089 		}
1090 
1091 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1092 
1093 		mac_hw_ss = &snapshot_params
1094 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1095 		fw_forwarded_ss = &snapshot_params
1096 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
1097 		fw_consumed_ss = &snapshot_params
1098 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1099 
1100 		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
1101 								fw_forwarded_ss,
1102 								fw_consumed_ss,
1103 								host_ss, link);
1104 		if (QDF_IS_STATUS_ERROR(status)) {
1105 			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
1106 					link);
1107 			return status;
1108 		}
1109 
1110 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
1111 								*mac_hw_ss;
1112 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED] =
1113 								*fw_forwarded_ss;
1114 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
1115 								*fw_consumed_ss;
1116 		desc->host_snapshot[link] = *host_ss;
1117 
1118 		status = mgmt_rx_reo_snapshots_check_sanity
1119 			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
1120 		if (QDF_IS_STATUS_ERROR(status)) {
1121 			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
1122 					   link);
1123 			return status;
1124 		}
1125 
1126 		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
1127 				 link, mac_hw_ss->valid,
1128 				 mac_hw_ss->mgmt_pkt_ctr,
1129 				 mac_hw_ss->global_timestamp);
1130 		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
1131 				 link, fw_forwarded_ss->valid,
1132 				 fw_forwarded_ss->mgmt_pkt_ctr,
1133 				 fw_forwarded_ss->global_timestamp);
1134 		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
1135 				 link, fw_consumed_ss->valid,
1136 				 fw_consumed_ss->mgmt_pkt_ctr,
1137 				 fw_consumed_ss->global_timestamp);
1138 
1139 		/* No need to wait for any frames on the same link */
1140 		if (link == in_frame_link) {
1141 			frames_pending = 0;
1142 			goto update_pending_frames;
1143 		}
1144 
1145 		/**
1146 		 * If MAC HW snapshot is invalid, the link has not started
1147 		 * receiving management frames. Set wait count to zero.
1148 		 */
1149 		if (!mac_hw_ss->valid) {
1150 			frames_pending = 0;
1151 			goto update_pending_frames;
1152 		}
1153 
1154 		/**
1155 		 * If host snapshot is invalid, wait for MAX number of frames.
1156 		 * When any frame in this link arrives at host, actual wait
1157 		 * counts will be updated.
1158 		 */
1159 		if (!host_ss->valid) {
1160 			wait_count->per_link_count[link] = UINT_MAX;
1161 			wait_count->total_count += UINT_MAX;
1162 			goto print_wait_count;
1163 		}
1164 
1165 		/**
1166 		 * If MAC HW snapshot sequence number and host snapshot
1167 		 * sequence number are the same, all the frames received on
1168 		 * this link are processed by host. No need to wait for
1169 		 * any frames from this link.
1170 		 */
1171 		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
1172 						   host_ss->mgmt_pkt_ctr)) {
1173 			frames_pending = 0;
1174 			goto update_pending_frames;
1175 		}
1176 
1177 		/**
1178 		 * Ideally, the incoming frame has to wait for only those frames
1179 		 * (on other links) which meet all the below criteria.
1180 		 * 1. Frame's timestamp is less than incoming frame's
1181 		 * 2. Frame is supposed to be consumed by the Host
1182 		 * 3. Frame is not yet seen by the Host.
1183 		 * We may not be able to compute the exact optimal wait count
1184 		 * because HW/FW provides a limited assist.
1185 		 * This algorithm tries to get the best estimate of wait count
1186 		 * by not waiting for those frames where we have a conclusive
1187 		 * evidence that we don't have to wait for those frames.
1188 		 */
1189 
1190 		/**
1191 		 * If this link has already seen a frame whose timestamp is
1192 		 * greater than or equal to incoming frame's timestamp,
1193 		 * then no need to wait for any frames on this link.
1194 		 * If the total wait count becomes zero, then the policy on
1195 		 * whether to deliver such a frame to upper layers is handled
1196 		 * separately.
1197 		 */
1198 		if (mgmt_rx_reo_compare_global_timestamps_gte(
1199 				host_ss->global_timestamp,
1200 				in_frame_params->global_timestamp)) {
1201 			frames_pending = 0;
1202 			goto update_pending_frames;
1203 		}
1204 
1205 		/**
1206 		 * For starters, we only have to wait for the frames that are
1207 		 * seen by MAC HW but not yet seen by Host. The frames which
1208 		 * reach MAC HW later are guaranteed to have a timestamp
1209 		 * greater than incoming frame's timestamp.
1210 		 */
1211 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
1212 					mac_hw_ss->mgmt_pkt_ctr,
1213 					host_ss->mgmt_pkt_ctr);
1214 		qdf_assert_always(frames_pending >= 0);
1215 
1216 		if (frames_pending &&
1217 		    mgmt_rx_reo_compare_global_timestamps_gte
1218 					(mac_hw_ss->global_timestamp,
1219 					 in_frame_params->global_timestamp)) {
1220 			/**
1221 			 * Last frame seen at MAC HW has timestamp greater than
1222 			 * or equal to incoming frame's timestamp. So no need to
1223 			 * wait for that last frame, but we can't conclusively
1224 			 * say anything about timestamp of frames before the
1225 			 * last frame, so try to wait for all of those frames.
1226 			 */
1227 			frames_pending--;
1228 			qdf_assert_always(frames_pending >= 0);
1229 
1230 			if (fw_consumed_ss->valid &&
1231 			    mgmt_rx_reo_compare_global_timestamps_gte(
1232 				fw_consumed_ss->global_timestamp,
1233 				in_frame_params->global_timestamp)) {
1234 				/**
1235 				 * Last frame consumed by the FW has timestamp
1236 				 * greater than or equal to incoming frame's.
1237 				 * That means all the frames from
1238 				 * fw_consumed_ss->mgmt_pkt_ctr to
1239 				 * mac_hw->mgmt_pkt_ctr will have timestamp
1240 				 * greater than or equal to incoming frame's and
1241 				 * hence, no need to wait for those frames.
1242 				 * We just need to wait for frames from
1243 				 * host_ss->mgmt_pkt_ctr to
1244 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
1245 				 * better estimate over the above estimate,
1246 				 * so update frames_pending.
1247 				 */
1248 				frames_pending =
1249 				  mgmt_rx_reo_subtract_pkt_ctrs(
1250 				      fw_consumed_ss->mgmt_pkt_ctr,
1251 				      host_ss->mgmt_pkt_ctr) - 1;
1252 
1253 				qdf_assert_always(frames_pending >= 0);
1254 
1255 				/**
1256 				 * Last frame forwarded to Host has timestamp
1257 				 * less than incoming frame's. That means all
1258 				 * the frames starting from
1259 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
1260 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
1261 				 * the FW and hence, no need to wait for those
1262 				 * frames. We just need to wait for frames
1263 				 * from host_ss->mgmt_pkt_ctr to
1264 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
1265 				 * better estimate over the above estimate,
1266 				 * so update frames_pending.
1267 				 */
1268 				if (fw_forwarded_ss->valid &&
1269 				    !mgmt_rx_reo_compare_global_timestamps_gte(
1270 					fw_forwarded_ss->global_timestamp,
1271 					in_frame_params->global_timestamp)) {
1272 					frames_pending =
1273 					  mgmt_rx_reo_subtract_pkt_ctrs(
1274 					      fw_forwarded_ss->mgmt_pkt_ctr,
1275 					      host_ss->mgmt_pkt_ctr);
1276 
1277 					/**
1278 					 * frames_pending can be negative in
1279 					 * cases when there are no frames
1280 					 * getting forwarded to the Host. No
1281 					 * need to wait for any frames in that
1282 					 * case.
1283 					 */
1284 					if (frames_pending < 0)
1285 						frames_pending = 0;
1286 				}
1287 			}
1288 
1289 			/**
1290 			 * Last frame forwarded to Host has timestamp greater
1291 			 * than or equal to incoming frame's. That means all the
1292 			 * frames from fw_forwarded->mgmt_pkt_ctr to
1293 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
1294 			 * or equal to incoming frame's and hence, no need to
1295 			 * wait for those frames. We may have to just wait for
1296 			 * frames from host_ss->mgmt_pkt_ctr to
1297 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
1298 			 */
1299 			if (fw_forwarded_ss->valid &&
1300 			    mgmt_rx_reo_compare_global_timestamps_gte(
1301 				fw_forwarded_ss->global_timestamp,
1302 				in_frame_params->global_timestamp)) {
1303 				delta_fwd_host =
1304 				  mgmt_rx_reo_subtract_pkt_ctrs(
1305 				    fw_forwarded_ss->mgmt_pkt_ctr,
1306 				    host_ss->mgmt_pkt_ctr) - 1;
1307 
1308 				qdf_assert_always(delta_fwd_host >= 0);
1309 
1310 				/**
1311 				 * This will be a better estimate over the one
1312 				 * we computed using mac_hw_ss but this may or
1313 				 * may not be a better estimate over the
1314 				 * one we computed using fw_consumed_ss.
1315 				 * When timestamps of both fw_consumed_ss and
1316 				 * fw_forwarded_ss are greater than incoming
1317 				 * frame's but timestamp of fw_consumed_ss is
1318 				 * smaller than fw_forwarded_ss, then
1319 				 * frames_pending will be smaller than
1320 				 * delta_fwd_host, the reverse will be true in
1321 				 * other cases. Instead of checking for all
1322 				 * those cases, just waiting for the minimum
1323 				 * among these two should be sufficient.
1324 				 */
1325 				frames_pending = qdf_min(frames_pending,
1326 							 delta_fwd_host);
1327 				qdf_assert_always(frames_pending >= 0);
1328 			}
1329 		}
1330 
1331 update_pending_frames:
1332 			qdf_assert_always(frames_pending >= 0);
1333 
1334 			wait_count->per_link_count[link] = frames_pending;
1335 			wait_count->total_count += frames_pending;
1336 
1337 print_wait_count:
1338 			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
1339 					 link, wait_count->per_link_count[link],
1340 					 wait_count->total_count);
1341 	}
1342 
1343 	return QDF_STATUS_SUCCESS;
1344 }
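
/*
 * Editor-added illustrative sketch: a single-link worked example of the wait
 * count refinement performed above, using only the packet counter arithmetic.
 * The numbers are arbitrary and the real computation also depends on the
 * snapshot timestamps; MGMT_RX_REO_EXAMPLES and the function name are
 * hypothetical and the block is never compiled by default.
 */
#ifdef MGMT_RX_REO_EXAMPLES
static void mgmt_rx_reo_wait_count_example(void)
{
	uint16_t host_ctr = 10;		/* last frame seen by the Host */
	uint16_t fw_consumed_ctr = 14;	/* last frame consumed by the FW,
					 * timestamp >= incoming frame's */
	uint16_t mac_hw_ctr = 20;	/* last frame seen by MAC HW,
					 * timestamp >= incoming frame's */
	int frames_pending;

	/*
	 * First estimate: every frame MAC HW has seen but the Host has not,
	 * minus the last HW frame which is known to be newer than the
	 * incoming frame.
	 */
	frames_pending =
		mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ctr, host_ctr) - 1;
	qdf_assert_always(frames_pending == 9);

	/*
	 * Better estimate: frames from fw_consumed_ctr onwards are also known
	 * to be newer than the incoming frame, so only frames 11 to 13 are
	 * worth waiting for.
	 */
	frames_pending =
		mgmt_rx_reo_subtract_pkt_ctrs(fw_consumed_ctr, host_ctr) - 1;
	qdf_assert_always(frames_pending == 3);
}
#endif /* MGMT_RX_REO_EXAMPLES */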
1345 
1346 /**
1347  * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
1348  * information about a reo list entry for debug purposes.
1349  * @link_id: link id
1350  * @mgmt_pkt_ctr: management packet counter
1351  * @global_timestamp: global time stamp
1352  * @wait_count: wait count values
1353  * @status: status of the entry in the list
1354  * @entry: pointer to reo list entry
1355  */
1356 struct mgmt_rx_reo_list_entry_debug_info {
1357 	uint8_t link_id;
1358 	uint16_t mgmt_pkt_ctr;
1359 	uint32_t global_timestamp;
1360 	struct mgmt_rx_reo_wait_count wait_count;
1361 	uint32_t status;
1362 	struct mgmt_rx_reo_list_entry *entry;
1363 };
1364 
1365 /**
1366  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1367  * @reo_list: Pointer to reorder list
1368  *
1369  * Return: QDF_STATUS
1370  */
1371 static QDF_STATUS
1372 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1373 {
1374 	uint32_t reo_list_size;
1375 	uint32_t index;
1376 	struct mgmt_rx_reo_list_entry *cur_entry;
1377 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1378 
1379 	if (!reo_list) {
1380 		mgmt_rx_reo_err("Pointer to reo list is null");
1381 		return QDF_STATUS_E_NULL_VALUE;
1382 	}
1383 
1384 	qdf_spin_lock_bh(&reo_list->list_lock);
1385 
1386 	reo_list_size = qdf_list_size(&reo_list->list);
1387 
1388 	if (reo_list_size == 0) {
1389 		qdf_spin_unlock_bh(&reo_list->list_lock);
1390 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1391 				  reo_list_size);
1392 		return QDF_STATUS_SUCCESS;
1393 	}
1394 
1395 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1396 	if (!debug_info) {
1397 		qdf_spin_unlock_bh(&reo_list->list_lock);
1398 		mgmt_rx_reo_err("Memory allocation failed");
1399 		return QDF_STATUS_E_NOMEM;
1400 	}
1401 
1402 	index = 0;
1403 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1404 		debug_info[index].link_id =
1405 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1406 		debug_info[index].mgmt_pkt_ctr =
1407 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1408 		debug_info[index].global_timestamp =
1409 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1410 		debug_info[index].wait_count = cur_entry->wait_count;
1411 		debug_info[index].status = cur_entry->status;
1412 		debug_info[index].entry = cur_entry;
1413 
1414 		++index;
1415 	}
1416 
1417 	qdf_spin_unlock_bh(&reo_list->list_lock);
1418 
1419 	mgmt_rx_reo_debug("Reorder list");
1420 	mgmt_rx_reo_debug("##################################################");
1421 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1422 			  reo_list_size);
1423 	for (index = 0; index < reo_list_size; index++) {
1424 		uint8_t link_id;
1425 
1426 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1427 				  index, debug_info[index].link_id,
1428 				  debug_info[index].global_timestamp,
1429 				  debug_info[index].mgmt_pkt_ctr,
1430 				  debug_info[index].status,
1431 				  debug_info[index].entry);
1432 
1433 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1434 				  debug_info[index].wait_count.total_count);
1435 
1436 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1437 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1438 					  link_id, debug_info[index].wait_count.
1439 					  per_link_count[link_id]);
1440 	}
1441 	mgmt_rx_reo_debug("##################################################");
1442 
1443 	qdf_mem_free(debug_info);
1444 
1445 	return QDF_STATUS_SUCCESS;
1446 }
1447 
1448 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1449 /**
1450  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1451  * related to frames going out of the reorder module
1452  * @reo_ctx: Pointer to reorder context
1453  *
1454  * API to print the stats related to frames going out of the management
1455  * Rx reorder module.
1456  *
1457  * Return: QDF_STATUS
1458  */
1459 static QDF_STATUS
1460 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1461 {
1462 	struct reo_egress_frame_stats *stats;
1463 	uint8_t link_id;
1464 	uint8_t reason;
1465 	uint64_t total_delivery_attempts_count = 0;
1466 	uint64_t total_delivery_success_count = 0;
1467 	uint64_t total_premature_delivery_count = 0;
1468 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
1469 	uint64_t delivery_count_per_reason[MGMT_RX_REO_RELEASE_REASON_MAX] = {0};
1470 	uint64_t total_delivery_count = 0;
1471 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
1472 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
1473 
1474 	if (!reo_ctx)
1475 		return QDF_STATUS_E_NULL_VALUE;
1476 
1477 	stats = &reo_ctx->egress_frame_debug_info.stats;
1478 
1479 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1480 		total_delivery_attempts_count +=
1481 				stats->delivery_attempts_count[link_id];
1482 		total_delivery_success_count +=
1483 				stats->delivery_success_count[link_id];
1484 		total_premature_delivery_count +=
1485 				stats->premature_delivery_count[link_id];
1486 	}
1487 
1488 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1489 		for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX;
1490 		     reason++)
1491 			delivery_count_per_link[link_id] +=
1492 				stats->delivery_count[link_id][reason];
1493 		total_delivery_count += delivery_count_per_link[link_id];
1494 	}
1495 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++)
1496 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1497 			delivery_count_per_reason[reason] +=
1498 				stats->delivery_count[link_id][reason];
1499 
1500 	mgmt_rx_reo_alert("Egress frame stats:");
1501 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
1502 	mgmt_rx_reo_alert("\t------------------------------------------");
1503 	mgmt_rx_reo_alert("\t|link id   |Attempts |Success |Premature |");
1504 	mgmt_rx_reo_alert("\t|          | count   | count  | count    |");
1505 	mgmt_rx_reo_alert("\t------------------------------------------");
1506 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1507 		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
1508 				  stats->delivery_attempts_count[link_id],
1509 				  stats->delivery_success_count[link_id],
1510 				  stats->premature_delivery_count[link_id]);
1511 		mgmt_rx_reo_alert("\t------------------------------------------");
1512 	}
1513 	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
1514 			  total_delivery_attempts_count,
1515 			  total_delivery_success_count,
1516 			  total_premature_delivery_count);
1517 
1518 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
1519 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
1520 	mgmt_rx_reo_alert("\tRELEASE_REASON_ZERO_WAIT_COUNT - 0x%lx",
1521 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
1522 	mgmt_rx_reo_alert("\tRELEASE_REASON_AGED_OUT - 0x%lx",
1523 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT);
1524 	mgmt_rx_reo_alert("\tRELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1525 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1526 	mgmt_rx_reo_alert("\tRELEASE_REASON_LIST_MAX_SIZE_EXCEEDED - 0x%lx",
1527 			  MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED);
1528 
1529 	qdf_mem_set(delivery_reason_stats_boarder_a,
1530 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
1531 	qdf_mem_set(delivery_reason_stats_boarder_b,
1532 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');
1533 
1534 	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
1535 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
1536 			  "", "", "", "", "", "");
1537 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
1538 			  "0", "1", "2", "3", "4", "5");
1539 	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1540 
1541 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++) {
1542 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
1543 				  reason, stats->delivery_count[0][reason],
1544 				  stats->delivery_count[1][reason],
1545 				  stats->delivery_count[2][reason],
1546 				  stats->delivery_count[3][reason],
1547 				  stats->delivery_count[4][reason],
1548 				  stats->delivery_count[5][reason],
1549 				  delivery_count_per_reason[reason]);
1550 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1551 	}
1552 	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1553 			  "", delivery_count_per_link[0],
1554 			  delivery_count_per_link[1],
1555 			  delivery_count_per_link[2],
1556 			  delivery_count_per_link[3],
1557 			  delivery_count_per_link[4],
1558 			  delivery_count_per_link[5],
1559 			  total_delivery_count);
1560 
1561 	return QDF_STATUS_SUCCESS;
1562 }
1563 
1564 /**
1565  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1566  * frame exiting the reorder module. Logging is done before attempting the frame
1567  * delivery to upper layers.
1568  * @reo_ctx: management rx reorder context
1569  * @entry: Pointer to reorder list entry
1570  *
1571  * Return: QDF_STATUS of operation
1572  */
1573 static QDF_STATUS
1574 mgmt_rx_reo_log_egress_frame_before_delivery(
1575 					struct mgmt_rx_reo_context *reo_ctx,
1576 					struct mgmt_rx_reo_list_entry *entry)
1577 {
1578 	struct reo_egress_debug_info *egress_frame_debug_info;
1579 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1580 	struct reo_egress_frame_stats *stats;
1581 	uint8_t link_id;
1582 
1583 	if (!reo_ctx || !entry)
1584 		return QDF_STATUS_E_NULL_VALUE;
1585 
1586 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1587 
1588 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1589 			[egress_frame_debug_info->next_index];
1590 
1591 	cur_frame_debug_info->link_id =
1592 				mgmt_rx_reo_get_link_id(entry->rx_params);
1593 	cur_frame_debug_info->mgmt_pkt_ctr =
1594 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
1595 	cur_frame_debug_info->global_timestamp =
1596 				mgmt_rx_reo_get_global_ts(entry->rx_params);
1597 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
1598 	cur_frame_debug_info->final_wait_count = entry->wait_count;
1599 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
1600 		     entry->shared_snapshots,
1601 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
1602 			     sizeof(entry->shared_snapshots)));
1603 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
1604 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
1605 			     sizeof(entry->host_snapshot)));
1606 	cur_frame_debug_info->insertion_ts = entry->insertion_ts;
1607 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
1608 	cur_frame_debug_info->removal_ts = entry->removal_ts;
1609 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
1610 	cur_frame_debug_info->release_reason = entry->release_reason;
1611 	cur_frame_debug_info->is_premature_delivery =
1612 						entry->is_premature_delivery;
1613 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
1614 
1615 	stats = &egress_frame_debug_info->stats;
1616 	link_id = cur_frame_debug_info->link_id;
1617 	stats->delivery_attempts_count[link_id]++;
1618 	if (entry->is_premature_delivery)
1619 		stats->premature_delivery_count[link_id]++;
1620 
1621 	return QDF_STATUS_SUCCESS;
1622 }
1623 
1624 /**
1625  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1626  * frame exiting the reorder module. Logging is done after attempting the frame
1627  * delivery to upper layer.
1628  * @reo_ctx: management rx reorder context
1629  * @entry: Pointer to reorder list entry
1630  *
1631  * Return: QDF_STATUS of operation
1632  */
1633 static QDF_STATUS
1634 mgmt_rx_reo_log_egress_frame_after_delivery(
1635 					struct mgmt_rx_reo_context *reo_ctx,
1636 					struct mgmt_rx_reo_list_entry *entry)
1637 {
1638 	struct reo_egress_debug_info *egress_frame_debug_info;
1639 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1640 	struct reo_egress_frame_stats *stats;
1641 
1642 	if (!reo_ctx)
1643 		return QDF_STATUS_E_NULL_VALUE;
1644 
1645 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1646 
1647 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1648 			[egress_frame_debug_info->next_index];
1649 
1650 	cur_frame_debug_info->is_delivered = entry->is_delivered;
1651 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
1652 					cur_frame_debug_info->egress_timestamp;
1653 
1654 	egress_frame_debug_info->next_index++;
1655 	egress_frame_debug_info->next_index %=
1656 				MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1657 	if (egress_frame_debug_info->next_index == 0)
1658 		egress_frame_debug_info->wrap_aroud = true;
1659 
1660 	stats = &egress_frame_debug_info->stats;
1661 	if (entry->is_delivered) {
1662 		uint8_t link_id = cur_frame_debug_info->link_id;
1663 		uint8_t release_reason = cur_frame_debug_info->release_reason;
1664 
1665 		stats->delivery_count[link_id][release_reason]++;
1666 		stats->delivery_success_count[link_id]++;
1667 	}
1668 
1669 	return QDF_STATUS_SUCCESS;
1670 }
1671 
1672 /**
1673  * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
1674  * about the latest frames leaving the reorder module
1675  * @reo_ctx: management rx reorder context
1676  * @num_frames: Number of frames for which the debug information is to be
1677  * printed. If @num_frames is 0, then debug information about all the frames
1678  * in the ring buffer will be printed.
1679  *
1680  * Return: QDF_STATUS of operation
1681  */
1682 static QDF_STATUS
1683 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
1684 					  uint16_t num_frames)
1685 {
1686 	struct reo_egress_debug_info *egress_frame_debug_info;
1687 	int start_index;
1688 	uint16_t index;
1689 	uint16_t entry;
1690 	uint16_t num_valid_entries;
1691 	uint16_t num_entries_to_print;
1692 	char *boarder;
1693 
1694 	if (!reo_ctx)
1695 		return QDF_STATUS_E_NULL_VALUE;
1696 
1697 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1698 
1699 	if (egress_frame_debug_info->wrap_aroud)
1700 		num_valid_entries = MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1701 	else
1702 		num_valid_entries = egress_frame_debug_info->next_index;
1703 
1704 	if (num_frames == 0) {
1705 		num_entries_to_print = num_valid_entries;
1706 
1707 		if (egress_frame_debug_info->wrap_aroud)
1708 			start_index = egress_frame_debug_info->next_index;
1709 		else
1710 			start_index = 0;
1711 	} else {
1712 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
1713 
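		/*
		 * The frame list is a circular ring buffer. Walk back from
		 * next_index by the number of entries to be printed; the ring
		 * size is added before the modulo so that the intermediate
		 * value never goes negative.
		 */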
1714 		start_index = (egress_frame_debug_info->next_index -
1715 			       num_entries_to_print +
1716 			       MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX)
1717 			      % MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1718 
1719 		qdf_assert_always(start_index >= 0 &&
1720 				  start_index < MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX);
1721 	}
1722 
1723 	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
1724 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
1725 				num_frames,
1726 				egress_frame_debug_info->wrap_aroud,
1727 				egress_frame_debug_info->next_index);
1728 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
1729 				start_index, num_entries_to_print);
1730 
1731 	if (!num_entries_to_print)
1732 		return QDF_STATUS_SUCCESS;
1733 
1734 	boarder = egress_frame_debug_info->boarder;
1735 
1736 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1737 	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%5s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1738 				"No.", "CPU", "Link", "SeqNo", "Global ts",
1739 				"Ingress ts", "Insert. ts", "Removal ts",
1740 				"Egress ts", "E Dur", "W Dur", "Flags", "Rea.",
1741 				"Final wait count", "Initial wait count",
1742 				"Snapshot : link 0", "Snapshot : link 1",
1743 				"Snapshot : link 2", "Snapshot : link 3",
1744 				"Snapshot : link 4", "Snapshot : link 5");
1745 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1746 
1747 	index = start_index;
1748 	for (entry = 0; entry < num_entries_to_print; entry++) {
1749 		struct reo_egress_debug_frame_info *info;
1750 		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
1751 		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1752 		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1753 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
1754 		char flag_premature_delivery = ' ';
1755 		char flag_error = ' ';
1756 		uint8_t link;
1757 
1758 		info = &reo_ctx->egress_frame_debug_info.frame_list[index];
1759 
1760 		if (!info->is_delivered)
1761 			flag_error = 'E';
1762 
1763 		if (info->is_premature_delivery)
1764 			flag_premature_delivery = 'P';
1765 
1766 		snprintf(flags, sizeof(flags), "%c %c", flag_error,
1767 			 flag_premature_delivery);
1768 		snprintf(initial_wait_count, sizeof(initial_wait_count),
1769 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1770 			 info->initial_wait_count.total_count,
1771 			 info->initial_wait_count.per_link_count[0],
1772 			 info->initial_wait_count.per_link_count[1],
1773 			 info->initial_wait_count.per_link_count[2],
1774 			 info->initial_wait_count.per_link_count[3],
1775 			 info->initial_wait_count.per_link_count[4],
1776 			 info->initial_wait_count.per_link_count[5]);
1777 		snprintf(final_wait_count, sizeof(final_wait_count),
1778 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1779 			 info->final_wait_count.total_count,
1780 			 info->final_wait_count.per_link_count[0],
1781 			 info->final_wait_count.per_link_count[1],
1782 			 info->final_wait_count.per_link_count[2],
1783 			 info->final_wait_count.per_link_count[3],
1784 			 info->final_wait_count.per_link_count[4],
1785 			 info->final_wait_count.per_link_count[5]);
1786 
1787 		for (link = 0; link < MAX_MLO_LINKS; link++) {
1788 			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1789 			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1790 			char fw_forwaded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1791 			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1792 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
1793 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
1794 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
1795 			struct mgmt_rx_reo_snapshot_params *host_ss;
1796 
1797 			mac_hw_ss = &info->shared_snapshots
1798 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1799 			fw_consumed_ss = &info->shared_snapshots
1800 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1801 			fw_forwarded_ss = &info->shared_snapshots
1802 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
1803 			host_ss = &info->host_snapshot[link];
1804 
1805 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
1806 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
1807 				 mac_hw_ss->global_timestamp);
1808 			snprintf(fw_consumed, sizeof(fw_consumed),
1809 				 "(%1u, %5u, %10u)",
1810 				 fw_consumed_ss->valid,
1811 				 fw_consumed_ss->mgmt_pkt_ctr,
1812 				 fw_consumed_ss->global_timestamp);
1813 			snprintf(fw_forwaded, sizeof(fw_forwaded),
1814 				 "(%1u, %5u, %10u)",
1815 				 fw_forwarded_ss->valid,
1816 				 fw_forwarded_ss->mgmt_pkt_ctr,
1817 				 fw_forwarded_ss->global_timestamp);
1818 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
1819 				 host_ss->valid,
1820 				 host_ss->mgmt_pkt_ctr,
1821 				 host_ss->global_timestamp);
1822 			snprintf(snapshots[link], sizeof(snapshots[link]),
1823 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
1824 				 fw_forwaded, host);
1825 		}
1826 
1827 		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1828 					entry, info->cpu_id, info->link_id,
1829 					info->mgmt_pkt_ctr,
1830 					info->global_timestamp,
1831 					info->ingress_timestamp,
1832 					info->insertion_ts, info->removal_ts,
1833 					info->egress_timestamp,
1834 					info->egress_duration,
1835 					info->removal_ts - info->insertion_ts,
1836 					flags, info->release_reason,
1837 					final_wait_count, initial_wait_count,
1838 					snapshots[0], snapshots[1],
1839 					snapshots[2], snapshots[3],
1840 					snapshots[4], snapshots[5]);
1841 		mgmt_rx_reo_alert_no_fl("%s", boarder);
1842 
1843 		index++;
1844 		index %= MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1845 	}
1846 
1847 	return QDF_STATUS_SUCCESS;
1848 }
1849 #else
1850 /**
1851  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1852  * related to frames going out of the reorder module
1853  * @reo_ctx: Pointer to reorder context
1854  *
1855  * API to print the stats related to frames going out of the management
1856  * Rx reorder module.
1857  *
1858  * Return: QDF_STATUS
1859  */
1860 static QDF_STATUS
1861 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1862 {
1863 	return QDF_STATUS_SUCCESS;
1864 }
1865 
1866 /**
1867  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1868  * frame exiting the reorder module. Logging is done before attempting the frame
1869  * delivery to upper layers.
1870  * @reo_ctx: management rx reorder context
1871  * @entry: Pointer to reorder list entry
1872  *
1873  * Return: QDF_STATUS of operation
1874  */
1875 static QDF_STATUS
1876 mgmt_rx_reo_log_egress_frame_before_delivery(
1877 					struct mgmt_rx_reo_context *reo_ctx,
1878 					struct mgmt_rx_reo_list_entry *entry)
1879 {
1880 	return QDF_STATUS_SUCCESS;
1881 }
1882 
1883 /**
1884  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1885  * frame exiting the reorder module. Logging is done after attempting the frame
1886  * delivery to upper layer.
1887  * @reo_ctx: management rx reorder context
1888  * @entry: Pointer to reorder list entry
1890  *
1891  * Return: QDF_STATUS of operation
1892  */
1893 static QDF_STATUS
1894 mgmt_rx_reo_log_egress_frame_after_delivery(
1895 					struct mgmt_rx_reo_context *reo_ctx,
1896 					struct mgmt_rx_reo_list_entry *entry)
1897 {
1898 	return QDF_STATUS_SUCCESS;
1899 }
1900 
1901 /**
1902  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
1903  * the latest frames leaving the reorder module
1904  * @reo_ctx: management rx reorder context
1905  *
1906  * Return: QDF_STATUS of operation
1907  */
1908 static QDF_STATUS
1909 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
1910 {
1911 	return QDF_STATUS_SUCCESS;
1912 }
1913 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
1914 
1915 /**
1916  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
1917  * for releasing the reorder list entry to the upper layer.
1919  * @entry: List entry
1920  *
1921  * This API expects the caller to acquire the spin lock protecting the reorder
1922  * list.
1923  *
1924  * Return: Bitmap of the reasons for releasing the frame.
1925  */
1926 static uint8_t
1927 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
1928 {
1929 	uint8_t release_reason = 0;
1930 
1931 	if (!entry)
1932 		return 0;
1933 
1934 	if (MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry))
1935 		release_reason |=
1936 		   MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED;
1937 
1938 	if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
1939 		release_reason |=
1940 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;
1941 
1942 	if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
1943 		release_reason |=
1944 				MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
1945 
1946 	if (MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
1947 		release_reason |=
1948 		MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
1949 
1950 	return release_reason;
1951 }
1952 
1953 /**
1954  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
1955  * @reo_list: Pointer to reorder list
1956  * @entry: List entry
1957  *
1958  * API to send the frame to the upper layer. This API has to be called only
1959  * for entries which can be released to the upper layer. It is the caller's
1960  * responsibility to ensure that the entry can be released (by using the API
1961  * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
1962  * acquiring the lock which serializes the frame delivery to the upper layers.
1963  *
1964  * Return: QDF_STATUS
1965  */
1966 static QDF_STATUS
1967 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
1968 			       struct mgmt_rx_reo_list_entry *entry)
1969 {
1970 	uint8_t release_reason;
1971 	uint8_t link_id;
1972 	uint32_t entry_global_ts;
1973 	QDF_STATUS status;
1974 	QDF_STATUS temp;
1975 	struct mgmt_rx_reo_context *reo_context;
1976 
1977 	qdf_assert_always(reo_list);
1978 	qdf_assert_always(entry);
1979 
1980 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
1981 	qdf_assert_always(reo_context);
1982 
1983 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
1984 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
1985 
1986 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
1987 
1988 	qdf_assert_always(release_reason != 0);
1989 
1990 	entry->is_delivered = false;
1991 	entry->is_premature_delivery = false;
1992 	entry->release_reason = release_reason;
1993 
1994 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
1995 		entry->is_premature_delivery = true;
1996 		status = mgmt_rx_reo_handle_potential_premature_delivery(
1997 						reo_context, entry_global_ts);
1998 		if (QDF_IS_STATUS_ERROR(status))
1999 			goto exit;
2000 	}
2001 
2002 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
2003 							      entry);
2004 	if (QDF_IS_STATUS_ERROR(status))
2005 		goto exit;
2006 
2007 	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
2008 						 entry->rx_params);
2009 	/* Above call frees nbuf and rx_params, make it null explicitly */
2010 	entry->nbuf = NULL;
2011 	entry->rx_params = NULL;
2012 
2013 	if (QDF_IS_STATUS_ERROR(status))
2014 		goto exit_log;
2015 
2016 	entry->is_delivered = true;
2017 
2018 	status = QDF_STATUS_SUCCESS;
2019 
2020 exit_log:
2021 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry);
2022 	if (QDF_IS_STATUS_ERROR(temp))
2023 		status = temp;
2024 exit:
2025 	/**
2026 	 * Release the reference taken when the entry is inserted into
2027 	 * the reorder list
2028 	 */
2029 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2030 
2031 	return status;
2032 }
2033 
2034 /**
2035  * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
2036  * list entry can be sent to upper layers.
2037  * @reo_list: Pointer to reorder list
2038  * @entry: List entry
2039  *
2040  * Return: true if the entry is ready to be sent to upper layers, else false
2041  */
2042 static bool
2043 mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
2044 					   struct mgmt_rx_reo_list_entry *entry)
2045 {
2046 	if (!reo_list || !entry)
2047 		return false;
2048 
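	/*
	 * An entry is ready to be sent up when the reorder list has exceeded
	 * its maximum size, the entry's wait count has dropped to zero, the
	 * entry has aged out, or the entry is older than the latest aged out
	 * frame.
	 */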
2049 	return mgmt_rx_reo_list_max_size_exceeded(reo_list) ||
2050 	       !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
2051 	       entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
2052 	       MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME
2053 	       (entry);
2054 }
2055 
2056 /**
2057  * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
2058  * @reo_context: Pointer to management Rx reorder context
2059  *
2060  * This API releases the entries from the reorder list based on the following
2061  * conditions.
2062  *   a) Entries with total wait count equal to 0
2063  *   b) Entries which have timed out, or entries with a global time stamp <=
2064  *      the global time stamp of the latest timed out frame. Entries can only
2065  *      be released in increasing order of the global time stamp, so all
2066  *      entries with a global time stamp <= that of the latest timed out frame
2067  *      have to be released.
2068  *
2069  * Return: QDF_STATUS
2070  */
2071 static QDF_STATUS
2072 mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
2073 {
2074 	struct mgmt_rx_reo_list *reo_list;
2075 	QDF_STATUS status;
2076 
2077 	if (!reo_context) {
2078 		mgmt_rx_reo_err("reo context is null");
2079 		return QDF_STATUS_E_NULL_VALUE;
2080 	}
2081 
2082 	reo_list = &reo_context->reo_list;
2083 
2084 	qdf_spin_lock(&reo_context->frame_release_lock);
2085 
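	/*
	 * Release frames one at a time from the head of the reorder list as
	 * long as the entry at the head is ready to be sent up. Since the
	 * list is sorted by global time stamp, this preserves the delivery
	 * order.
	 */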
2086 	while (1) {
2087 		struct mgmt_rx_reo_list_entry *first_entry;
2088 		/* TODO yield if release_count > THRESHOLD */
2089 		uint16_t release_count = 0;
2090 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame =
2091 					&reo_list->ts_last_released_frame;
2092 		uint32_t entry_global_ts;
2093 
2094 		qdf_spin_lock_bh(&reo_list->list_lock);
2095 
2096 		first_entry = qdf_list_first_entry_or_null(
2097 			&reo_list->list, struct mgmt_rx_reo_list_entry, node);
2098 
2099 		if (!first_entry) {
2100 			status = QDF_STATUS_SUCCESS;
2101 			goto exit_unlock_list_lock;
2102 		}
2103 
2104 		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
2105 								first_entry)) {
2106 			status = QDF_STATUS_SUCCESS;
2107 			goto exit_unlock_list_lock;
2108 		}
2109 
2110 		if (mgmt_rx_reo_list_max_size_exceeded(reo_list))
2111 			first_entry->status |=
2112 				MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;
2113 
2114 		status = qdf_list_remove_node(&reo_list->list,
2115 					      &first_entry->node);
2116 		if (QDF_IS_STATUS_ERROR(status)) {
2117 			status = QDF_STATUS_E_FAILURE;
2118 			goto exit_unlock_list_lock;
2119 		}
2120 		first_entry->removal_ts = qdf_get_log_timestamp();
2121 
2122 		/**
2123 		 * If the last released frame's global time stamp is invalid,
2124 		 * the current frame is the first frame to be released to the
2125 		 * upper layer from the reorder list. Blindly update the last
2126 		 * released frame's global time stamp with the current frame's
2127 		 * global time stamp and mark it as valid. If the last released
2128 		 * frame's global time stamp is valid and the current frame's
2129 		 * global time stamp is >= the last released frame's global
2130 		 * time stamp, deliver the current frame to the upper layer
2131 		 * and update the last released frame's global time stamp.
2132 		 */
2133 		entry_global_ts =
2134 			mgmt_rx_reo_get_global_ts(first_entry->rx_params);
2135 
2136 		if (!ts_last_released_frame->valid ||
2137 		    mgmt_rx_reo_compare_global_timestamps_gte(
2138 			entry_global_ts, ts_last_released_frame->global_ts)) {
2139 			struct mgmt_rx_event_params *params;
2140 
2141 			params = first_entry->rx_params;
2142 
2143 			ts_last_released_frame->global_ts = entry_global_ts;
2144 			ts_last_released_frame->start_ts =
2145 					mgmt_rx_reo_get_start_ts(params);
2146 			ts_last_released_frame->end_ts =
2147 					mgmt_rx_reo_get_end_ts(params);
2148 			ts_last_released_frame->valid = true;
2149 
2150 			qdf_timer_mod
2151 				(&reo_list->global_mgmt_rx_inactivity_timer,
2152 				 MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
2153 		} else {
2154 			/**
2155 			 * This should never happen. All frames older than the
2156 			 * last frame released from the reorder list are
2157 			 * discarded at the entry to the reorder algorithm itself.
2158 			 */
2159 			qdf_assert_always(first_entry->is_parallel_rx);
2160 		}
2161 
2162 		qdf_spin_unlock_bh(&reo_list->list_lock);
2163 
2164 		status = mgmt_rx_reo_list_entry_send_up(reo_list,
2165 							first_entry);
2166 		if (QDF_IS_STATUS_ERROR(status)) {
2167 			status = QDF_STATUS_E_FAILURE;
2168 			qdf_mem_free(first_entry);
2169 			goto exit_unlock_frame_release_lock;
2170 		}
2171 
2172 		qdf_mem_free(first_entry);
2173 		release_count++;
2174 	}
2175 
2176 	status = QDF_STATUS_SUCCESS;
2177 	goto exit_unlock_frame_release_lock;
2178 
2179 exit_unlock_list_lock:
2180 	qdf_spin_unlock_bh(&reo_list->list_lock);
2181 exit_unlock_frame_release_lock:
2182 	qdf_spin_unlock(&reo_context->frame_release_lock);
2183 
2184 	return status;
2185 }
2186 
2187 /**
2188  * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
2189  * @arg: Argument to timer handler
2190  *
2191  * This is the handler for periodic ageout timer used to timeout entries in the
2192  * reorder list.
2193  *
2194  * Return: void
2195  */
2196 static void
2197 mgmt_rx_reo_list_ageout_timer_handler(void *arg)
2198 {
2199 	struct mgmt_rx_reo_list *reo_list = arg;
2200 	struct mgmt_rx_reo_list_entry *cur_entry;
2201 	uint64_t cur_ts;
2202 	QDF_STATUS status;
2203 	struct mgmt_rx_reo_context *reo_context;
2204 	/**
2205 	 * Pointer to the reorder list entry of the latest aged out frame.
2206 	 * The latest aged out frame is the aged out frame in the reorder
2207 	 * list which has the largest global time stamp value.
2208 	 */
2209 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
2210 
2211 	qdf_assert_always(reo_list);
2212 
2213 	qdf_timer_mod(&reo_list->ageout_timer,
2214 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
2215 
2216 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2217 	qdf_assert_always(reo_context);
2218 
2219 	qdf_spin_lock_bh(&reo_list->list_lock);
2220 
2221 	cur_ts = qdf_get_log_timestamp();
2222 
2223 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2224 		if (cur_ts - cur_entry->insertion_ts >=
2225 		    reo_list->list_entry_timeout_us) {
2226 			latest_aged_out_entry = cur_entry;
2227 			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
2228 		}
2229 	}
2230 
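	/*
	 * All entries preceding the latest aged out entry in the list are,
	 * by the sorted order, older than it. Mark them so that they are
	 * released along with the aged out frames.
	 */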
2231 	if (latest_aged_out_entry) {
2232 		qdf_list_for_each(&reo_list->list, cur_entry, node) {
2233 			if (cur_entry == latest_aged_out_entry)
2234 				break;
2235 			cur_entry->status |= MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
2236 		}
2237 	}
2238 
2239 	qdf_spin_unlock_bh(&reo_list->list_lock);
2240 
2241 	if (latest_aged_out_entry) {
2242 		status = mgmt_rx_reo_list_release_entries(reo_context);
2243 		if (QDF_IS_STATUS_ERROR(status)) {
2244 			mgmt_rx_reo_err("Failed to release entries, ret = %d",
2245 					status);
2246 			return;
2247 		}
2248 	}
2249 }
2250 
2251 /**
2252  * mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler() - Timer handler
2253  * for global management Rx inactivity timer
2254  * @arg: Argument to timer handler
2255  *
2256  * This is the timer handler for tracking management Rx inactivity across
2257  * links.
2258  *
2259  * Return: void
2260  */
2261 static void
2262 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler(void *arg)
2263 {
2264 	struct mgmt_rx_reo_list *reo_list = arg;
2265 	struct mgmt_rx_reo_context *reo_context;
2266 	struct mgmt_rx_reo_global_ts_info *ts_last_released_frame;
2267 
2268 	qdf_assert_always(reo_list);
2269 	ts_last_released_frame = &reo_list->ts_last_released_frame;
2270 
2271 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
2272 	qdf_assert_always(reo_context);
2273 
2274 	qdf_spin_lock(&reo_context->frame_release_lock);
2275 	qdf_spin_lock_bh(&reo_list->list_lock);
2276 
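	/*
	 * After prolonged management Rx inactivity, the global time stamp of
	 * the last released frame is no longer a meaningful reference point.
	 * Clear it so that subsequent frames are not evaluated against a
	 * stale value.
	 */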
2277 	qdf_mem_zero(ts_last_released_frame, sizeof(*ts_last_released_frame));
2278 
2279 	qdf_spin_unlock_bh(&reo_list->list_lock);
2280 	qdf_spin_unlock(&reo_context->frame_release_lock);
2281 }
2282 
2283 /**
2284  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
2285  * frame received.
2286  * @frame_desc: Pointer to the frame descriptor
2287  * @entry: Pointer to the list entry
2288  *
2289  * This API prepares the reorder list entry corresponding to a management frame
2290  * to be consumed by host. This entry would be inserted at the appropriate
2291  * position in the reorder list.
2292  *
2293  * Return: QDF_STATUS
2294  */
2295 static QDF_STATUS
2296 mgmt_rx_reo_prepare_list_entry(
2297 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
2298 		struct mgmt_rx_reo_list_entry **entry)
2299 {
2300 	struct mgmt_rx_reo_list_entry *list_entry;
2301 	struct wlan_objmgr_pdev *pdev;
2302 	uint8_t link_id;
2303 
2304 	if (!frame_desc) {
2305 		mgmt_rx_reo_err("frame descriptor is null");
2306 		return QDF_STATUS_E_NULL_VALUE;
2307 	}
2308 
2309 	if (!entry) {
2310 		mgmt_rx_reo_err("Pointer to list entry is null");
2311 		return QDF_STATUS_E_NULL_VALUE;
2312 	}
2313 
2314 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2315 
2316 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_ID);
2317 	if (!pdev) {
2318 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
2319 				link_id);
2320 		return QDF_STATUS_E_NULL_VALUE;
2321 	}
2322 
2323 	list_entry = qdf_mem_malloc(sizeof(*list_entry));
2324 	if (!list_entry) {
2325 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2326 		mgmt_rx_reo_err("List entry allocation failed");
2327 		return QDF_STATUS_E_NOMEM;
2328 	}
2329 
2330 	list_entry->pdev = pdev;
2331 	list_entry->nbuf = frame_desc->nbuf;
2332 	list_entry->rx_params = frame_desc->rx_params;
2333 	list_entry->wait_count = frame_desc->wait_count;
2334 	list_entry->initial_wait_count = frame_desc->wait_count;
2335 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
2336 		     qdf_min(sizeof(list_entry->shared_snapshots),
2337 			     sizeof(frame_desc->shared_snapshots)));
2338 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
2339 		     qdf_min(sizeof(list_entry->host_snapshot),
2340 			     sizeof(frame_desc->host_snapshot)));
2341 	list_entry->status = 0;
2342 	if (list_entry->wait_count.total_count)
2343 		list_entry->status |=
2344 			MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2345 
2346 	*entry = list_entry;
2347 
2348 	return QDF_STATUS_SUCCESS;
2349 }
2350 
2351 /**
2352  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
2353  * on the wait count of a frame received after that on air.
2354  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
2355  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
2356  *
2357  * This API optimizes the wait count of a frame based on the wait count of
2358  * a frame received after that on air. Old frame refers to the frame received
2359  * first on the air and new frame refers to the frame received after that.
2360  * The fundamental idea is that the wait count of an old frame can never be
2361  * more than the wait count of a new frame. Use this to optimize the wait
2362  * count of the old frames: the per link wait count of an old frame is the
2363  * minimum of the per link wait counts of the old frame and the new frame.
2364  *
2365  * Return: QDF_STATUS
2366  */
2367 static QDF_STATUS
2368 mgmt_rx_reo_update_wait_count(
2369 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
2370 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
2371 {
2372 	uint8_t link_id;
2373 
2374 	qdf_assert_always(wait_count_old_frame);
2375 	qdf_assert_always(wait_count_new_frame);
2376 
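	/*
	 * For example, if the old frame's per link wait counts are
	 * {2, 0, 3, 0, 0, 0} (total count 5) and the new frame's are
	 * {1, 0, 5, 0, 0, 0}, the old frame's counts become
	 * {1, 0, 3, 0, 0, 0} and its total count drops from 5 to 4.
	 */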
2377 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2378 		if (wait_count_old_frame->per_link_count[link_id]) {
2379 			uint32_t temp_wait_count;
2380 			uint32_t wait_count_diff;
2381 
2382 			temp_wait_count =
2383 				wait_count_old_frame->per_link_count[link_id];
2384 			wait_count_old_frame->per_link_count[link_id] =
2385 				qdf_min(wait_count_old_frame->
2386 					per_link_count[link_id],
2387 					wait_count_new_frame->
2388 					per_link_count[link_id]);
2389 			wait_count_diff = temp_wait_count -
2390 				wait_count_old_frame->per_link_count[link_id];
2391 
2392 			wait_count_old_frame->total_count -= wait_count_diff;
2393 		}
2394 	}
2395 
2396 	return QDF_STATUS_SUCCESS;
2397 }
2398 
2399 /**
2400  * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
2401  * @reo_list: Pointer to reorder list
2402  * @frame_desc: Pointer to frame descriptor
2403  * @is_queued: Pointer to flag indicating whether this frame is queued
2404  *
2405  * API to update the reorder list on every management frame reception.
2406  * This API does the following things.
2407  *   a) Update the wait counts for all the frames in the reorder list with
2408  *      global time stamp <= current frame's global time stamp. We use the
2409  *      following principle for updating the wait count in this case.
2410  *      Let A and B be two management frames with global time stamp of A <=
2411  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
2412  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
2413  *      min(WAi, WBi).
2414  *   b) If the current frame is to be consumed by host, insert it in the
2415  *      reorder list such that the list is always sorted in the increasing order
2416  *      of global time stamp. Update the wait count of the current frame based
2417  *      on the frame next to it in the reorder list (if any).
2418  *   c) Update the wait count of the frames in the reorder list with global
2419  *      time stamp > current frame's global time stamp. Let the current frame
2420  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
2421  *      all the frames in the reorder list with global time stamp > current
2422  *      frame's global time stamp.
2423  *
2424  * Return: QDF_STATUS
2425  */
2426 static QDF_STATUS
2427 mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
2428 			struct mgmt_rx_reo_frame_descriptor *frame_desc,
2429 			bool *is_queued)
2430 {
2431 	struct mgmt_rx_reo_list_entry *cur_entry;
2432 	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
2433 	bool least_greater_entry_found = false;
2434 	QDF_STATUS status;
2435 	uint32_t new_frame_global_ts;
2436 	struct mgmt_rx_reo_list_entry *new_entry = NULL;
2437 	uint16_t list_insertion_pos = 0;
2438 
2439 	if (!is_queued)
2440 		return QDF_STATUS_E_NULL_VALUE;
2441 	*is_queued = false;
2442 
2443 	if (!reo_list) {
2444 		mgmt_rx_reo_err("Mgmt Rx reo list is null");
2445 		return QDF_STATUS_E_NULL_VALUE;
2446 	}
2447 
2448 	if (!frame_desc) {
2449 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
2450 		return QDF_STATUS_E_NULL_VALUE;
2451 	}
2452 
2453 	new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
2454 
2455 	/* Prepare the list entry before acquiring lock */
2456 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2457 	    frame_desc->reo_required) {
2458 		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
2459 		if (QDF_IS_STATUS_ERROR(status)) {
2460 			mgmt_rx_reo_err("Failed to prepare list entry");
2461 			return QDF_STATUS_E_FAILURE;
2462 		}
2463 	}
2464 
2465 	qdf_spin_lock_bh(&reo_list->list_lock);
2466 
2467 	frame_desc->list_size_rx = qdf_list_size(&reo_list->list);
2468 
2469 	status = mgmt_rx_reo_is_stale_frame(&reo_list->ts_last_released_frame,
2470 					    frame_desc);
2471 	if (QDF_IS_STATUS_ERROR(status))
2472 		goto exit_free_entry;
2473 
2474 	if (frame_desc->is_stale) {
2475 		status = mgmt_rx_reo_handle_stale_frame(reo_list, frame_desc);
2476 		if (QDF_IS_STATUS_ERROR(status))
2477 			goto exit_free_entry;
2478 	}
2479 
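	/*
	 * Step a) of the reorder algorithm: walk the entries whose global
	 * time stamp is <= the current frame's global time stamp and shrink
	 * their wait counts using the current frame's wait count. The walk
	 * also locates the least greater entry, i.e. the first entry whose
	 * global time stamp is greater than that of the current frame.
	 */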
2480 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
2481 		uint32_t cur_entry_global_ts;
2482 
2483 		cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
2484 					cur_entry->rx_params);
2485 
2486 		if (!mgmt_rx_reo_compare_global_timestamps_gte(
2487 		    new_frame_global_ts, cur_entry_global_ts)) {
2488 			least_greater_entry = cur_entry;
2489 			least_greater_entry_found = true;
2490 			break;
2491 		}
2492 
2493 		list_insertion_pos++;
2494 
2495 		status = mgmt_rx_reo_update_wait_count(
2496 					&cur_entry->wait_count,
2497 					&frame_desc->wait_count);
2498 		if (QDF_IS_STATUS_ERROR(status))
2499 			goto exit_free_entry;
2500 
2501 		if (cur_entry->wait_count.total_count == 0)
2502 			cur_entry->status &=
2503 			      ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2504 	}
2505 
2506 	if (frame_desc->is_stale)
2507 		qdf_assert_always(!list_insertion_pos);
2508 
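	/*
	 * Step b) of the reorder algorithm: if the current frame is to be
	 * consumed by the host and requires reordering, queue it just before
	 * the least greater entry (or at the tail if no such entry exists)
	 * so that the list stays sorted by global time stamp. The new
	 * entry's wait count is first capped by the wait count of the least
	 * greater entry.
	 */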
2509 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2510 	    !frame_desc->is_stale && frame_desc->reo_required) {
2511 		if (least_greater_entry_found) {
2512 			status = mgmt_rx_reo_update_wait_count(
2513 					&new_entry->wait_count,
2514 					&least_greater_entry->wait_count);
2515 
2516 			if (QDF_IS_STATUS_ERROR(status))
2517 				goto exit_free_entry;
2518 
2519 			frame_desc->wait_count = new_entry->wait_count;
2520 
2521 			if (new_entry->wait_count.total_count == 0)
2522 				new_entry->status &=
2523 					~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2524 		}
2525 
2526 		new_entry->insertion_ts = qdf_get_log_timestamp();
2527 		new_entry->ingress_timestamp = frame_desc->ingress_timestamp;
2528 		new_entry->is_parallel_rx = frame_desc->is_parallel_rx;
2529 		frame_desc->list_insertion_pos = list_insertion_pos;
2530 
2531 		if (least_greater_entry_found)
2532 			status = qdf_list_insert_before(
2533 					&reo_list->list, &new_entry->node,
2534 					&least_greater_entry->node);
2535 		else
2536 			status = qdf_list_insert_back(
2537 					&reo_list->list, &new_entry->node);
2538 
2539 		if (QDF_IS_STATUS_ERROR(status))
2540 			goto exit_free_entry;
2541 
2542 		*is_queued = true;
2543 
2544 		if (new_entry->wait_count.total_count == 0)
2545 			frame_desc->zero_wait_count_rx = true;
2546 
2547 		if (frame_desc->zero_wait_count_rx &&
2548 		    qdf_list_first_entry_or_null(&reo_list->list,
2549 						 struct mgmt_rx_reo_list_entry,
2550 						 node) == new_entry)
2551 			frame_desc->immediate_delivery = true;
2552 	}
2553 
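	/*
	 * Step c) of the reorder algorithm: for every entry whose global
	 * time stamp is greater than that of the current frame, reduce the
	 * wait count on the current frame's link by the packet counter
	 * delta, as those entries no longer need to wait for this frame or
	 * for any frames dropped in between on the same link.
	 */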
2554 	if (least_greater_entry_found) {
2555 		cur_entry = least_greater_entry;
2556 
2557 		qdf_list_for_each_from(&reo_list->list, cur_entry, node) {
2558 			uint8_t frame_link_id;
2559 			struct mgmt_rx_reo_wait_count *wait_count;
2560 
2561 			frame_link_id =
2562 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2563 			wait_count = &cur_entry->wait_count;
2564 			if (wait_count->per_link_count[frame_link_id]) {
2565 				uint32_t old_wait_count;
2566 				uint32_t new_wait_count;
2567 				uint32_t wait_count_diff;
2568 				uint16_t pkt_ctr_delta;
2569 
2570 				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
2571 				old_wait_count =
2572 				      wait_count->per_link_count[frame_link_id];
2573 
2574 				if (old_wait_count >= pkt_ctr_delta)
2575 					new_wait_count = old_wait_count -
2576 							 pkt_ctr_delta;
2577 				else
2578 					new_wait_count = 0;
2579 
2580 				wait_count_diff = old_wait_count -
2581 						  new_wait_count;
2582 
2583 				wait_count->per_link_count[frame_link_id] =
2584 								new_wait_count;
2585 				wait_count->total_count -= wait_count_diff;
2586 
2587 				if (wait_count->total_count == 0)
2588 					cur_entry->status &=
2589 						~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2590 			}
2591 		}
2592 	}
2593 
2594 	status = QDF_STATUS_SUCCESS;
2595 
2596 exit_free_entry:
2597 	/* Cleanup the entry if it is not queued */
2598 	if (new_entry && !*is_queued) {
2599 		/**
2600 		 * The newly created entry was not inserted into the reorder
2601 		 * list. Release the pdev reference and free the entry.
2602 		 */
2603 		wlan_objmgr_pdev_release_ref(new_entry->pdev,
2604 					     WLAN_MGMT_RX_REO_ID);
2605 		qdf_mem_free(new_entry);
2606 	}
2607 
2608 	qdf_spin_unlock_bh(&reo_list->list_lock);
2609 
2613 	return status;
2614 }
2615 
2616 /**
2617  * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
2618  * @reo_list: Pointer to reorder list
2619  *
2620  * API to initialize the management rx-reorder list.
2621  *
2622  * Return: QDF_STATUS
2623  */
2624 static QDF_STATUS
2625 mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list)
2626 {
2627 	QDF_STATUS status;
2628 
2629 	reo_list->max_list_size = MGMT_RX_REO_LIST_MAX_SIZE;
2630 	reo_list->list_entry_timeout_us = MGMT_RX_REO_LIST_TIMEOUT_US;
2631 
2632 	qdf_list_create(&reo_list->list, reo_list->max_list_size);
2633 	qdf_spinlock_create(&reo_list->list_lock);
2634 
2635 	status = qdf_timer_init(NULL, &reo_list->ageout_timer,
2636 				mgmt_rx_reo_list_ageout_timer_handler, reo_list,
2637 				QDF_TIMER_TYPE_WAKE_APPS);
2638 	if (QDF_IS_STATUS_ERROR(status)) {
2639 		mgmt_rx_reo_err("Failed to initialize reo list ageout timer");
2640 		return status;
2641 	}
2642 
2643 	reo_list->ts_last_released_frame.valid = false;
2644 
2645 	status = qdf_timer_init
2646 			(NULL, &reo_list->global_mgmt_rx_inactivity_timer,
2647 			 mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler,
2648 			 reo_list, QDF_TIMER_TYPE_WAKE_APPS);
2649 	if (QDF_IS_STATUS_ERROR(status)) {
2650 		mgmt_rx_reo_err("Failed to init glb mgmt rx inactivity timer");
2651 		return status;
2652 	}
2653 
2654 	return QDF_STATUS_SUCCESS;
2655 }
2656 
2657 /**
2658  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
2659  * Rx REO parameters.
2660  * @pdev: pdev extracted from the WMI event
2661  * @desc: pointer to frame descriptor
2662  *
2663  * Return: QDF_STATUS of operation
2664  */
2665 static QDF_STATUS
2666 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
2667 				      struct mgmt_rx_reo_frame_descriptor *desc)
2668 {
2669 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
2670 	struct mgmt_rx_reo_snapshot_params *host_ss;
2671 	struct mgmt_rx_reo_params *reo_params;
2672 	int pkt_ctr_delta;
2673 	struct wlan_objmgr_psoc *psoc;
2674 	uint16_t pkt_ctr_delta_thresh;
2675 
2676 	if (!desc) {
2677 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
2678 		return QDF_STATUS_E_NULL_VALUE;
2679 	}
2680 
2681 	if (!desc->rx_params) {
2682 		mgmt_rx_reo_err("Mgmt Rx params null");
2683 		return QDF_STATUS_E_NULL_VALUE;
2684 	}
2685 
2686 	reo_params = desc->rx_params->reo_params;
2687 	if (!reo_params) {
2688 		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
2689 		return QDF_STATUS_E_NULL_VALUE;
2690 	}
2691 
2692 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
2693 	if (!rx_reo_pdev_ctx) {
2694 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
2695 		return QDF_STATUS_E_FAILURE;
2696 	}
2697 
2698 	psoc = wlan_pdev_get_psoc(pdev);
2699 
2700 	/* FW should send valid REO parameters */
2701 	if (!reo_params->valid) {
2702 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
2703 		return QDF_STATUS_E_FAILURE;
2704 	}
2705 
2706 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
2707 
2708 	if (!host_ss->valid) {
2709 		desc->pkt_ctr_delta = 1;
2710 		goto update_host_ss;
2711 	}
2712 
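	/*
	 * The management packet counter is expected to strictly increase on
	 * a given link. A current counter that is not greater than the
	 * counter in the host snapshot indicates an out of order or
	 * duplicate event and is treated as a fatal error below.
	 */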
2713 	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
2714 					     reo_params->mgmt_pkt_ctr)) {
2715 		mgmt_rx_reo_err("Cur frame pkt ctr <= last frame pkt ctr for link = %u",
2716 				reo_params->link_id);
2717 		goto failure_debug;
2718 	}
2719 
2720 	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
2721 						      host_ss->mgmt_pkt_ctr);
2722 	qdf_assert_always(pkt_ctr_delta > 0);
2723 	desc->pkt_ctr_delta = pkt_ctr_delta;
2724 
2725 	if (pkt_ctr_delta == 1)
2726 		goto update_host_ss;
2727 
2728 	/*
2729 	 * Under back pressure scenarios, FW may drop management Rx frame
2730 	 * WMI events. So holes in the management packet counter are expected.
2731 	 * Add a debug print and optional assert to track the holes.
2732 	 */
2733 	mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta);
2734 	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2735 			  reo_params->valid, reo_params->mgmt_pkt_ctr,
2736 			  reo_params->global_timestamp);
2737 	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u",
2738 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
2739 			  host_ss->global_timestamp);
2740 
2741 	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);
2742 
2743 	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
2744 		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
2745 				pkt_ctr_delta, pkt_ctr_delta_thresh,
2746 				reo_params->link_id);
2747 		goto failure_debug;
2748 	}
2749 
2750 update_host_ss:
2751 	host_ss->valid = true;
2752 	host_ss->global_timestamp = reo_params->global_timestamp;
2753 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
2754 
2755 	return QDF_STATUS_SUCCESS;
2756 
2757 failure_debug:
2758 	mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
2759 			reo_params->valid, reo_params->mgmt_pkt_ctr,
2760 			reo_params->global_timestamp);
2761 	mgmt_rx_reo_err("Last frame valid = %u, pkt_ctr = %u, ts =%u",
2762 			host_ss->valid, host_ss->mgmt_pkt_ctr,
2763 			host_ss->global_timestamp);
2764 	qdf_assert_always(0);
2765 
2766 	return QDF_STATUS_E_FAILURE;
2767 }
2768 
2769 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2770 /**
2771  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
2772  * related to frames going into the reorder module
2773  * @reo_ctx: Pointer to reorder context
2774  *
2775  * API to print the stats related to frames going into the management
2776  * Rx reorder module.
2777  *
2778  * Return: QDF_STATUS
2779  */
2780 static QDF_STATUS
2781 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
2782 {
2783 	struct reo_ingress_frame_stats *stats;
2784 	uint8_t link_id;
2785 	uint8_t desc_type;
2786 	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
2787 	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2788 	uint64_t total_ingress_count = 0;
2789 	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
2790 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2791 	uint64_t total_stale_count = 0;
2792 	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
2793 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2794 	uint64_t total_error_count = 0;
2795 	uint64_t total_queued_count = 0;
2796 	uint64_t total_zero_wait_count_rx_count = 0;
2797 	uint64_t total_immediate_delivery_count = 0;
2798 
2799 	if (!reo_ctx)
2800 		return QDF_STATUS_E_NULL_VALUE;
2801 
2802 	stats = &reo_ctx->ingress_frame_debug_info.stats;
2803 
2804 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2805 		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2806 		     desc_type++) {
2807 			ingress_count_per_link[link_id] +=
2808 				stats->ingress_count[link_id][desc_type];
2809 			stale_count_per_link[link_id] +=
2810 					stats->stale_count[link_id][desc_type];
2811 			error_count_per_link[link_id] +=
2812 					stats->error_count[link_id][desc_type];
2813 		}
2814 
2815 		total_ingress_count += ingress_count_per_link[link_id];
2816 		total_stale_count += stale_count_per_link[link_id];
2817 		total_error_count += error_count_per_link[link_id];
2818 	}
2819 
2820 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2821 	     desc_type++) {
2822 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2823 			ingress_count_per_desc_type[desc_type] +=
2824 				stats->ingress_count[link_id][desc_type];
2825 			stale_count_per_desc_type[desc_type] +=
2826 					stats->stale_count[link_id][desc_type];
2827 			error_count_per_desc_type[desc_type] +=
2828 					stats->error_count[link_id][desc_type];
2829 		}
2830 	}
2831 
2832 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2833 		total_queued_count += stats->queued_count[link_id];
2834 		total_zero_wait_count_rx_count +=
2835 				stats->zero_wait_count_rx_count[link_id];
2836 		total_immediate_delivery_count +=
2837 				stats->immediate_delivery_count[link_id];
2838 	}
2839 
2840 	mgmt_rx_reo_alert("Ingress Frame Stats:");
2841 	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
2842 	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
2843 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
2844 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
2845 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
2846 	mgmt_rx_reo_alert("\t------------------------------------");
2847 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2848 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2849 	mgmt_rx_reo_alert("\t-------------------------------------------");
2850 
2851 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2852 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2853 				  stats->ingress_count[link_id][0],
2854 				  stats->ingress_count[link_id][1],
2855 				  stats->ingress_count[link_id][2],
2856 				  ingress_count_per_link[link_id]);
2857 		mgmt_rx_reo_alert("\t-------------------------------------------");
2858 	}
2859 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2860 			  ingress_count_per_desc_type[0],
2861 			  ingress_count_per_desc_type[1],
2862 			  ingress_count_per_desc_type[2],
2863 			  total_ingress_count);
2864 
2865 	mgmt_rx_reo_alert("\t2) Stale Frame Count:");
2866 	mgmt_rx_reo_alert("\t------------------------------------");
2867 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2868 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2869 	mgmt_rx_reo_alert("\t-------------------------------------------");
2870 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2871 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2872 				  stats->stale_count[link_id][0],
2873 				  stats->stale_count[link_id][1],
2874 				  stats->stale_count[link_id][2],
2875 				  stale_count_per_link[link_id]);
2876 		mgmt_rx_reo_alert("\t-------------------------------------------");
2877 	}
2878 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2879 			  stale_count_per_desc_type[0],
2880 			  stale_count_per_desc_type[1],
2881 			  stale_count_per_desc_type[2],
2882 			  total_stale_count);
2883 
2884 	mgmt_rx_reo_alert("\t3) Error Frame Count:");
2885 	mgmt_rx_reo_alert("\t------------------------------------");
2886 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
2887 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
2888 	mgmt_rx_reo_alert("\t-------------------------------------------");
2889 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2890 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2891 				  stats->error_count[link_id][0],
2892 				  stats->error_count[link_id][1],
2893 				  stats->error_count[link_id][2],
2894 				  error_count_per_link[link_id]);
2895 		mgmt_rx_reo_alert("\t-------------------------------------------");
2896 	}
2897 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2898 			  error_count_per_desc_type[0],
2899 			  error_count_per_desc_type[1],
2900 			  error_count_per_desc_type[2],
2901 			  total_error_count);
2902 
2903 	mgmt_rx_reo_alert("\t4) Host consumed frames related stats:");
2904 	mgmt_rx_reo_alert("\t------------------------------------------------");
2905 	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
2906 	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
2907 	mgmt_rx_reo_alert("\t------------------------------------------------");
2908 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2909 		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
2910 				  stats->queued_count[link_id],
2911 				  stats->zero_wait_count_rx_count[link_id],
2912 				  stats->immediate_delivery_count[link_id]);
2913 		mgmt_rx_reo_alert("\t------------------------------------------------");
2914 	}
2915 	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
2916 			  total_queued_count,
2917 			  total_zero_wait_count_rx_count,
2918 			  total_immediate_delivery_count);
2919 
2920 	return QDF_STATUS_SUCCESS;
2921 }
2922 
2923 /**
2924  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
2925  * the reorder algorithm.
2926  * @reo_ctx: management rx reorder context
2927  * @desc: Pointer to frame descriptor
2928  * @is_queued: Indicates whether this frame is queued to reorder list
2929  * @is_error: Indicates whether any error occurred during processing this frame
2930  *
2931  * Return: QDF_STATUS of operation
2932  */
2933 static QDF_STATUS
2934 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
2935 			      struct mgmt_rx_reo_frame_descriptor *desc,
2936 			      bool is_queued, bool is_error)
2937 {
2938 	struct reo_ingress_debug_info *ingress_frame_debug_info;
2939 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
2940 	struct reo_ingress_frame_stats *stats;
2941 	uint8_t link_id;
2942 
2943 	if (!reo_ctx || !desc)
2944 		return QDF_STATUS_E_NULL_VALUE;
2945 
2946 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
2947 
2948 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
2949 			[ingress_frame_debug_info->next_index];
2950 
2951 	cur_frame_debug_info->link_id =
2952 				mgmt_rx_reo_get_link_id(desc->rx_params);
2953 	cur_frame_debug_info->mgmt_pkt_ctr =
2954 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
2955 	cur_frame_debug_info->global_timestamp =
2956 				mgmt_rx_reo_get_global_ts(desc->rx_params);
2957 	cur_frame_debug_info->start_timestamp =
2958 				mgmt_rx_reo_get_start_ts(desc->rx_params);
2959 	cur_frame_debug_info->end_timestamp =
2960 				mgmt_rx_reo_get_end_ts(desc->rx_params);
2961 	cur_frame_debug_info->duration_us =
2962 				mgmt_rx_reo_get_duration_us(desc->rx_params);
2963 	cur_frame_debug_info->desc_type = desc->type;
2964 	cur_frame_debug_info->frame_type = desc->frame_type;
2965 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
2966 	cur_frame_debug_info->wait_count = desc->wait_count;
2967 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
2968 		     desc->shared_snapshots,
2969 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
2970 			     sizeof(desc->shared_snapshots)));
2971 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
2972 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
2973 			     sizeof(desc->host_snapshot)));
2974 	cur_frame_debug_info->is_queued = is_queued;
2975 	cur_frame_debug_info->is_stale = desc->is_stale;
2976 	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
2977 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
2978 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
2979 	cur_frame_debug_info->is_error = is_error;
2980 	cur_frame_debug_info->ts_last_released_frame =
2981 				reo_ctx->reo_list.ts_last_released_frame;
2982 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
2983 	cur_frame_debug_info->ingress_duration =
2984 			qdf_get_log_timestamp() - desc->ingress_timestamp;
2985 	cur_frame_debug_info->list_size_rx = desc->list_size_rx;
2986 	cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
2987 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
2988 	cur_frame_debug_info->reo_required = desc->reo_required;
2989 
2990 	ingress_frame_debug_info->next_index++;
2991 	ingress_frame_debug_info->next_index %=
2992 				MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
2993 	if (ingress_frame_debug_info->next_index == 0)
2994 		ingress_frame_debug_info->wrap_aroud = true;
2995 
2996 	stats = &ingress_frame_debug_info->stats;
2997 	link_id = cur_frame_debug_info->link_id;
2998 	stats->ingress_count[link_id][desc->type]++;
2999 	if (is_queued)
3000 		stats->queued_count[link_id]++;
3001 	if (desc->zero_wait_count_rx)
3002 		stats->zero_wait_count_rx_count[link_id]++;
3003 	if (desc->immediate_delivery)
3004 		stats->immediate_delivery_count[link_id]++;
3005 	if (is_error)
3006 		stats->error_count[link_id][desc->type]++;
3007 	if (desc->is_stale)
3008 		stats->stale_count[link_id][desc->type]++;
3009 
3010 	return QDF_STATUS_SUCCESS;
3011 }
3012 
3013 /**
3014  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
3015  * about the latest frames entered the reorder module
3016  * @reo_ctx: management rx reorder context
3017  * @num_frames: Number of frames for which the debug information is to be
3018  * printed. If @num_frames is 0, then debug information about all the frames
3019  * in the ring buffer will be printed.
3020  *
3021  * Return: QDF_STATUS of operation
3022  */
3023 static QDF_STATUS
3024 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
3025 					   uint16_t num_frames)
3026 {
3027 	struct reo_ingress_debug_info *ingress_frame_debug_info;
3028 	int start_index;
3029 	uint16_t index;
3030 	uint16_t entry;
3031 	uint16_t num_valid_entries;
3032 	uint16_t num_entries_to_print;
3033 	char *boarder;
3034 
3035 	if (!reo_ctx)
3036 		return QDF_STATUS_E_NULL_VALUE;
3037 
3038 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
3039 
3040 	if (ingress_frame_debug_info->wrap_aroud)
3041 		num_valid_entries = MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3042 	else
3043 		num_valid_entries = ingress_frame_debug_info->next_index;
3044 
3045 	if (num_frames == 0) {
3046 		num_entries_to_print = num_valid_entries;
3047 
3048 		if (ingress_frame_debug_info->wrap_aroud)
3049 			start_index = ingress_frame_debug_info->next_index;
3050 		else
3051 			start_index = 0;
3052 	} else {
3053 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
3054 
3055 		start_index = (ingress_frame_debug_info->next_index -
3056 			       num_entries_to_print +
3057 			       MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX)
3058 			      % MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3059 
3060 		qdf_assert_always(start_index >= 0 &&
3061 				  start_index < MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX);
3062 	}
3063 
3064 	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
3065 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
3066 				num_frames,
3067 				ingress_frame_debug_info->wrap_aroud,
3068 				ingress_frame_debug_info->next_index);
3069 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
3070 				start_index, num_entries_to_print);
3071 
3072 	if (!num_entries_to_print)
3073 		return QDF_STATUS_SUCCESS;
3074 
3075 	boarder = ingress_frame_debug_info->boarder;
3076 
3077 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3078 	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
3079 				"Index", "CPU", "D.type", "F.type", "F.subtype",
3080 				"Link", "SeqNo", "Global ts",
3081 				"Start ts", "End ts", "Dur", "Last ts",
3082 				"Ingress ts", "Flags", "Ingress Dur", "Size",
3083 				"Pos", "Wait Count", "Snapshot : link 0",
3084 				"Snapshot : link 1", "Snapshot : link 2",
3085 				"Snapshot : link 3", "Snapshot : link 4",
3086 				"Snapshot : link 5");
3087 	mgmt_rx_reo_alert_no_fl("%s", boarder);
3088 
3089 	index = start_index;
3090 	for (entry = 0; entry < num_entries_to_print; entry++) {
3091 		struct reo_ingress_debug_frame_info *info;
3092 		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
3093 		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
3094 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {'\0'};
3095 		char flag_queued = ' ';
3096 		char flag_stale = ' ';
3097 		char flag_parallel_rx = ' ';
3098 		char flag_error = ' ';
3099 		char flag_zero_wait_count_rx = ' ';
3100 		char flag_immediate_delivery = ' ';
3101 		char flag_reo_required = ' ';
3102 		int64_t ts_last_released_frame = -1;
3103 		uint8_t link;
3104 
3105 		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];
3106 
3107 		if (info->ts_last_released_frame.valid)
3108 			ts_last_released_frame =
3109 					info->ts_last_released_frame.global_ts;
3110 
3111 		if (info->is_queued)
3112 			flag_queued = 'Q';
3113 
3114 		if (info->is_stale)
3115 			flag_stale = 'S';
3116 
3117 		if (info->is_parallel_rx)
3118 			flag_parallel_rx = 'P';
3119 
3120 		if (info->is_error)
3121 			flag_error = 'E';
3122 
3123 		if (info->zero_wait_count_rx)
3124 			flag_zero_wait_count_rx = 'Z';
3125 
3126 		if (info->immediate_delivery)
3127 			flag_immediate_delivery = 'I';
3128 
3129 		if (!info->reo_required)
3130 			flag_reo_required = 'N';
3131 
3132 		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c", flag_error,
3133 			 flag_stale, flag_parallel_rx, flag_queued,
3134 			 flag_zero_wait_count_rx, flag_immediate_delivery,
3135 			 flag_reo_required);
3136 		snprintf(wait_count, sizeof(wait_count),
3137 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
3138 			 info->wait_count.total_count,
3139 			 info->wait_count.per_link_count[0],
3140 			 info->wait_count.per_link_count[1],
3141 			 info->wait_count.per_link_count[2],
3142 			 info->wait_count.per_link_count[3],
3143 			 info->wait_count.per_link_count[4],
3144 			 info->wait_count.per_link_count[5]);
3145 
3146 		for (link = 0; link < MAX_MLO_LINKS; link++) {
3147 			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3148 			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3149 			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3150 			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
3151 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
3152 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
3153 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
3154 			struct mgmt_rx_reo_snapshot_params *host_ss;
3155 
3156 			mac_hw_ss = &info->shared_snapshots
3157 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
3158 			fw_consumed_ss = &info->shared_snapshots
3159 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
3160 			fw_forwarded_ss = &info->shared_snapshots
3161 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
3162 			host_ss = &info->host_snapshot[link];
3163 
3164 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
3165 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
3166 				 mac_hw_ss->global_timestamp);
3167 			snprintf(fw_consumed, sizeof(fw_consumed),
3168 				 "(%1u, %5u, %10u)",
3169 				 fw_consumed_ss->valid,
3170 				 fw_consumed_ss->mgmt_pkt_ctr,
3171 				 fw_consumed_ss->global_timestamp);
3172 			snprintf(fw_forwarded, sizeof(fw_forwarded),
3173 				 "(%1u, %5u, %10u)",
3174 				 fw_forwarded_ss->valid,
3175 				 fw_forwarded_ss->mgmt_pkt_ctr,
3176 				 fw_forwarded_ss->global_timestamp);
3177 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
3178 				 host_ss->valid,
3179 				 host_ss->mgmt_pkt_ctr,
3180 				 host_ss->global_timestamp);
3181 			snprintf(snapshots[link], sizeof(snapshots[link]),
3182 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
3183 				 fw_forwarded, host);
3184 		}
3185 
3186 		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%13s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
3187 					entry, info->cpu_id, info->desc_type,
3188 					info->frame_type, info->frame_subtype,
3189 					info->link_id,
3190 					info->mgmt_pkt_ctr,
3191 					info->global_timestamp,
3192 					info->start_timestamp,
3193 					info->end_timestamp,
3194 					info->duration_us,
3195 					ts_last_released_frame,
3196 					info->ingress_timestamp, flags,
3197 					info->ingress_duration,
3198 					info->list_size_rx,
3199 					info->list_insertion_pos, wait_count,
3200 					snapshots[0], snapshots[1],
3201 					snapshots[2], snapshots[3],
3202 					snapshots[4], snapshots[5]);
3203 		mgmt_rx_reo_alert_no_fl("%s", boarder);
3204 
3205 		index++;
3206 		index %= MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
3207 	}
3208 
3209 	return QDF_STATUS_SUCCESS;
3210 }
3211 #else
3212 /**
3213  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
3214  * related to frames going into the reorder module
3215  * @reo_ctx: Pointer to reorder context
3216  *
3217  * API to print the stats related to frames going into the management
3218  * Rx reorder module.
3219  *
3220  * Return: QDF_STATUS
3221  */
3222 static QDF_STATUS
3223 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
3224 {
3225 	return QDF_STATUS_SUCCESS;
3226 }
3227 
3228 /**
3229  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
3230  * the reorder algorithm.
3231  * @reo_ctx: management rx reorder context
3232  * @desc: Pointer to frame descriptor
3233  * @is_queued: Indicates whether this frame is queued to reorder list
3234  * @is_error: Indicates whether any error occurred during processing this frame
3235  *
3236  * Return: QDF_STATUS of operation
3237  */
3238 static QDF_STATUS
3239 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
3240 			      struct mgmt_rx_reo_frame_descriptor *desc,
3241 			      bool is_queued, bool is_error)
3242 {
3243 	return QDF_STATUS_SUCCESS;
3244 }
3245 
3246 /**
3247  * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
3248  * the latest frames entering the reorder module
3249  * @reo_ctx: management rx reorder context
3250  *
3251  * Return: QDF_STATUS of operation
3252  */
3253 static QDF_STATUS
3254 mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
3255 {
3256 	return QDF_STATUS_SUCCESS;
3257 }
3258 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
3259 
3260 QDF_STATUS
3261 wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
3262 			    struct mgmt_rx_reo_frame_descriptor *desc,
3263 			    bool *is_queued)
3264 {
3265 	struct mgmt_rx_reo_context *reo_ctx;
3266 	QDF_STATUS ret;
3267 
3268 	if (!is_queued)
3269 		return QDF_STATUS_E_NULL_VALUE;
3270 
3271 	*is_queued = false;
3272 
3273 	if (!desc || !desc->rx_params) {
3274 		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
3275 		return QDF_STATUS_E_NULL_VALUE;
3276 	}
3277 
3278 	reo_ctx = mgmt_rx_reo_get_context();
3279 	if (!reo_ctx) {
3280 		mgmt_rx_reo_err("REO context is NULL");
3281 		return QDF_STATUS_E_NULL_VALUE;
3282 	}
3283 
3284 	/**
3285 	 * Critical Section = Host snapshot update + Calculation of wait
3286 	 * counts + Update reorder list. The following section describes the
3287 	 * motivation for making this a critical section.
3288 	 * Let's take an example of 2 links (Link A and B) which have received
3289 	 * management frames A1 and B1 respectively, such that the MLO global
3290 	 * time stamp of A1 is less than that of B1. The host is concurrently
3291 	 * executing "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 on 2 CPUs.
3292 	 *
3293 	 * A lockless version of this API ("wlan_mgmt_rx_reo_algo_entry_v1") is
3294 	 * as follows.
3295 	 *
3296 	 * wlan_mgmt_rx_reo_algo_entry()
3297 	 * {
3298 	 *     Host snapshot update
3299 	 *     Calculation of wait counts
3300 	 *     Update reorder list
3301 	 *     Release to upper layer
3302 	 * }
3303 	 *
3304 	 * We may run into race conditions under the following sequence of
3305 	 * operations.
3306 	 *
3307 	 * 1. Host snapshot update for link A in context of frame A1
3308 	 * 2. Host snapshot update for link B in context of frame B1
3309 	 * 3. Calculation of wait count for frame B1
3310 	 *        link A wait count =  0
3311 	 *        link B wait count =  0
3312 	 * 4. Update reorder list with frame B1
3313 	 * 5. Release B1 to upper layer
3314 	 * 6. Calculation of wait count for frame A1
3315 	 *        link A wait count =  0
3316 	 *        link B wait count =  0
3317 	 * 7. Update reorder list with frame A1
3318 	 * 8. Release A1 to upper layer
3319 	 *
3320 	 * This leads to incorrect behaviour as B1 goes to upper layer before
3321 	 * A1.
3322 	 *
3323 	 * To prevent this, let's make Host snapshot update + Calculate wait count
3324 	 * a critical section by adding locks. The updated version of the API
3325 	 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
3326 	 *
3327 	 * wlan_mgmt_rx_reo_algo_entry()
3328 	 * {
3329 	 *     LOCK
3330 	 *         Host snapshot update
3331 	 *         Calculation of wait counts
3332 	 *     UNLOCK
3333 	 *     Update reorder list
3334 	 *     Release to upper layer
3335 	 * }
3336 	 *
3337 	 * With this API also, we may run into race conditions under the
3338 	 * following sequence of operations.
3339 	 *
3340 	 * 1. Host snapshot update for link A in context of frame A1 +
3341 	 *    Calculation of wait count for frame A1
3342 	 *        link A wait count =  0
3343 	 *        link B wait count =  0
3344 	 * 2. Host snapshot update for link B in context of frame B1 +
3345 	 *    Calculation of wait count for frame B1
3346 	 *        link A wait count =  0
3347 	 *        link B wait count =  0
3348 	 * 4. Update reorder list with frame B1
3349 	 * 5. Release B1 to upper layer
3350 	 * 7. Update reorder list with frame A1
3351 	 * 8. Release A1 to upper layer
3352 	 *
3353 	 * This also leads to incorrect behaviour as B1 goes to upper layer
3354 	 * before A1.
3355 	 *
3356 	 * To prevent this, let's make Host snapshot update + Calculate wait
3357 	 * count + Update reorder list a critical section by adding locks.
3358 	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
3359 	 * is as follows.
3360 	 *
3361 	 * wlan_mgmt_rx_reo_algo_entry()
3362 	 * {
3363 	 *     LOCK
3364 	 *         Host snapshot update
3365 	 *         Calculation of wait counts
3366 	 *         Update reorder list
3367 	 *     UNLOCK
3368 	 *     Release to upper layer
3369 	 * }
3370 	 */
3371 	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);
3372 
3373 	qdf_assert_always(desc->rx_params->reo_params->valid);
3374 	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);
3375 
3376 	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
3377 	    desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME)
3378 		qdf_assert_always(desc->rx_params->reo_params->duration_us);
3379 
3380 	/* Update the Host snapshot */
3381 	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
3382 	if (QDF_IS_STATUS_ERROR(ret))
3383 		goto failure;
3384 
3385 	/* Compute wait count for this frame/event */
3386 	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
3387 	if (QDF_IS_STATUS_ERROR(ret))
3388 		goto failure;
3389 
3390 	/* Update the REO list */
3391 	ret = mgmt_rx_reo_update_list(&reo_ctx->reo_list, desc, is_queued);
3392 	if (QDF_IS_STATUS_ERROR(ret))
3393 		goto failure;
3394 
3395 	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
3396 					    *is_queued, false);
3397 	if (QDF_IS_STATUS_ERROR(ret)) {
3398 		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3399 		return ret;
3400 	}
3401 
3402 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3403 
3404 	/* Finally, release the entries for which pending frame is received */
3405 	return mgmt_rx_reo_list_release_entries(reo_ctx);
3406 
3407 failure:
3408 	/**
3409 	 * Ignore the return value of this function call and return
3410 	 * the actual reason for failure.
3411 	 */
3412 	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);
3413 
3414 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
3415 
3416 	return ret;
3417 }
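
/*
 * Usage sketch (illustrative only; the real caller lives in the management
 * Rx path and is not part of this file). A caller that has populated a
 * frame descriptor invokes the entry point, checks the return status and
 * then uses is_queued to know whether the frame got absorbed by the
 * reorder list:
 *
 *	bool is_queued;
 *	QDF_STATUS status;
 *
 *	status = wlan_mgmt_rx_reo_algo_entry(pdev, &desc, &is_queued);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	if (!is_queued)
 *		handle_unqueued_frame(pdev, &desc);
 *
 * "handle_unqueued_frame" above is a hypothetical placeholder for whatever
 * the caller does with a frame that was not queued for reordering.
 */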
3418 
3419 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
3420 /**
3421  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
3422  * context.
3423  * @reo_context: Pointer to reo context
3424  *
3425  * Return: QDF_STATUS of operation
3426  */
3427 static inline QDF_STATUS
3428 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
3429 {
3430 	return QDF_STATUS_SUCCESS;
3431 }
3432 
3433 /**
3434  * mgmt_rx_reo_sim_deinit() - De-initialize management rx reorder simulation
3435  * context.
3436  * @reo_context: Pointer to reo context
3437  *
3438  * Return: QDF_STATUS of operation
3439  */
3440 static inline QDF_STATUS
3441 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
3442 {
3443 	return QDF_STATUS_SUCCESS;
3444 }
3445 
3446 QDF_STATUS
3447 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
3448 {
3449 	return QDF_STATUS_SUCCESS;
3450 }
3451 
3452 QDF_STATUS
3453 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
3454 {
3455 	return QDF_STATUS_SUCCESS;
3456 }
3457 #else
3458 /**
3459  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
3460  * master management frame list
3461  * @master_frame_list: pointer to master management frame list
3462  * @frame: pointer to management frame parameters
3463  *
3464  * This API removes frames from the master management frame list. This API is
3465  * used in case of FW consumed management frames or management frames which
3466  * are dropped at host due to any error.
3467  *
3468  * Return: QDF_STATUS of operation
3469  */
3470 static QDF_STATUS
3471 mgmt_rx_reo_sim_remove_frame_from_master_list(
3472 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3473 		const struct mgmt_rx_frame_params *frame)
3474 {
3475 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
3476 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
3477 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
3478 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
3479 	QDF_STATUS status;
3480 
3481 	if (!master_frame_list) {
3482 		mgmt_rx_reo_err("Mgmt master frame list is null");
3483 		return QDF_STATUS_E_NULL_VALUE;
3484 	}
3485 
3486 	if (!frame) {
3487 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3488 		return QDF_STATUS_E_NULL_VALUE;
3489 	}
3490 
3491 	qdf_spin_lock(&master_frame_list->lock);
3492 
3493 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
3494 			  node) {
3495 		if (pending_entry->params.link_id == frame->link_id &&
3496 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3497 		    pending_entry->params.global_timestamp ==
3498 		    frame->global_timestamp) {
3499 			matching_pend_entry = pending_entry;
3500 			break;
3501 		}
3502 	}
3503 
3504 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
3505 		if (stale_entry->params.link_id == frame->link_id &&
3506 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3507 		    stale_entry->params.global_timestamp ==
3508 		    frame->global_timestamp) {
3509 			matching_stale_entry = stale_entry;
3510 			break;
3511 		}
3512 	}
3513 
3514 	/* Assert if the frame is present in both pending and stale lists */
3515 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
3516 
3517 	if (!matching_pend_entry && !matching_stale_entry) {
3518 		qdf_spin_unlock(&master_frame_list->lock);
3519 		mgmt_rx_reo_err("No matching frame in pend/stale list");
3520 		return QDF_STATUS_E_FAILURE;
3521 	}
3522 
3523 	if (matching_pend_entry) {
3524 		status = qdf_list_remove_node(&master_frame_list->pending_list,
3525 					      &matching_pend_entry->node);
3526 		if (QDF_IS_STATUS_ERROR(status)) {
3527 			qdf_spin_unlock(&master_frame_list->lock);
3528 			mgmt_rx_reo_err("Failed to remove the matching entry");
3529 			return status;
3530 		}
3531 
3532 		qdf_mem_free(matching_pend_entry);
3533 	}
3534 
3535 	if (matching_stale_entry) {
3536 		status = qdf_list_remove_node(&master_frame_list->stale_list,
3537 					      &matching_stale_entry->node);
3538 		if (QDF_IS_STATUS_ERROR(status)) {
3539 			qdf_spin_unlock(&master_frame_list->lock);
3540 			mgmt_rx_reo_err("Failed to remove the matching entry");
3541 			return status;
3542 		}
3543 
3544 		qdf_mem_free(matching_stale_entry);
3545 	}
3546 
3547 	qdf_spin_unlock(&master_frame_list->lock);
3548 
3549 	return QDF_STATUS_SUCCESS;
3550 }
3551 
3552 /**
3553  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
3554  * pending management frame list
3555  * @master_frame_list: pointer to master management frame list
3556  * @frame: pointer to management frame parameters
3557  *
3558  * This API removes frames from the pending management frame list. This API is
3559  * used in case of FW consumed management frames or management frames which
3560  * are dropped at host due to any error.
3561  *
3562  * Return: QDF_STATUS of operation
3563  */
3564 static QDF_STATUS
3565 mgmt_rx_reo_sim_remove_frame_from_pending_list(
3566 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3567 		const struct mgmt_rx_frame_params *frame)
3568 {
3569 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
3570 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
3571 	QDF_STATUS status;
3572 
3573 	if (!master_frame_list) {
3574 		mgmt_rx_reo_err("Mgmt master frame list is null");
3575 		return QDF_STATUS_E_NULL_VALUE;
3576 	}
3577 
3578 	if (!frame) {
3579 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
3580 		return QDF_STATUS_E_NULL_VALUE;
3581 	}
3582 
3583 	qdf_spin_lock(&master_frame_list->lock);
3584 
3585 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
3586 		if (cur_entry->params.link_id == frame->link_id &&
3587 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
3588 		    cur_entry->params.global_timestamp ==
3589 		    frame->global_timestamp) {
3590 			matching_entry = cur_entry;
3591 			break;
3592 		}
3593 	}
3594 
3595 	if (!matching_entry) {
3596 		qdf_spin_unlock(&master_frame_list->lock);
3597 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
3598 		return QDF_STATUS_E_FAILURE;
3599 	}
3600 
3601 	status = qdf_list_remove_node(&master_frame_list->pending_list,
3602 				      &matching_entry->node);
3603 	if (QDF_IS_STATUS_ERROR(status)) {
3604 		qdf_spin_unlock(&master_frame_list->lock);
3605 		mgmt_rx_reo_err("Failed to remove the matching entry");
3606 		return status;
3607 	}
3608 
3609 	qdf_mem_free(matching_entry);
3610 
3611 	qdf_spin_unlock(&master_frame_list->lock);
3612 
3613 
3614 	return QDF_STATUS_SUCCESS;
3615 }
3616 
3617 /**
3618  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
3619  * pending management frame list
3620  * @master_frame_list: pointer to master management frame list
3621  * @frame: pointer to management frame parameters
3622  *
3623  * This API inserts frames to the pending management frame list. This API is
3624  * used to insert frames generated by the MAC HW to the pending frame list.
3625  *
3626  * Return: QDF_STATUS of operation
3627  */
3628 static QDF_STATUS
3629 mgmt_rx_reo_sim_add_frame_to_pending_list(
3630 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
3631 		const struct mgmt_rx_frame_params *frame)
3632 {
3633 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
3634 	QDF_STATUS status;
3635 
3636 	if (!master_frame_list) {
3637 		mgmt_rx_reo_err("Mgmt master frame list is null");
3638 		return QDF_STATUS_E_NULL_VALUE;
3639 	}
3640 
3641 	if (!frame) {
3642 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
3643 		return QDF_STATUS_E_NULL_VALUE;
3644 	}
3645 
3646 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
3647 	if (!new_entry) {
3648 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
3649 		return QDF_STATUS_E_NOMEM;
3650 	}
3651 
3652 	new_entry->params = *frame;
3653 
3654 	qdf_spin_lock(&master_frame_list->lock);
3655 
3656 	status = qdf_list_insert_back(&master_frame_list->pending_list,
3657 				      &new_entry->node);
3658 
3659 	qdf_spin_unlock(&master_frame_list->lock);
3660 
3661 	if (QDF_IS_STATUS_ERROR(status)) {
3662 		mgmt_rx_reo_err("Failed to add frame to pending list");
3663 		qdf_mem_free(new_entry);
3664 		return status;
3665 	}
3666 
3667 	return QDF_STATUS_SUCCESS;
3668 }
3669 
3670 QDF_STATUS
3671 mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
3672 				 struct mgmt_rx_event_params *mgmt_rx_params)
3673 {
3674 	struct mgmt_rx_reo_context *reo_context;
3675 	struct mgmt_rx_reo_sim_context *sim_context;
3676 	QDF_STATUS status;
3677 	struct mgmt_rx_reo_params *reo_params;
3678 
3679 	if (!mgmt_rx_params) {
3680 		mgmt_rx_reo_err("Mgmt rx params null");
3681 		return QDF_STATUS_E_NULL_VALUE;
3682 	}
3683 
3684 	reo_params = mgmt_rx_params->reo_params;
3685 
3686 	reo_context = mgmt_rx_reo_get_context();
3687 	if (!reo_context) {
3688 		mgmt_rx_reo_err("Mgmt reo context is null");
3689 		return QDF_STATUS_E_NULL_VALUE;
3690 	}
3691 
3692 	sim_context = &reo_context->sim_context;
3693 
3694 	qdf_spin_lock(&sim_context->master_frame_list.lock);
3695 
3696 	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
3697 		qdf_spin_unlock(&sim_context->master_frame_list.lock);
3698 		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
3699 		qdf_assert_always(0);
3700 	} else {
3701 		struct mgmt_rx_frame_params *cur_entry_params;
3702 		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
3703 		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
3704 
3705 		/**
3706 		 * Make sure the frames delivered to upper layer are in the
3707 		 * increasing order of global time stamp. For that, the frame
3708 		 * which is being delivered should be present at the head of the
3709 		 * pending frame list. There could be multiple frames with the
3710 		 * same global time stamp in the pending frame list. Search
3711 		 * among all the frames at the head of the list which have the
3712 		 * same global time stamp as the frame which is being delivered.
3713 		 * To find the matching frame, check whether the packet counter,
3714 		 * global time stamp and link id are the same.
3715 		 */
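		/*
		 * Illustrative example (made-up values): if the pending list
		 * starts with (link 0, ctr 10, ts 5000), (link 1, ctr 7,
		 * ts 5000), (link 0, ctr 11, ts 5010), then a delivered frame
		 * with link 1, ctr 7, ts 5000 matches the second entry. A
		 * frame with ts 5010 delivered now would stop the search at
		 * the first entry (time stamp mismatch) and hit the mismatch
		 * assert below, because an older frame (ts 5000) is still
		 * pending.
		 */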
3716 		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
3717 				  cur_entry, node) {
3718 			cur_entry_params = &cur_entry->params;
3719 
3720 			if (cur_entry_params->global_timestamp !=
3721 			    reo_params->global_timestamp)
3722 				break;
3723 
3724 			if (cur_entry_params->link_id == reo_params->link_id &&
3725 			    cur_entry_params->mgmt_pkt_ctr ==
3726 			    reo_params->mgmt_pkt_ctr) {
3727 				matching_entry = cur_entry;
3728 				break;
3729 			}
3730 		}
3731 
3732 		if (!matching_entry) {
3733 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
3734 			mgmt_rx_reo_err("reo sim failure: mismatch");
3735 			qdf_assert_always(0);
3736 		}
3737 
3738 		status = qdf_list_remove_node(
3739 				&sim_context->master_frame_list.pending_list,
3740 				&matching_entry->node);
3741 		if (QDF_IS_STATUS_ERROR(status)) {
3742 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
3743 			mgmt_rx_reo_err("Failed to remove matching entry");
3744 			return status;
3745 		}
3746 
3747 		qdf_mem_free(matching_entry);
3748 	}
3749 
3750 	qdf_spin_unlock(&sim_context->master_frame_list.lock);
3751 
3752 	mgmt_rx_reo_debug("Successfully processed mgmt frame");
3753 	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
3754 			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
3755 			  reo_params->global_timestamp);
3756 
3757 	return QDF_STATUS_SUCCESS;
3758 }
3759 
3760 /**
3761  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
3762  * @percentage_true: probability (in percentage) of true
3763  *
3764  * API to generate true with probability @percentage_true % and false with
3765  * probability (100 - @percentage_true) %.
3766  *
3767  * Return: true with probability @percentage_true % and false with probability
3768  * (100 - @percentage_true) %
3769  */
3770 static bool
3771 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
3772 {
3773 	uint32_t rand;
3774 
3775 	if (percentage_true > 100) {
3776 		mgmt_rx_reo_err("Invalid probability value for true, %u",
3777 				percentage_true);
3778 		return false;
3779 	}
3780 
3781 	get_random_bytes(&rand, sizeof(rand));
3782 
3783 	return ((rand % 100) < percentage_true);
3784 }
3785 
3786 /**
3787  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
3788  * value in the range [0, max)
3789  * @max: upper limit for the output
3790  *
3791  * API to generate random unsigned integer value in the range [0, max).
3792  *
3793  * Return: unsigned integer value in the range [0, max)
3794  */
3795 static uint32_t
3796 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
3797 {
3798 	uint32_t rand;
3799 
3800 	get_random_bytes(&rand, sizeof(rand));
3801 
3802 	return (rand % max);
3803 }
3804 
3805 /**
3806  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given microseconds
3807  * @sleeptime_us: Sleep time in microseconds
3808  *
3809  * This API uses msleep() internally. So the granularity is limited to
3810  * milliseconds.
3811  *
3812  * Return: none
3813  */
3814 static void
3815 mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
3816 {
3817 	msleep(sleeptime_us / USEC_PER_MSEC);
3818 }
3819 
3820 /**
3821  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
3822  * layer
3823  * @arg: Argument
3824  *
3825  * This API handles the management frame at the host layer. This is applicable
3826  * for simulation alone.
3827  *
3828  * Return: none
3829  */
3830 static void
3831 mgmt_rx_reo_sim_frame_handler_host(void *arg)
3832 {
3833 	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
3834 	uint32_t fw_to_host_delay_us;
3835 	bool is_error_frame = false;
3836 	int8_t link_id = -1;
3837 	struct mgmt_rx_event_params *rx_params;
3838 	QDF_STATUS status;
3839 	struct mgmt_rx_reo_sim_context *sim_context;
3840 	struct wlan_objmgr_pdev *pdev;
3841 
3842 	if (!frame_fw) {
3843 		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
3844 				link_id);
3845 		goto error_print;
3846 	}
3847 
3848 	link_id = frame_fw->params.link_id;
3849 
3850 	sim_context = frame_fw->sim_context;
3851 	if (!sim_context) {
3852 		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
3853 				link_id);
3854 		goto error_free_fw_frame;
3855 	}
3856 
3857 	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
3858 			      mgmt_rx_reo_sim_get_random_unsigned_int(
3859 			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);
3860 
3861 	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);
3862 
3863 	if (!frame_fw->is_consumed_by_fw) {
3864 		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
3865 				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);
3866 
3867 		/**
3868 		 * This frame should be present in pending/stale list of the
3869 		 * master frame list. Error frames need not be reordered
3870 		 * by the reorder algorithm. They are only used for bookkeeping
3871 		 * purposes. Hence remove this frame from the master list.
3872 		 */
3873 		if (is_error_frame) {
3874 			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
3875 					&sim_context->master_frame_list,
3876 					&frame_fw->params);
3877 
3878 			if (QDF_IS_STATUS_ERROR(status)) {
3879 				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
3880 						link_id);
3881 				qdf_assert_always(0);
3882 			}
3883 		}
3884 	}
3885 
3886 	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
3887 			  link_id, frame_fw->params.global_timestamp,
3888 			  frame_fw->params.mgmt_pkt_ctr,
3889 			  frame_fw->is_consumed_by_fw, is_error_frame);
3890 
3891 	rx_params = alloc_mgmt_rx_event_params();
3892 	if (!rx_params) {
3893 		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
3894 				link_id);
3895 		goto error_free_fw_frame;
3896 	}
3897 
3898 	rx_params->reo_params->link_id = frame_fw->params.link_id;
3899 	rx_params->reo_params->global_timestamp =
3900 					frame_fw->params.global_timestamp;
3901 	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
3902 	rx_params->reo_params->valid = true;
3903 
3904 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);
3905 	if (!pdev) {
3906 		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
3907 		goto error_free_mgmt_rx_event_params;
3908 	}
3909 
3910 	if (is_error_frame) {
3911 		status = tgt_mgmt_rx_reo_host_drop_handler(
3912 						pdev, rx_params->reo_params);
3913 		free_mgmt_rx_event_params(rx_params);
3914 	} else if (frame_fw->is_consumed_by_fw) {
3915 		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
3916 						pdev, rx_params->reo_params);
3917 		free_mgmt_rx_event_params(rx_params);
3918 	} else {
3919 		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
3920 	}
3921 
3922 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
3923 
3924 	if (QDF_IS_STATUS_ERROR(status)) {
3925 		mgmt_rx_reo_err("Failed to execute reo algorithm");
3926 		goto error_free_fw_frame;
3927 	}
3928 
3929 	qdf_mem_free(frame_fw);
3930 
3931 	return;
3932 
3933 error_free_mgmt_rx_event_params:
3934 	free_mgmt_rx_event_params(rx_params);
3935 error_free_fw_frame:
3936 	qdf_mem_free(frame_fw);
3937 error_print:
3938 	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
3939 			link_id);
3940 }
3941 
3942 /**
3943  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
3944  * frame reordering
3945  * @link_id: link id
3946  * @id: snapshot id
3947  * @value: snapshot value
3948  *
3949  * This API writes the snapshots used for management frame reordering. MAC HW
3950  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
3951  * snapshots.
3952  *
3953  * Return: QDF_STATUS
3954  */
3955 static QDF_STATUS
3956 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id,
3957 			       enum mgmt_rx_reo_shared_snapshot_id id,
3958 			       struct mgmt_rx_reo_shared_snapshot value)
3959 {
3960 	struct wlan_objmgr_pdev *pdev;
3961 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
3962 	QDF_STATUS status;
3963 
3964 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);
3965 
3966 	if (!pdev) {
3967 		mgmt_rx_reo_err("pdev is null");
3968 		return QDF_STATUS_E_NULL_VALUE;
3969 	}
3970 
3971 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
3972 						      &snapshot_address);
3973 
3974 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
3975 
3976 	if (QDF_IS_STATUS_ERROR(status)) {
3977 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
3978 				id, pdev);
3979 		return QDF_STATUS_E_FAILURE;
3980 	}
3981 
3982 	snapshot_address->mgmt_rx_reo_snapshot_low =
3983 						value.mgmt_rx_reo_snapshot_low;
3984 	snapshot_address->mgmt_rx_reo_snapshot_high =
3985 						value.mgmt_rx_reo_snapshot_high;
3986 
3987 	return QDF_STATUS_SUCCESS;
3988 }
3989 
3990 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
3991 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
3992 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
3993 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
3994 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
3995 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)
3996 
3997 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
3998 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
3999 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
4000 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
4001 
4002 /**
4003  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
4004  * management frame
4005  * @global_timestamp: global time stamp
4006  * @mgmt_pkt_ctr: management packet counter
4007  *
4008  * This API gets the snapshot value for a frame with time stamp
4009  * @global_timestamp and sequence number @mgmt_pkt_ctr.
4010  *
4011  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
4012  */
4013 static struct mgmt_rx_reo_shared_snapshot
4014 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
4015 				   uint16_t mgmt_pkt_ctr)
4016 {
4017 	struct mgmt_rx_reo_shared_snapshot snapshot = {0};
4018 
4019 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4020 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
4021 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
4022 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4023 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
4024 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
4025 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4026 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
4027 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
4028 		     global_timestamp);
4029 
4030 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4031 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
4032 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
4033 		     global_timestamp >> 15);
4034 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4035 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
4036 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
4037 		     mgmt_pkt_ctr);
4038 
4039 	return snapshot;
4040 }
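
/*
 * Decode sketch (illustrative only, assuming the complementary QDF_GET_BITS
 * helper from qdf_util.h): the 32-bit global time stamp is split across the
 * two snapshot words, so a reader reassembles it from the 17 MSBs kept in
 * the high word and the 15 LSBs kept in the low word:
 *
 *	mgmt_pkt_ctr = QDF_GET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
 *				    MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
 *				    MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE);
 *	global_timestamp =
 *		(QDF_GET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
 *			      MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
 *			      MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE)
 *		 << 15) |
 *		QDF_GET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
 *			     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
 *			     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE);
 *
 * The redundant packet counter in the high word is presumably there so that
 * a reader can detect a torn read of the two words by comparing it against
 * the counter in the low word.
 */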
4041 
4042 /**
4043  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
4044  * @arg: Argument
4045  *
4046  * This API handles the management frame at the fw layer. This is applicable
4047  * for simulation alone.
4048  *
4049  * Return: none
4050  */
4051 static void
4052 mgmt_rx_reo_sim_frame_handler_fw(void *arg)
4053 {
4054 	struct mgmt_rx_frame_mac_hw *frame_hw =
4055 					(struct mgmt_rx_frame_mac_hw *)arg;
4056 	uint32_t mac_hw_to_fw_delay_us;
4057 	bool is_consumed_by_fw;
4058 	struct  mgmt_rx_frame_fw *frame_fw;
4059 	int8_t link_id = -1;
4060 	QDF_STATUS status;
4061 	struct mgmt_rx_reo_sim_context *sim_context;
4062 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4063 	struct mgmt_rx_reo_shared_snapshot snapshot_value;
4064 	bool ret;
4065 
4066 	if (!frame_hw) {
4067 		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
4068 				link_id);
4069 		qdf_assert_always(0);
4070 	}
4071 
4072 	link_id = frame_hw->params.link_id;
4073 
4074 	sim_context = frame_hw->sim_context;
4075 	if (!sim_context) {
4076 		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
4077 				link_id);
4078 		goto error_free_mac_hw_frame;
4079 	}
4080 
4081 	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
4082 			mgmt_rx_reo_sim_get_random_unsigned_int(
4083 			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
4084 	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);
4085 
4086 	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
4087 			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);
4088 
4089 	if (is_consumed_by_fw) {
4090 		/**
4091 		 * This frame should be present in pending/stale list of the
4092 		 * master frame list. FW consumed frames need not be reordered
4093 		 * by the reorder algorithm. They are only used for bookkeeping
4094 		 * purposes. Hence remove this frame from the master list.
4095 		 */
4096 		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
4097 					&sim_context->master_frame_list,
4098 					&frame_hw->params);
4099 
4100 		if (QDF_IS_STATUS_ERROR(status)) {
4101 			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
4102 					link_id);
4103 			qdf_assert_always(0);
4104 		}
4105 	}
4106 
4107 	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
4108 			  link_id, frame_hw->params.global_timestamp,
4109 			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);
4110 
4111 	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
4112 	if (!frame_fw) {
4113 		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
4114 				link_id);
4115 		goto error_free_mac_hw_frame;
4116 	}
4117 
4118 	frame_fw->params = frame_hw->params;
4119 	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
4120 	frame_fw->sim_context = frame_hw->sim_context;
4121 
4122 	snapshot_id = is_consumed_by_fw ?
4123 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
4124 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED;
4125 
4126 	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
4127 					frame_hw->params.global_timestamp,
4128 					frame_hw->params.mgmt_pkt_ctr);
4129 
4130 	status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
4131 						snapshot_value);
4132 
4133 	if (QDF_IS_STATUS_ERROR(status)) {
4134 		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
4135 				link_id, snapshot_id);
4136 		goto error_free_fw_frame;
4137 	}
4138 
4139 	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
4140 				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
4141 	if (QDF_IS_STATUS_ERROR(status)) {
4142 		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
4143 		goto error_free_fw_frame;
4144 	}
4145 
4146 	ret = qdf_queue_work(
4147 			NULL, sim_context->host_mgmt_frame_handler[link_id],
4148 			&frame_fw->frame_handler_host);
4149 	if (!ret) {
4150 		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
4151 				link_id);
4152 		goto error_free_fw_frame;
4153 	}
4154 
4155 	qdf_mem_free(frame_hw);
4156 
4157 	return;
4158 
4159 error_free_fw_frame:
4160 	qdf_mem_free(frame_fw);
4161 error_free_mac_hw_frame:
4162 	qdf_mem_free(frame_hw);
4163 
4164 	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
4165 			link_id);
4166 }
4167 
4168 /**
4169  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
4170  * from the index to the valid link list
4171  * @valid_link_list_index: Index to list of valid links
4172  *
4173  * Return: link id
4174  */
4175 static int8_t
4176 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
4177 {
4178 	struct mgmt_rx_reo_sim_context *sim_context;
4179 
4180 	if (valid_link_list_index >= MAX_MLO_LINKS) {
4181 		mgmt_rx_reo_err("Invalid index %u to valid link list",
4182 				valid_link_list_index);
4183 		return MGMT_RX_REO_INVALID_LINK_ID;
4184 	}
4185 
4186 	sim_context = mgmt_rx_reo_sim_get_context();
4187 	if (!sim_context) {
4188 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4189 		return MGMT_RX_REO_INVALID_LINK_ID;
4190 	}
4191 
4192 	return sim_context->link_id_to_pdev_map.valid_link_list
4193 						[valid_link_list_index];
4194 }
4195 
4196 /**
4197  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
4198  * the air
4199  * @mac_hw: pointer to structure representing MAC HW
4200  * @num_mlo_links: number of MLO HW links
4201  * @frame: pointer to management frame parameters
4202  *
4203  * This API simulates the management frame reception from air.
4204  *
4205  * Return: QDF_STATUS
4206  */
4207 static QDF_STATUS
4208 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4209 				 uint8_t num_mlo_links,
4210 				 struct mgmt_rx_frame_params *frame)
4211 {
4212 	uint8_t valid_link_list_index;
4213 	QDF_STATUS status;
4214 	int8_t link_id;
4215 
4216 	if (!mac_hw) {
4217 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4218 		return QDF_STATUS_E_NULL_VALUE;
4219 	}
4220 
4221 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
4222 		mgmt_rx_reo_err("Invalid number of MLO links %u",
4223 				num_mlo_links);
4224 		return QDF_STATUS_E_INVAL;
4225 	}
4226 
4227 	if (!frame) {
4228 		mgmt_rx_reo_err("pointer to frame parameters is null");
4229 		return QDF_STATUS_E_NULL_VALUE;
4230 	}
4231 
4232 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
4233 							num_mlo_links);
4234 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
4235 	qdf_assert_always(link_id >= 0);
4236 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4237 
4238 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
4239 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
4240 	frame->link_id = link_id;
4241 
4242 	return QDF_STATUS_SUCCESS;
4243 }
4244 
4245 /**
4246  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
4247  * HW in case of any Rx error.
4248  * @mac_hw: pointer to structure representing MAC HW
4249  * @frame: pointer to management frame parameters
4250  *
4251  * Return: QDF_STATUS
4252  */
4253 static QDF_STATUS
4254 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4255 				      struct mgmt_rx_frame_params *frame)
4256 {
4257 	if (!mac_hw) {
4258 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4259 		return QDF_STATUS_E_NULL_VALUE;
4260 	}
4261 
4262 	if (!frame) {
4263 		mgmt_rx_reo_err("pointer to frame parameters is null");
4264 		return QDF_STATUS_E_NULL_VALUE;
4265 	}
4266 
4267 	if (frame->link_id >= MAX_MLO_LINKS) {
4268 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
4269 		return QDF_STATUS_E_INVAL;
4270 	}
4271 
4272 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
4273 
4274 	return QDF_STATUS_SUCCESS;
4275 }
4276 
4277 /**
4278  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
4279  * @data: pointer to data input
4280  *
4281  * kthread handler to simulate MAC HW.
4282  *
4283  * Return: 0 for success, else failure
4284  */
4285 static int
4286 mgmt_rx_reo_sim_mac_hw_thread(void *data)
4287 {
4288 	struct mgmt_rx_reo_sim_context *sim_context = data;
4289 	struct mgmt_rx_reo_sim_mac_hw *mac_hw;
4290 
4291 	if (!sim_context) {
4292 		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
4293 		return -EINVAL;
4294 	}
4295 
4296 	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;
4297 
4298 	while (!qdf_thread_should_stop()) {
4299 		uint32_t inter_frame_delay_us;
4300 		struct mgmt_rx_frame_params frame;
4301 		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
4302 		int8_t link_id = -1;
4303 		QDF_STATUS status;
4304 		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4305 		struct mgmt_rx_reo_shared_snapshot snapshot_value;
4306 		int8_t num_mlo_links;
4307 		bool ret;
4308 
4309 		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
4310 		if (num_mlo_links < 0 ||
4311 		    num_mlo_links > MAX_MLO_LINKS) {
4312 			mgmt_rx_reo_err("Invalid number of MLO links %d",
4313 					num_mlo_links);
4314 			qdf_assert_always(0);
4315 		}
4316 
4317 		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
4318 							  &frame);
4319 		if (QDF_IS_STATUS_ERROR(status)) {
4320 			mgmt_rx_reo_err("Receive from the air failed");
4321 			/**
4322 			 * Frame reception failed and we are not sure about the
4323 			 * link id. Without link id there is no way to restore
4324 			 * the mac hw state. Hence assert unconditionally.
4325 			 */
4326 			qdf_assert_always(0);
4327 		}
4328 		link_id = frame.link_id;
4329 
4330 		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
4331 				  link_id, frame.global_timestamp,
4332 				  frame.mgmt_pkt_ctr);
4333 
4334 		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
4335 		if (!frame_mac_hw) {
4336 			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
4337 					link_id);
4338 
4339 			/* Cleanup */
4340 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4341 								mac_hw, &frame);
4342 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4343 
4344 			continue;
4345 		}
4346 
4347 		frame_mac_hw->params = frame;
4348 		frame_mac_hw->sim_context = sim_context;
4349 
4350 		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
4351 				&sim_context->master_frame_list, &frame);
4352 		if (QDF_IS_STATUS_ERROR(status)) {
4353 			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
4354 					link_id);
4355 
4356 			/* Cleanup */
4357 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4358 								mac_hw, &frame);
4359 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4360 
4361 			qdf_mem_free(frame_mac_hw);
4362 
4363 			continue;
4364 		}
4365 
4366 		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
4367 		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
4368 						frame.global_timestamp,
4369 						frame.mgmt_pkt_ctr);
4370 
4371 		status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
4372 							snapshot_value);
4373 		if (QDF_IS_STATUS_ERROR(status)) {
4374 			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
4375 					link_id, snapshot_id);
4376 
4377 			/* Cleanup */
4378 			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
4379 				&sim_context->master_frame_list, &frame);
4380 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4381 
4382 			status = mgmt_rx_reo_sim_undo_receive_from_air(
4383 								mac_hw, &frame);
4384 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
4385 
4386 			qdf_mem_free(frame_mac_hw);
4387 
4388 			continue;
4389 		}
4390 
4391 		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
4392 					 mgmt_rx_reo_sim_frame_handler_fw,
4393 					 frame_mac_hw);
4394 		if (QDF_IS_STATUS_ERROR(status)) {
4395 			mgmt_rx_reo_err("HW-%d : Failed to create work",
4396 					link_id);
4397 			qdf_assert_always(0);
4398 		}
4399 
4400 		ret = qdf_queue_work(
4401 			NULL, sim_context->fw_mgmt_frame_handler[link_id],
4402 			&frame_mac_hw->frame_handler_fw);
4403 		if (!ret) {
4404 			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
4405 					link_id);
4406 			qdf_assert_always(0);
4407 		}
4408 
4409 		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
4410 			mgmt_rx_reo_sim_get_random_unsigned_int(
4411 			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);
4412 
4413 		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
4414 	}
4415 
4416 	return 0;
4417 }
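
/*
 * Simulation pipeline summary (as implemented by the handlers above; no new
 * behaviour is implied): each simulated frame flows
 *
 *	mgmt_rx_reo_sim_mac_hw_thread()
 *	    -> add to master pending list, write the MAC_HW snapshot and
 *	       queue mgmt_rx_reo_sim_frame_handler_fw() on the per-link FW
 *	       workqueue
 *	mgmt_rx_reo_sim_frame_handler_fw()
 *	    -> write the FW_CONSUMED or FW_FORWADED snapshot and queue
 *	       mgmt_rx_reo_sim_frame_handler_host() on the per-link host
 *	       workqueue
 *	mgmt_rx_reo_sim_frame_handler_host()
 *	    -> invoke the tgt_mgmt_rx_reo_* handlers, which feed the actual
 *	       reorder algorithm
 *
 * with random delays injected between every hop.
 */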
4418 
4419 /**
4420  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
4421  * management frame list
4422  * @master_frame_list: Pointer to master frame list
4423  *
4424  * This API initializes the master management frame list
4425  *
4426  * Return: QDF_STATUS
4427  */
4428 static QDF_STATUS
4429 mgmt_rx_reo_sim_init_master_frame_list(
4430 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4431 {
4432 	qdf_spinlock_create(&master_frame_list->lock);
4433 
4434 	qdf_list_create(&master_frame_list->pending_list,
4435 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
4436 	qdf_list_create(&master_frame_list->stale_list,
4437 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
4438 
4439 	return QDF_STATUS_SUCCESS;
4440 }
4441 
4442 /**
4443  * mgmt_rx_reo_sim_deinit_master_frame_list() - De-initializes the master
4444  * management frame list
4445  * @master_frame_list: Pointer to master frame list
4446  *
4447  * This API de-initializes the master management frame list
4448  *
4449  * Return: QDF_STATUS
4450  */
4451 static QDF_STATUS
4452 mgmt_rx_reo_sim_deinit_master_frame_list(
4453 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
4454 {
4455 	qdf_spin_lock(&master_frame_list->lock);
4456 	qdf_list_destroy(&master_frame_list->stale_list);
4457 	qdf_list_destroy(&master_frame_list->pending_list);
4458 	qdf_spin_unlock(&master_frame_list->lock);
4459 
4460 	qdf_spinlock_destroy(&master_frame_list->lock);
4461 
4462 	return QDF_STATUS_SUCCESS;
4463 }
4464 
4465 /**
4466  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
4467  * unique link id values
4468  * @link_id_to_pdev_map: pointer to link id to pdev map
4469  * @link_id: Pointer to unique link id
4470  *
4471  * This API generates unique link id values for each pdev. This API should be
4472  * called after acquiring the spin lock protecting link id to pdev map.
4473  *
4474  * Return: QDF_STATUS
4475  */
4476 static QDF_STATUS
4477 mgmt_rx_reo_sim_generate_unique_link_id(
4478 		struct wlan_objmgr_pdev **link_id_to_pdev_map, uint8_t *link_id)
4479 {
4480 	uint8_t random_link_id;
4481 	uint8_t link;
4482 
4483 	if (!link_id_to_pdev_map || !link_id)
4484 		return QDF_STATUS_E_NULL_VALUE;
4485 
4486 	for (link = 0; link < MAX_MLO_LINKS; link++)
4487 		if (!link_id_to_pdev_map[link])
4488 			break;
4489 
4490 	if (link == MAX_MLO_LINKS) {
4491 		mgmt_rx_reo_err("All link ids are already allocated");
4492 		return QDF_STATUS_E_FAILURE;
4493 	}
4494 
4495 	while (1) {
4496 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
4497 							MAX_MLO_LINKS);
4498 
4499 		if (!link_id_to_pdev_map[random_link_id])
4500 			break;
4501 	}
4502 
4503 	*link_id = random_link_id;
4504 
4505 	return QDF_STATUS_SUCCESS;
4506 }
4507 
4508 /**
4509  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
4510  * to pdev map
4511  * @link_id_to_pdev_map: pointer to link id to pdev map
4512  * @pdev: pointer to pdev object
4513  *
4514  * This API incrementally builds the MLO HW link id to pdev map. This API is
4515  * used only for simulation.
4516  *
4517  * Return: QDF_STATUS
4518  */
4519 static QDF_STATUS
4520 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4521 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4522 		struct wlan_objmgr_pdev *pdev)
4523 {
4524 	uint8_t link_id;
	QDF_STATUS status;
4525 
4526 	if (!link_id_to_pdev_map) {
4527 		mgmt_rx_reo_err("Link id to pdev map is null");
4528 		return QDF_STATUS_E_NULL_VALUE;
4529 	}
4530 
4531 	if (!pdev) {
4532 		mgmt_rx_reo_err("pdev is null");
4533 		return QDF_STATUS_E_NULL_VALUE;
4534 	}
4535 
4536 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4537 
4538 	status = mgmt_rx_reo_sim_generate_unique_link_id(
4539 					link_id_to_pdev_map->map, &link_id);
4540 	if (QDF_IS_STATUS_ERROR(status)) {
4541 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
4542 		return QDF_STATUS_E_FAILURE;
4543 	}
4544 	qdf_assert_always(link_id < MAX_MLO_LINKS);
4545 
4546 	link_id_to_pdev_map->map[link_id] = pdev;
4547 	link_id_to_pdev_map->valid_link_list
4548 			[link_id_to_pdev_map->num_mlo_links] = link_id;
4549 	link_id_to_pdev_map->num_mlo_links++;
4550 
4551 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4552 
4553 	return QDF_STATUS_SUCCESS;
4554 }
4555 
4556 /**
4557  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
4558  * id to pdev map
4559  * @link_id_to_pdev_map: pointer to link id to pdev map
4560  * @pdev: pointer to pdev object
4561  *
4562  * This API incrementally destroys the MLO HW link id to pdev map. This API is
4563  * used only for simulation.
4564  *
4565  * Return: QDF_STATUS
4566  */
4567 static QDF_STATUS
4568 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4569 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
4570 		struct wlan_objmgr_pdev *pdev)
4571 {
4572 	uint8_t link_id;
4573 
4574 	if (!link_id_to_pdev_map) {
4575 		mgmt_rx_reo_err("Link id to pdev map is null");
4576 		return QDF_STATUS_E_NULL_VALUE;
4577 	}
4578 
4579 	if (!pdev) {
4580 		mgmt_rx_reo_err("pdev is null");
4581 		return QDF_STATUS_E_NULL_VALUE;
4582 	}
4583 
4584 	qdf_spin_lock(&link_id_to_pdev_map->lock);
4585 
4586 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4587 		if (link_id_to_pdev_map->map[link_id] == pdev) {
4588 			link_id_to_pdev_map->map[link_id] = NULL;
4589 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
4590 
4591 			return QDF_STATUS_SUCCESS;
4592 		}
4593 	}
4594 
4595 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
4596 
4597 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
4598 
4599 	return QDF_STATUS_E_FAILURE;
4600 }
4601 
4602 QDF_STATUS
4603 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
4604 {
4605 	struct mgmt_rx_reo_sim_context *sim_context;
4606 	QDF_STATUS status;
4607 
4608 	sim_context = mgmt_rx_reo_sim_get_context();
4609 	if (!sim_context) {
4610 		mgmt_rx_reo_err("Mgmt simulation context is null");
4611 		return QDF_STATUS_E_NULL_VALUE;
4612 	}
4613 
4614 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
4615 				&sim_context->link_id_to_pdev_map, pdev);
4616 
4617 	if (QDF_IS_STATUS_ERROR(status)) {
4618 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
4619 		return status;
4620 	}
4621 
4622 	return QDF_STATUS_SUCCESS;
4623 }
4624 
4625 QDF_STATUS
4626 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
4627 {
4628 	struct mgmt_rx_reo_sim_context *sim_context;
4629 	QDF_STATUS status;
4630 
4631 	sim_context = mgmt_rx_reo_sim_get_context();
4632 	if (!sim_context) {
4633 		mgmt_rx_reo_err("Mgmt simulation context is null");
4634 		return QDF_STATUS_E_NULL_VALUE;
4635 	}
4636 
4637 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
4638 				&sim_context->link_id_to_pdev_map, pdev);
4639 
4640 	if (QDF_IS_STATUS_ERROR(status)) {
4641 		mgmt_rx_reo_err("Failed to remove pdev from the map");
4642 		return status;
4643 	}
4644 
4645 	return QDF_STATUS_SUCCESS;
4646 }
4647 
4648 QDF_STATUS
4649 mgmt_rx_reo_sim_start(void)
4650 {
4651 	struct mgmt_rx_reo_context *reo_context;
4652 	struct mgmt_rx_reo_sim_context *sim_context;
4653 	qdf_thread_t *mac_hw_thread;
4654 	uint8_t link_id;
4655 	uint8_t id;
4656 	QDF_STATUS status;
4657 
4658 	reo_context = mgmt_rx_reo_get_context();
4659 	if (!reo_context) {
4660 		mgmt_rx_reo_err("reo context is null");
4661 		return QDF_STATUS_E_NULL_VALUE;
4662 	}
4663 
4664 	reo_context->simulation_in_progress = true;
4665 
4666 	sim_context = &reo_context->sim_context;
4667 
4668 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4669 		struct workqueue_struct *wq;
4670 
4671 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
4672 					     link_id);
4673 		if (!wq) {
4674 			mgmt_rx_reo_err("Host workqueue creation failed");
4675 			status = QDF_STATUS_E_FAILURE;
4676 			goto error_destroy_fw_and_host_work_queues_till_last_link;
4677 		}
4678 		sim_context->host_mgmt_frame_handler[link_id] = wq;
4679 
4680 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
4681 					     link_id);
4682 		if (!wq) {
4683 			mgmt_rx_reo_err("FW workqueue creation failed");
4684 			status = QDF_STATUS_E_FAILURE;
4685 			goto error_destroy_host_work_queue_of_last_link;
4686 		}
4687 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
4688 	}
4689 
4690 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
4691 					  sim_context, "MAC_HW_thread");
4692 	if (!mac_hw_thread) {
4693 		mgmt_rx_reo_err("MAC HW thread creation failed");
4694 		status = QDF_STATUS_E_FAILURE;
4695 		goto error_destroy_fw_and_host_work_queues_till_last_link;
4696 	}
4697 
4698 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
4699 
4700 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
4701 
4702 	return QDF_STATUS_SUCCESS;
4703 
4708 error_destroy_host_work_queue_of_last_link:
4709 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4710 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4711 
4712 error_destroy_fw_and_host_work_queues_till_last_link:
4713 	for (id = 0; id < link_id; id++) {
4714 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4715 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
4716 
4717 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
4718 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
4719 	}
4720 
4721 	return status;
4722 }
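
/*
 * Usage sketch (illustrative only; how the simulation is actually triggered,
 * e.g. from a test or debug hook, is outside this file): a test would pair
 * the start and stop calls around a soak period, e.g.
 *
 *	status = mgmt_rx_reo_sim_start();
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	[let the MAC HW thread pump frames for the desired duration]
 *
 *	status = mgmt_rx_reo_sim_stop();
 *
 * mgmt_rx_reo_sim_stop() waits for the MAC HW thread, drains the per-link
 * FW and host work queues and asserts if any simulated frame is left in the
 * pending or stale lists.
 */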
4723 
4724 QDF_STATUS
4725 mgmt_rx_reo_sim_stop(void)
4726 {
4727 	struct mgmt_rx_reo_context *reo_context;
4728 	struct mgmt_rx_reo_sim_context *sim_context;
4729 	struct mgmt_rx_reo_master_frame_list *master_frame_list;
4730 	uint8_t link_id;
4731 	QDF_STATUS status;
4732 
4733 	reo_context = mgmt_rx_reo_get_context();
4734 	if (!reo_context) {
4735 		mgmt_rx_reo_err("reo context is null");
4736 		return QDF_STATUS_E_NULL_VALUE;
4737 	}
4738 
4739 	sim_context = &reo_context->sim_context;
4740 
4741 	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
4742 	if (QDF_IS_STATUS_ERROR(status)) {
4743 		mgmt_rx_reo_err("Failed to stop the thread");
4744 		return status;
4745 	}
4746 
4747 	sim_context->mac_hw_sim.mac_hw_thread = NULL;
4748 
4749 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
4750 		/* Wait for all the pending frames to be processed by FW */
4751 		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4752 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
4753 
4754 		/* Wait for all the pending frames to be processed by host */
4755 		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
4756 		destroy_workqueue(
4757 				sim_context->host_mgmt_frame_handler[link_id]);
4758 	}
4759 
4760 	status = mgmt_rx_reo_print_ingress_frame_debug_info();
4761 	if (QDF_IS_STATUS_ERROR(status)) {
4762 		mgmt_rx_reo_err("Failed to print ingress frame debug info");
4763 		return status;
4764 	}
4765 
4766 	status = mgmt_rx_reo_print_egress_frame_debug_info();
4767 	if (QDF_IS_STATUS_ERROR(status)) {
4768 		mgmt_rx_reo_err("Failed to print egress frame debug info");
4769 		return status;
4770 	}
4771 
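	/*
	 * Every frame injected by the MAC HW thread should have been
	 * reordered and delivered by now; entries left in the pending or
	 * stale list indicate a reorder failure.
	 */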
4772 	master_frame_list = &sim_context->master_frame_list;
4773 	if (!qdf_list_empty(&master_frame_list->pending_list) ||
4774 	    !qdf_list_empty(&master_frame_list->stale_list)) {
4775 		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");
4776 
4777 		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
4778 		if (QDF_IS_STATUS_ERROR(status)) {
4779 			mgmt_rx_reo_err("Failed to print reorder list");
4780 			return status;
4781 		}
4782 
4783 		qdf_assert_always(0);
4784 	} else {
4785 		mgmt_rx_reo_err("reo sim passed");
4786 	}
4787 
4788 	reo_context->simulation_in_progress = false;
4789 
4790 	return QDF_STATUS_SUCCESS;
4791 }
4792 
4793 /**
4794  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
4795  * context.
4796  * @reo_context: Pointer to reo context
4797  *
4798  * Return: QDF_STATUS of operation
4799  */
4800 static QDF_STATUS
4801 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
4802 {
4803 	QDF_STATUS status;
4804 	struct mgmt_rx_reo_sim_context *sim_context;
4805 	uint8_t link_id;
4806 
4807 	if (!reo_context) {
4808 		mgmt_rx_reo_err("reo context is null");
4809 		return QDF_STATUS_E_NULL_VALUE;
4810 	}
4811 
4812 	sim_context = &reo_context->sim_context;
4813 
4814 	qdf_mem_zero(sim_context, sizeof(*sim_context));
4815 
4816 	status = mgmt_rx_reo_sim_init_master_frame_list(
4817 					&sim_context->master_frame_list);
4818 	if (QDF_IS_STATUS_ERROR(status)) {
4819 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
4820 		return status;
4821 	}
4822 
4823 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
4824 
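	/*
	 * Mark every entry in the valid link list as invalid to begin with;
	 * entries are expected to be populated as pdevs register with the
	 * simulation.
	 */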
4825 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
4826 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
4827 					MGMT_RX_REO_INVALID_LINK_ID;
4828 
4829 	return QDF_STATUS_SUCCESS;
4830 }
4831 
4832 /**
4833  * mgmt_rx_reo_sim_deinit() - De-initialize management rx reorder simulation
4834  * context.
4835  * @reo_context: Pointer to reo context
4836  *
4837  * Return: QDF_STATUS of operation
4838  */
4839 static QDF_STATUS
4840 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
4841 {
4842 	QDF_STATUS status;
4843 	struct mgmt_rx_reo_sim_context *sim_context;
4844 
4845 	if (!reo_context) {
4846 		mgmt_rx_reo_err("reo context is null");
4847 		return QDF_STATUS_E_NULL_VALUE;
4848 	}
4849 
4850 	sim_context = &reo_context->sim_context;
4851 
4852 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
4853 
4854 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
4855 					&sim_context->master_frame_list);
4856 	if (QDF_IS_STATUS_ERROR(status)) {
4857 		mgmt_rx_reo_err("Failed to destroy master frame list");
4858 		return status;
4859 	}
4860 
4861 	return QDF_STATUS_SUCCESS;
4862 }
4863 
4864 QDF_STATUS
4865 mgmt_rx_reo_sim_get_snapshot_address(
4866 			struct wlan_objmgr_pdev *pdev,
4867 			enum mgmt_rx_reo_shared_snapshot_id id,
4868 			struct mgmt_rx_reo_shared_snapshot **address)
4869 {
4870 	int8_t link_id;
4871 	struct mgmt_rx_reo_sim_context *sim_context;
4872 
4873 	sim_context = mgmt_rx_reo_sim_get_context();
4874 	if (!sim_context) {
4875 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4876 		return QDF_STATUS_E_NULL_VALUE;
4877 	}
4878 
4879 	if (!pdev) {
4880 		mgmt_rx_reo_err("pdev is NULL");
4881 		return QDF_STATUS_E_NULL_VALUE;
4882 	}
4883 
4884 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
4885 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
4886 		return QDF_STATUS_E_INVAL;
4887 	}
4888 
4889 	if (!address) {
4890 		mgmt_rx_reo_err("Pointer to snapshot address is null");
4891 		return QDF_STATUS_E_NULL_VALUE;
4892 	}
4893 
4894 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
4895 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
4896 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
4897 				pdev);
4898 		return QDF_STATUS_E_INVAL;
4899 	}
4900 
4901 	*address = &sim_context->snapshot[link_id][id];
4902 
4903 	return QDF_STATUS_SUCCESS;
4904 }
4905 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
4906 
4907 /**
4908  * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
4909  * @reo_list: Pointer to reorder list
4910  *
4911  * API to flush all the entries of the reorder list. This API acquires the
4912  * lock protecting the list.
4913  *
4914  * Return: QDF_STATUS
4915  */
4916 static QDF_STATUS
4917 mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
4918 {
4919 	struct mgmt_rx_reo_list_entry *cur_entry;
4920 	struct mgmt_rx_reo_list_entry *temp;
4921 
4922 	if (!reo_list) {
4923 		mgmt_rx_reo_err("reorder list is null");
4924 		return QDF_STATUS_E_NULL_VALUE;
4925 	}
4926 
4927 	qdf_spin_lock_bh(&reo_list->list_lock);
4928 
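	/*
	 * qdf_list_for_each_del() caches the next node up front, so the
	 * current entry can be freed safely inside the loop body.
	 */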
4929 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
4930 		free_mgmt_rx_event_params(cur_entry->rx_params);
4931 
4932 		/**
4933 		 * Release the reference taken when the entry is inserted into
4934 		 * the reorder list.
4935 		 */
4936 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
4937 					     WLAN_MGMT_RX_REO_ID);
4938 
4939 		qdf_mem_free(cur_entry);
4940 	}
4941 
4942 	qdf_spin_unlock_bh(&reo_list->list_lock);
4943 
4944 	return QDF_STATUS_SUCCESS;
4945 }
4946 
4947 /**
4948  * mgmt_rx_reo_list_deinit() - De-initialize the management rx-reorder list
4949  * @reo_list: Pointer to reorder list
4950  *
4951  * API to de-initialize the management rx-reorder list.
4952  *
4953  * Return: QDF_STATUS
4954  */
4955 static QDF_STATUS
4956 mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list)
4957 {
4958 	QDF_STATUS status;
4959 
4960 	qdf_timer_free(&reo_list->global_mgmt_rx_inactivity_timer);
4961 	qdf_timer_free(&reo_list->ageout_timer);
4962 
4963 	status = mgmt_rx_reo_flush_reorder_list(reo_list);
4964 	if (QDF_IS_STATUS_ERROR(status)) {
4965 		mgmt_rx_reo_err("Failed to flush the reorder list");
4966 		return QDF_STATUS_E_FAILURE;
4967 	}
4968 	qdf_spinlock_destroy(&reo_list->list_lock);
4969 	qdf_list_destroy(&reo_list->list);
4970 
4971 	return QDF_STATUS_SUCCESS;
4972 }
4973 
4974 QDF_STATUS
4975 mgmt_rx_reo_deinit_context(void)
4976 {
4977 	QDF_STATUS status;
4978 	struct mgmt_rx_reo_context *reo_context;
4979 
4980 	reo_context = mgmt_rx_reo_get_context();
4981 	if (!reo_context) {
4982 		mgmt_rx_reo_err("reo context is null");
4983 		return QDF_STATUS_E_NULL_VALUE;
4984 	}
4985 
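	/*
	 * Cancel both timers synchronously so that no timer callback can
	 * race with the teardown of the reorder list below.
	 */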
4986 	qdf_timer_sync_cancel(
4987 			&reo_context->reo_list.global_mgmt_rx_inactivity_timer);
4988 	qdf_timer_sync_cancel(&reo_context->reo_list.ageout_timer);
4989 
4990 	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);
4991 
4992 	status = mgmt_rx_reo_sim_deinit(reo_context);
4993 	if (QDF_IS_STATUS_ERROR(status)) {
4994 		mgmt_rx_reo_err("Failed to de-initialize reo sim context");
4995 		return QDF_STATUS_E_FAILURE;
4996 	}
4997 
4998 	status = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
4999 	if (QDF_IS_STATUS_ERROR(status)) {
5000 		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
5001 		return status;
5002 	}
5003 
5004 	return QDF_STATUS_SUCCESS;
5005 }
5006 
5007 QDF_STATUS
5008 mgmt_rx_reo_init_context(void)
5009 {
5010 	QDF_STATUS status;
5011 	QDF_STATUS temp;
5012 	struct mgmt_rx_reo_context *reo_context;
5013 
5014 	reo_context = mgmt_rx_reo_get_context();
5015 	if (!reo_context) {
5016 		mgmt_rx_reo_err("reo context is null");
5017 		return QDF_STATUS_E_NULL_VALUE;
5018 	}
5019 	qdf_mem_zero(reo_context, sizeof(*reo_context));
5020 
5021 	status = mgmt_rx_reo_list_init(&reo_context->reo_list);
5022 	if (QDF_IS_STATUS_ERROR(status)) {
5023 		mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list");
5024 		return status;
5025 	}
5026 
5027 	status = mgmt_rx_reo_sim_init(reo_context);
5028 	if (QDF_IS_STATUS_ERROR(status)) {
5029 		mgmt_rx_reo_err("Failed to initialize reo simulation context");
5030 		goto error_reo_list_deinit;
5031 	}
5032 
5033 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
5034 
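	/*
	 * Arm the ageout timer so that list entries which have waited too
	 * long for frames from other links are flushed out periodically.
	 */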
5035 	qdf_timer_mod(&reo_context->reo_list.ageout_timer,
5036 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
5037 
5038 	qdf_mem_set(reo_context->ingress_frame_debug_info.boarder,
5039 		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5040 	qdf_mem_set(reo_context->egress_frame_debug_info.boarder,
5041 		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5042 
5043 	return QDF_STATUS_SUCCESS;
5044 
5045 error_reo_list_deinit:
5046 	temp = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
5047 	if (QDF_IS_STATUS_ERROR(temp)) {
5048 		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
5049 		return temp;
5050 	}
5051 
5052 	return status;
5053 }
5054 
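/*
 * Illustrative pairing of mgmt_rx_reo_init_context() and
 * mgmt_rx_reo_deinit_context() (a sketch, assuming the caller sets up the
 * context once and tears it down once; not code invoked from this file):
 *
 *	if (QDF_IS_STATUS_ERROR(mgmt_rx_reo_init_context()))
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	status = mgmt_rx_reo_deinit_context();
 */
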
5055 /**
5056  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
5057  * params object
5058  * @snapshot_params: Pointer to snapshot params object
5059  *
5060  * Return: void
5061  */
5062 static void
5063 wlan_mgmt_rx_reo_initialize_snapshot_params(
5064 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
5065 {
5066 	snapshot_params->valid = false;
5067 	snapshot_params->mgmt_pkt_ctr = 0;
5068 	snapshot_params->global_timestamp = 0;
5069 }
5070 
5071 /**
5072  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
5073  * snapshot addresses for a given pdev
5074  * @pdev: pointer to pdev object
5075  *
5076  * Return: QDF_STATUS
5077  */
5078 static QDF_STATUS
5079 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
5080 {
5081 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5082 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5083 	QDF_STATUS status;
5084 
5085 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5086 	if (!mgmt_rx_reo_pdev_ctx) {
5087 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5088 		return QDF_STATUS_E_NULL_VALUE;
5089 	}
5090 
5091 	snapshot_id = 0;
5092 
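	/*
	 * Query the target interface for each host-target shared snapshot
	 * and cache the returned snapshot info in the pdev private object.
	 */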
5093 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5094 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
5095 
5096 		snapshot_info =
5097 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
5098 			[snapshot_id];
5099 		status = wlan_mgmt_rx_reo_get_snapshot_info
5100 					(pdev, snapshot_id, snapshot_info);
5101 		if (QDF_IS_STATUS_ERROR(status)) {
5102 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
5103 					snapshot_id);
5104 			return status;
5105 		}
5106 
5107 		snapshot_id++;
5108 	}
5109 
5110 	return QDF_STATUS_SUCCESS;
5111 }
5112 
5113 /**
5114  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
5115  * snapshot values for a given pdev
5116  * @pdev: pointer to pdev object
5117  *
5118  * Return: QDF_STATUS
5119  */
5120 static QDF_STATUS
5121 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
5122 {
5123 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5124 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5125 
5126 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5127 	if (!mgmt_rx_reo_pdev_ctx) {
5128 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5129 		return QDF_STATUS_E_NULL_VALUE;
5130 	}
5131 
5132 	snapshot_id = 0;
5133 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5134 		wlan_mgmt_rx_reo_initialize_snapshot_params
5135 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
5136 			 [snapshot_id]);
5137 		snapshot_id++;
5138 	}
5139 
5140 	/* Initialize Host snapshot params */
5141 	wlan_mgmt_rx_reo_initialize_snapshot_params
5142 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
5143 
5144 	return QDF_STATUS_SUCCESS;
5145 }
5146 
5147 /**
5148  * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
5149  * for management Rx REO pdev component private object
5150  * @pdev: pointer to pdev object
5151  *
5152  * Return: QDF_STATUS
5153  */
5154 static QDF_STATUS
5155 mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
5156 {
5157 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5158 
5159 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5160 	if (!mgmt_rx_reo_pdev_ctx) {
5161 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5162 		return QDF_STATUS_E_NULL_VALUE;
5163 	}
5164 
5165 	mgmt_rx_reo_pdev_ctx->init_complete = true;
5166 
5167 	return QDF_STATUS_SUCCESS;
5168 }
5169 
5170 /**
5171  * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
5172  * for management Rx REO pdev component private object
5173  * @pdev: pointer to pdev object
5174  *
5175  * Return: QDF_STATUS
5176  */
5177 static QDF_STATUS
5178 mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
5179 {
5180 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
5181 
5182 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
5183 	if (!mgmt_rx_reo_pdev_ctx) {
5184 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
5185 		return QDF_STATUS_E_NULL_VALUE;
5186 	}
5187 
5188 	mgmt_rx_reo_pdev_ctx->init_complete = false;
5189 
5190 	return QDF_STATUS_SUCCESS;
5191 }
5192 
5193 /**
5194  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
5195  * snapshot related data structures for a given pdev
5196  * @pdev: pointer to pdev object
5197  *
5198  * Return: QDF_STATUS
5199  */
5200 static QDF_STATUS
5201 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
5202 {
5203 	QDF_STATUS status;
5204 
5205 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5206 	if (QDF_IS_STATUS_ERROR(status)) {
5207 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5208 		return status;
5209 	}
5210 
5211 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
5212 	if (QDF_IS_STATUS_ERROR(status)) {
5213 		mgmt_rx_reo_err("Failed to initialize snapshot address");
5214 		return status;
5215 	}
5216 
5217 	return QDF_STATUS_SUCCESS;
5218 }
5219 
5220 /**
5221  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
5222  * data structures for a given pdev
5223  * @pdev: pointer to pdev object
5224  *
5225  * Return: QDF_STATUS
5226  */
5227 static QDF_STATUS
5228 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
5229 {
5230 	QDF_STATUS status;
5231 
5232 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
5233 	if (QDF_IS_STATUS_ERROR(status)) {
5234 		mgmt_rx_reo_err("Failed to initialize snapshot value");
5235 		return status;
5236 	}
5237 
5238 	return QDF_STATUS_SUCCESS;
5239 }
5240 
5241 QDF_STATUS
5242 mgmt_rx_reo_attach(struct wlan_objmgr_pdev *pdev)
5243 {
5244 	QDF_STATUS status;
5245 
5246 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5247 		return QDF_STATUS_SUCCESS;
5248 
5249 	status = mgmt_rx_reo_initialize_snapshots(pdev);
5250 	if (QDF_IS_STATUS_ERROR(status)) {
5251 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
5252 		return status;
5253 	}
5254 
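	/*
	 * Flag initialization as complete only after the snapshots are set
	 * up; the Rx path is expected to check this flag before reordering
	 * frames from this pdev.
	 */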
5255 	status = mgmt_rx_reo_set_initialization_complete(pdev);
5256 	if (QDF_IS_STATUS_ERROR(status)) {
5257 		mgmt_rx_reo_err("Failed to set initialization complete");
5258 		return status;
5259 	}
5260 
5261 	return QDF_STATUS_SUCCESS;
5262 }
5263 
5264 QDF_STATUS
5265 mgmt_rx_reo_detach(struct wlan_objmgr_pdev *pdev)
5266 {
5267 	QDF_STATUS status;
5268 
5269 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5270 		return QDF_STATUS_SUCCESS;
5271 
5272 	status = mgmt_rx_reo_clear_initialization_complete(pdev);
5273 	if (QDF_IS_STATUS_ERROR(status)) {
5274 		mgmt_rx_reo_err("Failed to clear initialization complete");
5275 		return status;
5276 	}
5277 
5278 	status = mgmt_rx_reo_clear_snapshots(pdev);
5279 	if (QDF_IS_STATUS_ERROR(status)) {
5280 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
5281 		return status;
5282 	}
5283 
5284 	return QDF_STATUS_SUCCESS;
5285 }
5286 
5287 QDF_STATUS
5288 mgmt_rx_reo_pdev_obj_create_notification(
5289 	struct wlan_objmgr_pdev *pdev,
5290 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
5291 {
5292 	QDF_STATUS status;
5293 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
5294 
5295 	if (!pdev) {
5296 		mgmt_rx_reo_err("pdev is null");
5297 		status = QDF_STATUS_E_NULL_VALUE;
5298 		goto failure;
5299 	}
5300 
5301 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
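		/*
		 * Feature disabled on this pdev: take the common exit path so
		 * that the reo pdev context pointer stays NULL, but report
		 * success to the caller.
		 */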
5302 		status = QDF_STATUS_SUCCESS;
5303 		goto failure;
5304 	}
5305 
5306 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
5307 	if (QDF_IS_STATUS_ERROR(status)) {
5308 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
5309 		goto failure;
5310 	}
5311 
5312 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
5313 	if (!mgmt_rx_reo_pdev_ctx) {
5314 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
5315 		status = QDF_STATUS_E_NOMEM;
5316 		goto failure;
5317 	}
5318 
5319 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
5320 
5321 	return QDF_STATUS_SUCCESS;
5322 
5323 failure:
5324 	if (mgmt_rx_reo_pdev_ctx)
5325 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
5326 
5327 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
5328 
5329 	return status;
5330 }
5331 
5332 QDF_STATUS
5333 mgmt_rx_reo_pdev_obj_destroy_notification(
5334 	struct wlan_objmgr_pdev *pdev,
5335 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
5336 {
5337 	QDF_STATUS status;
5338 
5339 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
5340 		return QDF_STATUS_SUCCESS;
5341 
5342 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
5343 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
5344 
5345 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
5346 	if (QDF_IS_STATUS_ERROR(status)) {
5347 		mgmt_rx_reo_err("Failed to handle pdev destroy for reo sim");
5348 		return status;
5349 	}
5350 
5351 	return QDF_STATUS_SUCCESS;
5352 }
5353 
5354 bool
5355 mgmt_rx_reo_is_simulation_in_progress(void)
5356 {
5357 	struct mgmt_rx_reo_context *reo_context;
5358 
5359 	reo_context = mgmt_rx_reo_get_context();
5360 	if (!reo_context) {
5361 		mgmt_rx_reo_err("reo context is null");
5362 		return false;
5363 	}
5364 
5365 	return reo_context->simulation_in_progress;
5366 }
5367 
5368 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
5369 QDF_STATUS
5370 mgmt_rx_reo_print_ingress_frame_stats(void)
5371 {
5372 	struct mgmt_rx_reo_context *reo_context;
5373 	QDF_STATUS status;
5374 
5375 	reo_context = mgmt_rx_reo_get_context();
5376 	if (!reo_context) {
5377 		mgmt_rx_reo_err("reo context is null");
5378 		return QDF_STATUS_E_NULL_VALUE;
5379 	}
5380 
5381 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
5382 	if (QDF_IS_STATUS_ERROR(status)) {
5383 		mgmt_rx_reo_err("Failed to print ingress frame stats");
5384 		return status;
5385 	}
5386 
5387 	return QDF_STATUS_SUCCESS;
5388 }
5389 
5390 QDF_STATUS
5391 mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames)
5392 {
5393 	struct mgmt_rx_reo_context *reo_context;
5394 	QDF_STATUS status;
5395 
5396 	reo_context = mgmt_rx_reo_get_context();
5397 	if (!reo_context) {
5398 		mgmt_rx_reo_err("reo context is null");
5399 		return QDF_STATUS_E_NULL_VALUE;
5400 	}
5401 
5402 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
5403 							    num_frames);
5404 	if (QDF_IS_STATUS_ERROR(status)) {
5405 		mgmt_rx_reo_err("Failed to print ingress frame info");
5406 		return status;
5407 	}
5408 
5409 	return QDF_STATUS_SUCCESS;
5410 }
5411 
5412 QDF_STATUS
5413 mgmt_rx_reo_print_egress_frame_stats(void)
5414 {
5415 	struct mgmt_rx_reo_context *reo_context;
5416 	QDF_STATUS status;
5417 
5418 	reo_context = mgmt_rx_reo_get_context();
5419 	if (!reo_context) {
5420 		mgmt_rx_reo_err("reo context is null");
5421 		return QDF_STATUS_E_NULL_VALUE;
5422 	}
5423 
5424 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
5425 	if (QDF_IS_STATUS_ERROR(status)) {
5426 		mgmt_rx_reo_err("Failed to print egress frame stats");
5427 		return status;
5428 	}
5429 
5430 	return QDF_STATUS_SUCCESS;
5431 }
5432 
5433 QDF_STATUS
5434 mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
5435 {
5436 	struct mgmt_rx_reo_context *reo_context;
5437 	QDF_STATUS status;
5438 
5439 	reo_context = mgmt_rx_reo_get_context();
5440 	if (!reo_context) {
5441 		mgmt_rx_reo_err("reo context is null");
5442 		return QDF_STATUS_E_NULL_VALUE;
5443 	}
5444 
5445 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
5446 							   num_frames);
5447 	if (QDF_IS_STATUS_ERROR(status)) {
5448 		mgmt_rx_reo_err("Failed to print egress frame info");
5449 		return status;
5450 	}
5451 
5452 	return QDF_STATUS_SUCCESS;
5453 }
5454 #else
5455 QDF_STATUS
5456 mgmt_rx_reo_print_ingress_frame_stats(void)
5457 {
5458 	return QDF_STATUS_SUCCESS;
5459 }
5460 
5461 QDF_STATUS
5462 mgmt_rx_reo_print_ingress_frame_info(uint16_t num_frames)
5463 {
5464 	return QDF_STATUS_SUCCESS;
5465 }
5466 
5467 QDF_STATUS
5468 mgmt_rx_reo_print_egress_frame_stats(void)
5469 {
5470 	return QDF_STATUS_SUCCESS;
5471 }
5472 
5473 QDF_STATUS
5474 mgmt_rx_reo_print_egress_frame_info(uint16_t num_frames)
5475 {
5476 	return QDF_STATUS_SUCCESS;
5477 }
5478 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5479