xref: /wlan-dirver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_mlo_mgr_cmn.h>
28 #include <wlan_mlo_mgr_setup.h>
29 
/* Per-MLO-group management Rx reorder context table, indexed by MLO group
 * id. Entries are installed/cleared via mgmt_rx_reo_set_context() and
 * looked up via mgmt_rx_reo_get_context().
 */
static struct mgmt_rx_reo_context *g_rx_reo_ctx[WLAN_MAX_MLO_GROUPS];

/* Accessors for the global per-group reo context table */
#define mgmt_rx_reo_get_context(_grp_id) (g_rx_reo_ctx[_grp_id])
#define mgmt_rx_reo_set_context(grp_id, c)       (g_rx_reo_ctx[grp_id] = c)

/* Half/full range of the 16-bit management packet counter; used below for
 * wrap-around-safe comparison and subtraction of packet counters.
 */
#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
37 
38 /**
39  * wlan_mgmt_rx_reo_get_ctx_from_pdev - Get MGMT Rx REO Context from pdev
40  * @pdev: Pointer to pdev structure object
41  *
42  * API to get the MGMT RX reo context of the pdev using the appropriate
43  * MLO group id.
44  *
45  * Return: Mgmt rx reo context for the pdev
46  */
47 
48 static inline struct mgmt_rx_reo_context*
49 wlan_mgmt_rx_reo_get_ctx_from_pdev(struct wlan_objmgr_pdev *pdev)
50 {
51 	uint8_t ml_grp_id;
52 
53 	ml_grp_id = wlan_get_mlo_grp_id_from_pdev(pdev);
54 	if (ml_grp_id >= WLAN_MAX_MLO_GROUPS) {
55 		mgmt_rx_reo_err("REO context - Invalid ML Group ID");
56 		return NULL;
57 	}
58 
59 	return mgmt_rx_reo_get_context(ml_grp_id);
60 }
61 
62 /**
63  * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
64  * @ctr1: Management packet counter1
65  * @ctr2: Management packet counter2
66  *
67  * We can't directly use the comparison operator here because the counters can
68  * overflow. But these counters have a property that the difference between
69  * them can never be greater than half the range of the data type.
70  * We can make use of this condition to detect which one is actually greater.
71  *
72  * Return: true if @ctr1 is greater than or equal to @ctr2, else false
73  */
74 static inline bool
75 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
76 {
77 	uint16_t delta = ctr1 - ctr2;
78 
79 	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
80 }
81 
82 /**
83  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
84  * @ctr1: Management packet counter1
85  * @ctr2: Management packet counter2
86  *
87  * We can't directly use the subtract operator here because the counters can
88  * overflow. But these counters have a property that the difference between
89  * them can never be greater than half the range of the data type.
90  * We can make use of this condition to detect whichone is actually greater and
91  * return the difference accordingly.
92  *
93  * Return: Difference between @ctr1 and @crt2
94  */
95 static inline int
96 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
97 {
98 	uint16_t delta = ctr1 - ctr2;
99 
100 	/**
101 	 * if delta is greater than half the range (i.e, ctr1 is actually
102 	 * smaller than ctr2), then the result should be a negative number.
103 	 * subtracting the entire range should give the correct value.
104 	 */
105 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
106 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
107 
108 	return delta;
109 }
110 
#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte()-Compare given global timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * The 32-bit global timestamps wrap around, so a plain ">=" is not usable.
 * The timestamps are guaranteed to never drift apart by more than half the
 * range of the type; hence the modular difference @ts1 - @ts2 landing in
 * the lower half of the range implies @ts1 is the later (or equal) value.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	return (uint32_t)(ts1 - ts2) <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
}
131 
132 /**
133  * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame
134  * is stale
135  * @last_delivered_frame: pointer to the info of the last frame delivered to
136  * upper layer
137  * @frame_desc: pointer to frame descriptor
138  *
139  * This API checks whether the current management frame under processing is
140  * stale. Any frame older than the last frame delivered to upper layer is a
141  * stale frame. This could happen when we have to deliver frames out of order
142  * due to time out or list size limit. The frames which arrive late at host and
143  * with time stamp lesser than the last delivered frame are stale frames and
144  * they need to be handled differently.
145  *
146  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
147  * @frame_desc will be filled with proper values.
148  */
149 static QDF_STATUS
150 mgmt_rx_reo_is_stale_frame(
151 		struct mgmt_rx_reo_frame_info *last_delivered_frame,
152 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
153 {
154 	uint32_t cur_frame_start_ts;
155 	uint32_t cur_frame_end_ts;
156 	uint32_t last_delivered_frame_start_ts;
157 	uint32_t last_delivered_frame_end_ts;
158 
159 	if (!last_delivered_frame) {
160 		mgmt_rx_reo_err("Last delivered frame info is null");
161 		return QDF_STATUS_E_NULL_VALUE;
162 	}
163 
164 	if (!frame_desc) {
165 		mgmt_rx_reo_err("Frame descriptor is null");
166 		return QDF_STATUS_E_NULL_VALUE;
167 	}
168 
169 	frame_desc->is_stale = false;
170 	frame_desc->is_parallel_rx = false;
171 	frame_desc->last_delivered_frame = *last_delivered_frame;
172 
173 	if (!last_delivered_frame->valid)
174 		return QDF_STATUS_SUCCESS;
175 
176 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
177 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
178 	last_delivered_frame_start_ts =
179 			last_delivered_frame->reo_params.start_timestamp;
180 	last_delivered_frame_end_ts =
181 			last_delivered_frame->reo_params.end_timestamp;
182 
183 	frame_desc->is_stale =
184 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
185 					last_delivered_frame_start_ts);
186 
187 	if (mgmt_rx_reo_compare_global_timestamps_gte
188 		(last_delivered_frame_start_ts, cur_frame_start_ts) &&
189 	    mgmt_rx_reo_compare_global_timestamps_gte
190 		(cur_frame_end_ts, last_delivered_frame_end_ts)) {
191 		frame_desc->is_parallel_rx = true;
192 		frame_desc->is_stale = false;
193 	}
194 
195 	return QDF_STATUS_SUCCESS;
196 }
197 
198 QDF_STATUS
199 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
200 {
201 	uint16_t valid_link_bitmap_shmem;
202 	uint16_t valid_link_bitmap;
203 	int8_t num_active_links_shmem;
204 	int8_t num_active_links;
205 	uint8_t grp_id = 0;
206 	QDF_STATUS status;
207 
208 	if (!psoc) {
209 		mgmt_rx_reo_err("psoc is null");
210 		return QDF_STATUS_E_NULL_VALUE;
211 	}
212 
213 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
214 		return QDF_STATUS_SUCCESS;
215 
216 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
217 							 &num_active_links_shmem);
218 	if (QDF_IS_STATUS_ERROR(status)) {
219 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
220 		return QDF_STATUS_E_FAILURE;
221 	}
222 	qdf_assert_always(num_active_links_shmem > 0);
223 
224 	if (!mlo_psoc_get_grp_id(psoc, &grp_id)) {
225 		mgmt_rx_reo_err("Failed to get valid MLO Group id");
226 		return QDF_STATUS_E_INVAL;
227 	}
228 
229 	num_active_links = wlan_mlo_get_num_active_links(grp_id);
230 	qdf_assert_always(num_active_links > 0);
231 
232 	qdf_assert_always(num_active_links_shmem == num_active_links);
233 
234 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
235 							  &valid_link_bitmap_shmem);
236 	if (QDF_IS_STATUS_ERROR(status)) {
237 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
238 		return QDF_STATUS_E_INVAL;
239 	}
240 	qdf_assert_always(valid_link_bitmap_shmem != 0);
241 
242 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
243 	qdf_assert_always(valid_link_bitmap_shmem != 0);
244 
245 	qdf_assert_always(valid_link_bitmap_shmem == valid_link_bitmap);
246 
247 	return QDF_STATUS_SUCCESS;
248 }
249 
250 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
251 /**
252  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
253  * @link_id: Link id to be checked
254  * @grp_id: MLO Group id which it belongs to
255  *
256  * Return: true if @link_id is a valid link else false
257  */
258 static bool
259 mgmt_rx_reo_is_valid_link(uint8_t link_id, uint8_t grp_id)
260 {
261 	uint16_t valid_hw_link_bitmap;
262 
263 	if (link_id >= MAX_MLO_LINKS) {
264 		mgmt_rx_reo_err("Invalid link id %u", link_id);
265 		return false;
266 	}
267 
268 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
269 	qdf_assert_always(valid_hw_link_bitmap);
270 
271 	return (valid_hw_link_bitmap & (1 << link_id));
272 }
273 
274 /**
275  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
276  * system
277  * @reo_context: Pointer to reo context object
278  * @grp_id: MLO group id which it belongs to
279  *
280  * Return: On success returns number of active MLO HW links. On failure
281  * returns WLAN_MLO_INVALID_NUM_LINKS.
282  */
283 static int8_t
284 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
285 			      uint8_t grp_id)
286 {
287 	if (!reo_context) {
288 		mgmt_rx_reo_err("Mgmt reo context is null");
289 		return WLAN_MLO_INVALID_NUM_LINKS;
290 	}
291 
292 	return wlan_mlo_get_num_active_links(grp_id);
293 }
294 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Handle premature
 * delivery of a management frame (no-op)
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Premature delivery book-keeping is needed only when the management Rx
 * reorder simulation (WLAN_MGMT_RX_REO_SIM_SUPPORT) is enabled; in this
 * non-simulation build the function is a no-op.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	return QDF_STATUS_SUCCESS;
}
302 
/**
 * mgmt_rx_reo_handle_stale_frame() - Handle a stale management frame (no-op)
 * @reo_list: Pointer to reorder list
 * @desc: Pointer to frame descriptor
 *
 * Stale frame book-keeping is needed only when the management Rx reorder
 * simulation (WLAN_MGMT_RX_REO_SIM_SUPPORT) is enabled; in this
 * non-simulation build the function is a no-op.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
309 #else
310 /**
311  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
312  * @sim_context: Pointer to reo simulation context object
313  * @link_id: Link id to be checked
314  *
315  * Return: true if @link_id is a valid link, else false
316  */
317 static bool
318 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
319 			      uint8_t link_id)
320 {
321 	bool is_valid_link = false;
322 
323 	if (!sim_context) {
324 		mgmt_rx_reo_err("Mgmt reo sim context is null");
325 		return false;
326 	}
327 
328 	if (link_id >= MAX_MLO_LINKS) {
329 		mgmt_rx_reo_err("Invalid link id %u", link_id);
330 		return false;
331 	}
332 
333 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
334 
335 	if (sim_context->link_id_to_pdev_map.map[link_id])
336 		is_valid_link = true;
337 
338 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
339 
340 	return is_valid_link;
341 }
342 
/**
 * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
 * @ml_grp_id: MLO Group id on which the Link ID belongs to
 * @link_id: HW Link ID to be verified
 *
 * In the simulation build a link is valid when the group's simulation
 * context has a pdev mapped against @link_id.
 *
 * NOTE(review): the parameter order here (@ml_grp_id, @link_id) is the
 * reverse of the non-simulation variant of this function (@link_id,
 * @grp_id). Both parameters are uint8_t, so a caller compiled against the
 * wrong order would not get a diagnostic — verify callers under both
 * build configurations.
 *
 * Return: true if @link_id is a valid link else false
 */
static bool
mgmt_rx_reo_is_valid_link(uint8_t ml_grp_id, uint8_t link_id)
{
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context(ml_grp_id);

	if (!reo_context) {
		mgmt_rx_reo_err("Mgmt reo context is null");
		return false;
	}

	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
					     link_id);
}
365 
366 /**
367  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
368  * simulation context object
369  * @sim_context: Pointer to reo simulation context object
370  *
371  * Number of MLO links will be equal to number of pdevs in the
372  * system. In case of simulation all the pdevs are assumed
373  * to have MLO capability.
374  *
375  * Return: On success returns number of MLO HW links. On failure
376  * returns WLAN_MLO_INVALID_NUM_LINKS.
377  */
378 static int8_t
379 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
380 {
381 	uint8_t num_mlo_links;
382 
383 	if (!sim_context) {
384 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
385 		return WLAN_MLO_INVALID_NUM_LINKS;
386 	}
387 
388 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
389 
390 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
391 
392 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
393 
394 	return num_mlo_links;
395 }
396 
397 /**
398  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
399  * context object
400  * @reo_context: Pointer to reo context object
401  * @grp_id: MLO Group id which it belongs to
402  *
403  * Return: On success returns number of MLO HW links. On failure
404  * returns WLAN_MLO_INVALID_NUM_LINKS.
405  */
406 static int8_t
407 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
408 			      uint8_t grp_id)
409 {
410 	if (!reo_context) {
411 		mgmt_rx_reo_err("Mgmt reo context is null");
412 		return WLAN_MLO_INVALID_NUM_LINKS;
413 	}
414 
415 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
416 }
417 
418 /**
419  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
420  * rx reorder simulation context
421  * @ml_grp_id: MLO group id for the rx reordering
422  *
423  * Return: On success returns the pointer to management rx reorder
424  * simulation context. On failure returns NULL.
425  */
426 static struct mgmt_rx_reo_sim_context *
427 mgmt_rx_reo_sim_get_context(uint8_t ml_grp_id)
428 {
429 	struct mgmt_rx_reo_context *reo_context;
430 
431 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
432 	if (!reo_context) {
433 		mgmt_rx_reo_err("Mgmt reo context is null");
434 		return NULL;
435 	}
436 
437 	return &reo_context->sim_context;
438 }
439 
440 int8_t
441 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
442 {
443 	struct mgmt_rx_reo_sim_context *sim_context;
444 	int8_t link_id;
445 
446 	sim_context = mgmt_rx_reo_sim_get_context();
447 	if (!sim_context) {
448 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
449 		return MGMT_RX_REO_INVALID_LINK_ID;
450 	}
451 
452 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
453 
454 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
455 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
456 			break;
457 
458 	/* pdev is not found in map */
459 	if (link_id == MAX_MLO_LINKS)
460 		link_id = MGMT_RX_REO_INVALID_LINK_ID;
461 
462 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
463 
464 	return link_id;
465 }
466 
467 struct wlan_objmgr_pdev *
468 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
469 					  wlan_objmgr_ref_dbgid refdbgid)
470 {
471 	struct mgmt_rx_reo_sim_context *sim_context;
472 	struct wlan_objmgr_pdev *pdev;
473 	QDF_STATUS status;
474 
475 	sim_context = mgmt_rx_reo_sim_get_context();
476 	if (!sim_context) {
477 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
478 		return NULL;
479 	}
480 
481 	if (mlo_link_id >= MAX_MLO_LINKS) {
482 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
483 		return NULL;
484 	}
485 
486 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
487 
488 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
489 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
490 	if (QDF_IS_STATUS_ERROR(status)) {
491 		mgmt_rx_reo_err("Failed to get pdev reference");
492 		return NULL;
493 	}
494 
495 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
496 
497 	return pdev;
498 }
499 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery - Helper API to handle
 * premature delivery.
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Sometimes we have to deliver a management frame to the upper layers even
 * before its wait count reaching zero. This is called premature delivery.
 * Premature delivery could happen due to time out or reorder list overflow.
 * Pending frames which precede the frame with @global_timestamp are moved
 * from the simulation's pending frame list to its stale frame list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	qdf_list_t stale_frame_list_temp;
	QDF_STATUS status;
	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;

	if (!reo_context)
		return QDF_STATUS_E_NULL_VALUE;

	sim_context = &reo_context->sim_context;
	master_frame_list = &sim_context->master_frame_list;

	qdf_spin_lock(&master_frame_list->lock);

	/* Walk the pending list up to the entry matching the prematurely
	 * delivered frame; every entry encountered before it has now
	 * become stale. Remember the last such entry.
	 */
	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
		if (cur_entry->params.global_timestamp == global_timestamp)
			break;

		latest_stale_frame = cur_entry;
	}

	if (latest_stale_frame) {
		qdf_list_create(&stale_frame_list_temp,
				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);

		/* Detach the stale portion of the pending list into a
		 * temporary list and then append it to the stale list.
		 */
		status = qdf_list_split(&stale_frame_list_temp,
					&master_frame_list->pending_list,
					&latest_stale_frame->node);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;

		status = qdf_list_join(&master_frame_list->stale_list,
				       &stale_frame_list_temp);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;
	}

	status = QDF_STATUS_SUCCESS;

exit_unlock_master_frame_list:
	qdf_spin_unlock(&master_frame_list->lock);

	return status;
}
562 
563 /**
564  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
565  * stale management frame list
566  * @master_frame_list: pointer to master management frame list
567  * @reo_params: pointer to reo params
568  *
569  * This API removes frames from the stale management frame list.
570  *
571  * Return: QDF_STATUS of operation
572  */
573 static QDF_STATUS
574 mgmt_rx_reo_sim_remove_frame_from_stale_list(
575 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
576 		const struct mgmt_rx_reo_params *reo_params)
577 {
578 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
579 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
580 	QDF_STATUS status;
581 
582 	if (!master_frame_list || !reo_params)
583 		return QDF_STATUS_E_NULL_VALUE;
584 
585 	qdf_spin_lock(&master_frame_list->lock);
586 
587 	/**
588 	 * Stale frames can come in any order at host. Do a linear search and
589 	 * remove the matching entry.
590 	 */
591 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
592 		if (cur_entry->params.link_id == reo_params->link_id &&
593 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
594 		    cur_entry->params.global_timestamp ==
595 		    reo_params->global_timestamp) {
596 			matching_entry = cur_entry;
597 			break;
598 		}
599 	}
600 
601 	if (!matching_entry) {
602 		qdf_spin_unlock(&master_frame_list->lock);
603 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
604 		qdf_assert_always(0);
605 	}
606 
607 	status = qdf_list_remove_node(&master_frame_list->stale_list,
608 				      &matching_entry->node);
609 
610 	if (QDF_IS_STATUS_ERROR(status)) {
611 		qdf_spin_unlock(&master_frame_list->lock);
612 		return status;
613 	}
614 
615 	qdf_mem_free(matching_entry);
616 
617 	qdf_spin_unlock(&master_frame_list->lock);
618 
619 	return QDF_STATUS_SUCCESS;
620 }
621 
622 /**
623  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
624  * @reo_list: Pointer to reorder list
625  * @desc: Pointer to frame descriptor
626  *
627  * Return: QDF_STATUS of operation
628  */
629 static QDF_STATUS
630 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
631 			       struct mgmt_rx_reo_frame_descriptor *desc)
632 {
633 	QDF_STATUS status;
634 	struct mgmt_rx_reo_context *reo_context;
635 	struct mgmt_rx_reo_sim_context *sim_context;
636 	struct mgmt_rx_reo_params *reo_params;
637 
638 	if (!reo_list || !desc)
639 		return QDF_STATUS_E_NULL_VALUE;
640 
641 	/* FW consumed/Error frames are already removed */
642 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
643 		return QDF_STATUS_SUCCESS;
644 
645 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
646 	if (!reo_context)
647 		return QDF_STATUS_E_NULL_VALUE;
648 
649 	sim_context = &reo_context->sim_context;
650 
651 	reo_params = desc->rx_params->reo_params;
652 	if (!reo_params)
653 		return QDF_STATUS_E_NULL_VALUE;
654 
655 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
656 				&sim_context->master_frame_list, reo_params);
657 
658 	return status;
659 }
660 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
661 
662 /**
663  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
664  * whether the current frame getting delivered to upper layer is a premature
665  * delivery
666  * @release_reason: release reason
667  *
668  * Return: true for a premature delivery
669  */
670 static bool
671 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
672 {
673 	return !(release_reason & RELEASE_REASON_ZERO_WAIT_COUNT);
674 }
675 
676 /**
677  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
678  * MGMT Rx REO module
679  * @pdev: pointer to pdev object
680  *
681  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
682  * else NULL
683  */
684 static struct mgmt_rx_reo_pdev_info *
685 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
686 {
687 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
688 
689 	if (!pdev) {
690 		mgmt_rx_reo_err("pdev is null");
691 		return NULL;
692 	}
693 
694 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
695 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
696 						      WLAN_UMAC_COMP_MGMT_TXRX);
697 
698 	if (!mgmt_txrx_pdev_ctx) {
699 		mgmt_rx_reo_err("mgmt txrx context is NULL");
700 		return NULL;
701 	}
702 
703 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
704 }
705 
/**
 * mgmt_rx_reo_print_snapshots() - Print all snapshots related
 * to management Rx reorder module
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Logs the validity flag, management packet counter and global timestamp
 * of all four snapshot stages at debug level. The callers in this file
 * pass non-NULL pointers; no NULL checks are done here.
 *
 * return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_print_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
740 
/**
 * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 * @link: link ID
 *
 * The MAC HW snapshot is used as the reference: a snapshot which is ahead
 * of the MAC HW snapshot (by global timestamp or by packet counter), or an
 * FW snapshot whose timestamp-based and packet-counter-based orderings
 * relative to the host snapshot disagree, is considered stale and gets its
 * valid flag cleared. Comparisons use the wrap-around-safe helpers above.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_invalidate_stale_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss,
			 uint8_t link)
{
	/* Without a valid MAC HW snapshot there is no reference to check
	 * the other snapshots against.
	 */
	if (!mac_hw_ss->valid)
		return QDF_STATUS_SUCCESS;

	/* Host snapshot must not be ahead of the MAC HW snapshot */
	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
					  link);
			host_ss->valid = false;
		}
	}

	if (fw_forwarded_ss->valid) {
		/* FW forwarded snapshot must not be ahead of the MAC HW
		 * snapshot
		 */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}

		/* Timestamp-based and packet-counter-based orderings of the
		 * host and FW forwarded snapshots must agree
		 */
		if (host_ss->valid && fw_forwarded_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}
	}

	if (fw_consumed_ss->valid) {
		/* FW consumed snapshot must not be ahead of the MAC HW
		 * snapshot
		 */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}

		/* Timestamp-based and packet-counter-based orderings of the
		 * host and FW consumed snapshots must agree
		 */
		if (host_ss->valid && fw_consumed_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}
	}

	return QDF_STATUS_SUCCESS;
}
838 
/**
 * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Verifies the expected ordering between the snapshot stages: a valid
 * downstream snapshot (FW forwarded/FW consumed/host) implies the upstream
 * stage saw the frame first, so the MAC HW snapshot must be greater than
 * or equal to each FW snapshot, each valid FW snapshot must be greater
 * than or equal to the host snapshot (when both FW snapshots are valid at
 * least one of them must be), both by global timestamp and by management
 * packet counter. On any violation the offending values are logged and an
 * error is returned.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_snapshots_check_sanity
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	QDF_STATUS status;

	/* No valid MAC HW snapshot implies no downstream snapshot can be
	 * valid either.
	 */
	if (!mac_hw_ss->valid) {
		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
		    host_ss->valid) {
			mgmt_rx_reo_err("MAC HW SS is invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* A valid host snapshot requires at least one valid FW snapshot */
	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
		if (host_ss->valid) {
			mgmt_rx_reo_err("FW forwarded and consumed SS invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* MAC HW snapshot must be >= FW forwarded snapshot */
	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* MAC HW snapshot must be >= FW consumed snapshot */
	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	if (host_ss->valid) {
		/* MAC HW snapshot must be >= host snapshot */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp)) {
			mgmt_rx_reo_err("TS: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_err("PKT CTR: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		/* Only FW forwarded snapshot valid: it must be >= host */
		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW forwarded < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/* Only FW consumed snapshot valid: it must be >= host */
		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/* Both FW snapshots valid: at least one of them must be
		 * >= host, for both timestamp and packet counter
		 */
		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp) &&
			    !mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_err("TS: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr) &&
			    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_err("CTR: FW consumed/forwarded < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	/* Dump all four snapshots to help triage the inconsistency */
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return status;
}
1013 
1014 /**
1015  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
1016  * frames an incoming frame should wait for before it gets delivered.
1017  * @in_frame_pdev: pdev on which this frame is received
1018  * @desc: frame Descriptor
1019  *
 * Each frame carries a MGMT pkt number which is local to that link, and a
1021  * timestamp which is global across all the links. MAC HW and FW also captures
1022  * the same details of the last frame that they have seen. Host also maintains
1023  * the details of the last frame it has seen. In total, there are 4 snapshots.
1024  * 1. MAC HW snapshot - latest frame seen at MAC HW
1025  * 2. FW forwarded snapshot- latest frame forwarded to the Host
1026  * 3. FW consumed snapshot - latest frame consumed by the FW
1027  * 4. Host/FW consumed snapshot - latest frame seen by the Host
1028  * By using all these snapshots, this function tries to compute the wait count
1029  * for a given incoming frame on all links.
1030  *
1031  * Return: QDF_STATUS of operation
1032  */
static QDF_STATUS
wlan_mgmt_rx_reo_algo_calculate_wait_count(
		struct wlan_objmgr_pdev *in_frame_pdev,
		struct mgmt_rx_reo_frame_descriptor *desc)
{
	QDF_STATUS status;
	uint8_t link;
	int8_t grp_id;
	int8_t in_frame_link;
	int frames_pending, delta_fwd_host;
	uint8_t snapshot_id;
	struct wlan_objmgr_pdev *pdev;
	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
	struct mgmt_rx_reo_snapshot_info *snapshot_info;
	struct mgmt_rx_reo_snapshot_params snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
					    *fw_consumed_ss, *host_ss;
	struct mgmt_rx_reo_params *in_frame_params;
	struct mgmt_rx_reo_wait_count *wait_count;

	if (!in_frame_pdev) {
		mgmt_rx_reo_err("pdev is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!desc) {
		mgmt_rx_reo_err("Frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	in_frame_params = desc->rx_params->reo_params;
	if (!in_frame_params) {
		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	wait_count = &desc->wait_count;

	/* Get the MLO link ID of incoming frame */
	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
	grp_id = wlan_get_mlo_grp_id_from_pdev(in_frame_pdev);
	qdf_assert_always(in_frame_link >= 0);
	qdf_assert_always(in_frame_link < MAX_MLO_LINKS);
	qdf_assert_always(mgmt_rx_reo_is_valid_link(in_frame_link, grp_id));

	in_frame_rx_reo_pdev_ctx =
			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
	if (!in_frame_rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
		return QDF_STATUS_E_FAILURE;
	}
	/*
	 * Clear the scratch area which collects the raw snapshot words
	 * read from every link during this iteration.
	 */
	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));

	/* Iterate over all the valid MLO links */
	for (link = 0; link < MAX_MLO_LINKS; link++) {
		/* No need to wait for any frames on an invalid link */
		if (!mgmt_rx_reo_is_valid_link(link, grp_id)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		pdev = wlan_get_pdev_from_mlo_link_id(link, grp_id,
						      WLAN_MGMT_RX_REO_ID);

		/* No need to wait for any frames if the pdev is not found */
		if (!pdev) {
			mgmt_rx_reo_debug("pdev is null for link %d", link);
			frames_pending = 0;
			goto update_pending_frames;
		}

		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
		if (!rx_reo_pdev_ctx) {
			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
					pdev);
			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
			return QDF_STATUS_E_FAILURE;
		}

		if (!rx_reo_pdev_ctx->init_complete) {
			mgmt_rx_reo_debug("REO init in progress for link %d",
					  link);
			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
			frames_pending = 0;
			goto update_pending_frames;
		}

		host_ss = &rx_reo_pdev_ctx->host_snapshot;
		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;

		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);

		snapshot_id = 0;
		/* Read all the shared snapshots */
		while (snapshot_id <
			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
			snapshot_info = &rx_reo_pdev_ctx->
				host_target_shared_snapshot_info[snapshot_id];

			qdf_mem_zero(&snapshot_params[snapshot_id],
				     sizeof(snapshot_params[snapshot_id]));

			status = tgt_mgmt_rx_reo_read_snapshot(
					pdev, snapshot_info, snapshot_id,
					&snapshot_params[snapshot_id],
					in_frame_rx_reo_pdev_ctx->raw_snapshots
					[link][snapshot_id]);

			/* Read operation shouldn't fail */
			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("snapshot(%d) read failed on"
						"link (%d)", snapshot_id, link);
				wlan_objmgr_pdev_release_ref(
						pdev, WLAN_MGMT_RX_REO_ID);
				return status;
			}

			/* If snapshot is valid, save it in the pdev context */
			if (snapshot_params[snapshot_id].valid) {
				rx_reo_pdev_ctx->
				   last_valid_shared_snapshot[snapshot_id] =
				   snapshot_params[snapshot_id];
			}
			desc->shared_snapshots[link][snapshot_id] =
						snapshot_params[snapshot_id];

			snapshot_id++;
		}

		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);

		mac_hw_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
		fw_forwarded_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
		fw_consumed_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];

		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
								fw_forwarded_ss,
								fw_consumed_ss,
								host_ss, link);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
					link);
			return status;
		}

		/* Save the (possibly invalidated) snapshots for debug */
		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
								*mac_hw_ss;
		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
								*fw_forwarded_ss;
		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
								*fw_consumed_ss;
		desc->host_snapshot[link] = *host_ss;

		status = mgmt_rx_reo_snapshots_check_sanity
			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err_rl("Snapshot sanity for link %u failed",
					   link);
			return status;
		}

		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
				 link, mac_hw_ss->valid,
				 mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
				 link, fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
				 link, fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);

		/* No need to wait for any frames on the same link */
		if (link == in_frame_link) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/**
		 * If MAC HW snapshot is invalid, the link has not started
		 * receiving management frames. Set wait count to zero.
		 */
		if (!mac_hw_ss->valid) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/**
		 * If host snapshot is invalid, wait for MAX number of frames.
		 * When any frame in this link arrives at host, actual wait
		 * counts will be updated.
		 */
		if (!host_ss->valid) {
			wait_count->per_link_count[link] = UINT_MAX;
			wait_count->total_count += UINT_MAX;
			goto print_wait_count;
		}

		/**
		 * If MAC HW snapshot sequence number and host snapshot
		 * sequence number are same, all the frames received by
		 * this link are processed by host. No need to wait for
		 * any frames from this link.
		 */
		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
						   host_ss->mgmt_pkt_ctr)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/**
		 * Ideally, the incoming frame has to wait for only those frames
		 * (on other links) which meet all the below criterion.
		 * 1. Frame's timestamp is less than incoming frame's
		 * 2. Frame is supposed to be consumed by the Host
		 * 3. Frame is not yet seen by the Host.
		 * We may not be able to compute the exact optimal wait count
		 * because HW/FW provides a limited assist.
		 * This algorithm tries to get the best estimate of wait count
		 * by not waiting for those frames where we have a conclusive
		 * evidence that we don't have to wait for those frames.
		 */

		/**
		 * If this link has already seen a frame whose timestamp is
		 * greater than or equal to incoming frame's timestamp,
		 * then no need to wait for any frames on this link.
		 * If the total wait count becomes zero, then the policy on
		 * whether to deliver such a frame to upper layers is handled
		 * separately.
		 */
		if (mgmt_rx_reo_compare_global_timestamps_gte(
				host_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		/**
		 * For starters, we only have to wait for the frames that are
		 * seen by MAC HW but not yet seen by Host. The frames which
		 * reach MAC HW later are guaranteed to have a timestamp
		 * greater than incoming frame's timestamp.
		 */
		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
					mac_hw_ss->mgmt_pkt_ctr,
					host_ss->mgmt_pkt_ctr);
		qdf_assert_always(frames_pending >= 0);

		if (frames_pending &&
		    mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 in_frame_params->global_timestamp)) {
			/**
			 * Last frame seen at MAC HW has timestamp greater than
			 * or equal to incoming frame's timestamp. So no need to
			 * wait for that last frame, but we can't conclusively
			 * say anything about timestamp of frames before the
			 * last frame, so try to wait for all of those frames.
			 */
			frames_pending--;
			qdf_assert_always(frames_pending >= 0);

			if (fw_consumed_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_consumed_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				/**
				 * Last frame consumed by the FW has timestamp
				 * greater than or equal to incoming frame's.
				 * That means all the frames from
				 * fw_consumed_ss->mgmt_pkt_ctr to
				 * mac_hw->mgmt_pkt_ctr will have timestamp
				 * greater than or equal to incoming frame's and
				 * hence, no need to wait for those frames.
				 * We just need to wait for frames from
				 * host_ss->mgmt_pkt_ctr to
				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				frames_pending =
				  mgmt_rx_reo_subtract_pkt_ctrs(
				      fw_consumed_ss->mgmt_pkt_ctr,
				      host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(frames_pending >= 0);

				/**
				 * Last frame forwarded to Host has timestamp
				 * less than incoming frame's. That means all
				 * the frames starting from
				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
				 * the FW and hence, no need to wait for those
				 * frames. We just need to wait for frames
				 * from host_ss->mgmt_pkt_ctr to
				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				if (fw_forwarded_ss->valid &&
				    !mgmt_rx_reo_compare_global_timestamps_gte(
					fw_forwarded_ss->global_timestamp,
					in_frame_params->global_timestamp)) {
					frames_pending =
					  mgmt_rx_reo_subtract_pkt_ctrs(
					      fw_forwarded_ss->mgmt_pkt_ctr,
					      host_ss->mgmt_pkt_ctr);

					/**
					 * frames_pending can be negative in
					 * cases when there are no frames
					 * getting forwarded to the Host. No
					 * need to wait for any frames in that
					 * case.
					 */
					if (frames_pending < 0)
						frames_pending = 0;
				}
			}

			/**
			 * Last frame forwarded to Host has timestamp greater
			 * than or equal to incoming frame's. That means all the
			 * frames from fw_forwarded->mgmt_pkt_ctr to
			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
			 * or equal to incoming frame's and hence, no need to
			 * wait for those frames. We may have to just wait for
			 * frames from host_ss->mgmt_pkt_ctr to
			 * fw_forwarded_ss->mgmt_pkt_ctr-1
			 */
			if (fw_forwarded_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_forwarded_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				delta_fwd_host =
				  mgmt_rx_reo_subtract_pkt_ctrs(
				    fw_forwarded_ss->mgmt_pkt_ctr,
				    host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(delta_fwd_host >= 0);

				/**
				 * This will be a better estimate over the one
				 * we computed using mac_hw_ss but this may or
				 * may not be a better estimate over the
				 * one we computed using fw_consumed_ss.
				 * When timestamps of both fw_consumed_ss and
				 * fw_forwarded_ss are greater than incoming
				 * frame's but timestamp of fw_consumed_ss is
				 * smaller than fw_forwarded_ss, then
				 * frames_pending will be smaller than
				 * delta_fwd_host, the reverse will be true in
				 * other cases. Instead of checking for all
				 * those cases, just waiting for the minimum
				 * among these two should be sufficient.
				 */
				frames_pending = qdf_min(frames_pending,
							 delta_fwd_host);
				qdf_assert_always(frames_pending >= 0);
			}
		}

update_pending_frames:
			/* Record the computed wait count for this link */
			qdf_assert_always(frames_pending >= 0);

			wait_count->per_link_count[link] = frames_pending;
			wait_count->total_count += frames_pending;

print_wait_count:
			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
					 link, wait_count->per_link_count[link],
					 wait_count->total_count);
	}

	return QDF_STATUS_SUCCESS;
}
1426 
1427 /**
1428  * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
1429  * information about a reo list entry for debug purposes.
1430  * @link_id: link id
1431  * @mgmt_pkt_ctr: management packet counter
1432  * @global_timestamp: global time stamp
1433  * @wait_count: wait count values
1434  * @status: status of the entry in the list
1435  * @entry: pointer to reo list entry
1436  */
1437 struct mgmt_rx_reo_list_entry_debug_info {
1438 	uint8_t link_id;
1439 	uint16_t mgmt_pkt_ctr;
1440 	uint32_t global_timestamp;
1441 	struct mgmt_rx_reo_wait_count wait_count;
1442 	uint32_t status;
1443 	struct mgmt_rx_reo_list_entry *entry;
1444 };
1445 
1446 /**
1447  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1448  * @reo_list: Pointer to reorder list
1449  *
1450  * Return: QDF_STATUS
1451  */
1452 static QDF_STATUS
1453 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1454 {
1455 	uint32_t reo_list_size;
1456 	uint32_t index;
1457 	struct mgmt_rx_reo_list_entry *cur_entry;
1458 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1459 
1460 	if (!reo_list) {
1461 		mgmt_rx_reo_err("Pointer to reo list is null");
1462 		return QDF_STATUS_E_NULL_VALUE;
1463 	}
1464 
1465 	qdf_spin_lock_bh(&reo_list->list_lock);
1466 
1467 	reo_list_size = qdf_list_size(&reo_list->list);
1468 
1469 	if (reo_list_size == 0) {
1470 		qdf_spin_unlock_bh(&reo_list->list_lock);
1471 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1472 				  reo_list_size);
1473 		return QDF_STATUS_SUCCESS;
1474 	}
1475 
1476 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1477 	if (!debug_info) {
1478 		qdf_spin_unlock_bh(&reo_list->list_lock);
1479 		mgmt_rx_reo_err("Memory allocation failed");
1480 		return QDF_STATUS_E_NOMEM;
1481 	}
1482 
1483 	index = 0;
1484 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1485 		debug_info[index].link_id =
1486 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1487 		debug_info[index].mgmt_pkt_ctr =
1488 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1489 		debug_info[index].global_timestamp =
1490 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1491 		debug_info[index].wait_count = cur_entry->wait_count;
1492 		debug_info[index].status = cur_entry->status;
1493 		debug_info[index].entry = cur_entry;
1494 
1495 		++index;
1496 	}
1497 
1498 	qdf_spin_unlock_bh(&reo_list->list_lock);
1499 
1500 	mgmt_rx_reo_debug("Reorder list");
1501 	mgmt_rx_reo_debug("##################################################");
1502 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1503 			  reo_list_size);
1504 	for (index = 0; index < reo_list_size; index++) {
1505 		uint8_t link_id;
1506 
1507 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1508 				  index, debug_info[index].link_id,
1509 				  debug_info[index].global_timestamp,
1510 				  debug_info[index].mgmt_pkt_ctr,
1511 				  debug_info[index].status,
1512 				  debug_info[index].entry);
1513 
1514 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1515 				  debug_info[index].wait_count.total_count);
1516 
1517 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1518 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1519 					  link_id, debug_info[index].wait_count.
1520 					  per_link_count[link_id]);
1521 	}
1522 	mgmt_rx_reo_debug("##################################################");
1523 
1524 	qdf_mem_free(debug_info);
1525 
1526 	return QDF_STATUS_SUCCESS;
1527 }
1528 
1529 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1530 /**
1531  * mgmt_rx_reo_egress_frame_debug_info_enabled() - API to check whether egress
1532  * frame info debug feaure is enabled
1533  * @egress_frame_debug_info: Pointer to egress frame debug info object
1534  *
1535  * Return: true or false
1536  */
1537 static bool
1538 mgmt_rx_reo_egress_frame_debug_info_enabled
1539 			(struct reo_egress_debug_info *egress_frame_debug_info)
1540 {
1541 	return egress_frame_debug_info->frame_list_size;
1542 }
1543 
1544 /**
1545  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1546  * related to frames going out of the reorder module
1547  * @reo_ctx: Pointer to reorder context
1548  *
1549  * API to print the stats related to frames going out of the management
1550  * Rx reorder module.
1551  *
1552  * Return: QDF_STATUS
1553  */
1554 static QDF_STATUS
1555 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1556 {
1557 	struct reo_egress_frame_stats *stats;
1558 	uint8_t link_id;
1559 	uint8_t reason;
1560 	uint64_t total_delivery_attempts_count = 0;
1561 	uint64_t total_delivery_success_count = 0;
1562 	uint64_t total_premature_delivery_count = 0;
1563 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
1564 	uint64_t delivery_count_per_reason[RELEASE_REASON_MAX] = {0};
1565 	uint64_t total_delivery_count = 0;
1566 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
1567 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
1568 
1569 	if (!reo_ctx)
1570 		return QDF_STATUS_E_NULL_VALUE;
1571 
1572 	stats = &reo_ctx->egress_frame_debug_info.stats;
1573 
1574 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1575 		total_delivery_attempts_count +=
1576 				stats->delivery_attempts_count[link_id];
1577 		total_delivery_success_count +=
1578 				stats->delivery_success_count[link_id];
1579 		total_premature_delivery_count +=
1580 				stats->premature_delivery_count[link_id];
1581 	}
1582 
1583 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1584 		for (reason = 0; reason < RELEASE_REASON_MAX;
1585 		     reason++)
1586 			delivery_count_per_link[link_id] +=
1587 				stats->delivery_count[link_id][reason];
1588 		total_delivery_count += delivery_count_per_link[link_id];
1589 	}
1590 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++)
1591 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1592 			delivery_count_per_reason[reason] +=
1593 				stats->delivery_count[link_id][reason];
1594 
1595 	mgmt_rx_reo_alert("Egress frame stats:");
1596 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
1597 	mgmt_rx_reo_alert("\t------------------------------------------");
1598 	mgmt_rx_reo_alert("\t|link id   |Attempts |Success |Premature |");
1599 	mgmt_rx_reo_alert("\t|          | count   | count  | count    |");
1600 	mgmt_rx_reo_alert("\t------------------------------------------");
1601 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
1602 		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
1603 				  stats->delivery_attempts_count[link_id],
1604 				  stats->delivery_success_count[link_id],
1605 				  stats->premature_delivery_count[link_id]);
1606 	mgmt_rx_reo_alert("\t------------------------------------------");
1607 	}
1608 	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
1609 			  total_delivery_attempts_count,
1610 			  total_delivery_success_count,
1611 			  total_premature_delivery_count);
1612 
1613 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
1614 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
1615 	mgmt_rx_reo_alert("\tREASON_ZERO_WAIT_COUNT - 0x%lx",
1616 			  RELEASE_REASON_ZERO_WAIT_COUNT);
1617 	mgmt_rx_reo_alert("\tREASON_AGED_OUT - 0x%lx",
1618 			  RELEASE_REASON_AGED_OUT);
1619 	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1620 			  RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1621 	mgmt_rx_reo_alert("\tREASON_INGRESS_LIST_OVERFLOW - 0x%lx",
1622 			  RELEASE_REASON_INGRESS_LIST_OVERFLOW);
1623 	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_READY_TO_DELIVER_FRAMES - 0x%lx",
1624 			  RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES);
1625 
1626 	qdf_mem_set(delivery_reason_stats_boarder_a,
1627 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
1628 	qdf_mem_set(delivery_reason_stats_boarder_b,
1629 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');
1630 
1631 	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
1632 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
1633 			  "", "", "", "", "", "");
1634 	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
1635 			  "0", "1", "2", "3", "4", "5");
1636 	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1637 
1638 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++) {
1639 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
1640 				  reason, stats->delivery_count[0][reason],
1641 				  stats->delivery_count[1][reason],
1642 				  stats->delivery_count[2][reason],
1643 				  stats->delivery_count[3][reason],
1644 				  stats->delivery_count[4][reason],
1645 				  stats->delivery_count[5][reason],
1646 				  delivery_count_per_reason[reason]);
1647 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
1648 	}
1649 	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1650 			  "", delivery_count_per_link[0],
1651 			  delivery_count_per_link[1],
1652 			  delivery_count_per_link[2],
1653 			  delivery_count_per_link[3],
1654 			  delivery_count_per_link[4],
1655 			  delivery_count_per_link[5],
1656 			  total_delivery_count);
1657 
1658 	return QDF_STATUS_SUCCESS;
1659 }
1660 
1661 /**
1662  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1663  * frame exiting the reorder module. Logging is done before attempting the frame
1664  * delivery to upper layers.
1665  * @reo_ctx: management rx reorder context
1666  * @entry: Pointer to reorder list entry
1667  *
1668  * Return: QDF_STATUS of operation
1669  */
1670 static QDF_STATUS
1671 mgmt_rx_reo_log_egress_frame_before_delivery(
1672 					struct mgmt_rx_reo_context *reo_ctx,
1673 					struct mgmt_rx_reo_list_entry *entry)
1674 {
1675 	struct reo_egress_debug_info *egress_frame_debug_info;
1676 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1677 	struct reo_egress_frame_stats *stats;
1678 	uint8_t link_id;
1679 
1680 	if (!reo_ctx || !entry)
1681 		return QDF_STATUS_E_NULL_VALUE;
1682 
1683 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1684 
1685 	stats = &egress_frame_debug_info->stats;
1686 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
1687 	stats->delivery_attempts_count[link_id]++;
1688 	if (entry->is_premature_delivery)
1689 		stats->premature_delivery_count[link_id]++;
1690 
1691 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
1692 						(egress_frame_debug_info))
1693 		return QDF_STATUS_SUCCESS;
1694 
1695 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1696 			[egress_frame_debug_info->next_index];
1697 
1698 	cur_frame_debug_info->link_id = link_id;
1699 	cur_frame_debug_info->mgmt_pkt_ctr =
1700 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
1701 	cur_frame_debug_info->global_timestamp =
1702 				mgmt_rx_reo_get_global_ts(entry->rx_params);
1703 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
1704 	cur_frame_debug_info->final_wait_count = entry->wait_count;
1705 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
1706 		     entry->shared_snapshots,
1707 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
1708 			     sizeof(entry->shared_snapshots)));
1709 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
1710 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
1711 			     sizeof(entry->host_snapshot)));
1712 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
1713 	cur_frame_debug_info->ingress_list_insertion_ts =
1714 					entry->ingress_list_insertion_ts;
1715 	cur_frame_debug_info->ingress_list_removal_ts =
1716 					entry->ingress_list_removal_ts;
1717 	cur_frame_debug_info->egress_list_insertion_ts =
1718 					entry->egress_list_insertion_ts;
1719 	cur_frame_debug_info->egress_list_removal_ts =
1720 					entry->egress_list_removal_ts;
1721 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
1722 	cur_frame_debug_info->release_reason = entry->release_reason;
1723 	cur_frame_debug_info->is_premature_delivery =
1724 						entry->is_premature_delivery;
1725 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
1726 
1727 	return QDF_STATUS_SUCCESS;
1728 }
1729 
1730 /**
1731  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1732  * frame exiting the reorder module. Logging is done after attempting the frame
1733  * delivery to upper layer.
1734  * @reo_ctx: management rx reorder context
1735  * @entry: Pointer to reorder list entry
1736  * @link_id: multi-link link ID
1737  *
1738  * Return: QDF_STATUS of operation
1739  */
1740 static QDF_STATUS
1741 mgmt_rx_reo_log_egress_frame_after_delivery(
1742 					struct mgmt_rx_reo_context *reo_ctx,
1743 					struct mgmt_rx_reo_list_entry *entry,
1744 					uint8_t link_id)
1745 {
1746 	struct reo_egress_debug_info *egress_frame_debug_info;
1747 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1748 	struct reo_egress_frame_stats *stats;
1749 
1750 	if (!reo_ctx || !entry)
1751 		return QDF_STATUS_E_NULL_VALUE;
1752 
1753 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1754 
1755 	stats = &egress_frame_debug_info->stats;
1756 	if (entry->is_delivered) {
1757 		uint8_t release_reason = entry->release_reason;
1758 
1759 		stats->delivery_count[link_id][release_reason]++;
1760 		stats->delivery_success_count[link_id]++;
1761 	}
1762 
1763 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
1764 						(egress_frame_debug_info))
1765 		return QDF_STATUS_SUCCESS;
1766 
1767 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1768 			[egress_frame_debug_info->next_index];
1769 
1770 	cur_frame_debug_info->is_delivered = entry->is_delivered;
1771 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
1772 					cur_frame_debug_info->egress_timestamp;
1773 
1774 	egress_frame_debug_info->next_index++;
1775 	egress_frame_debug_info->next_index %=
1776 				egress_frame_debug_info->frame_list_size;
1777 	if (egress_frame_debug_info->next_index == 0)
1778 		egress_frame_debug_info->wrap_aroud = true;
1779 
1780 	return QDF_STATUS_SUCCESS;
1781 }
1782 
1783 /**
1784  * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
1785  * about the latest frames leaving the reorder module
1786  * @reo_ctx: management rx reorder context
1787  * @num_frames: Number of frames for which the debug information is to be
1788  * printed. If @num_frames is 0, then debug information about all the frames
1789  * in the ring buffer will be  printed.
1790  *
1791  * Return: QDF_STATUS of operation
1792  */
1793 static QDF_STATUS
1794 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
1795 					  uint16_t num_frames)
1796 {
1797 	struct reo_egress_debug_info *egress_frame_debug_info;
1798 	int start_index;
1799 	uint16_t index;
1800 	uint16_t entry;
1801 	uint16_t num_valid_entries;
1802 	uint16_t num_entries_to_print;
1803 	char *boarder;
1804 
1805 	if (!reo_ctx)
1806 		return QDF_STATUS_E_NULL_VALUE;
1807 
1808 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1809 
1810 	if (egress_frame_debug_info->wrap_aroud)
1811 		num_valid_entries = egress_frame_debug_info->frame_list_size;
1812 	else
1813 		num_valid_entries = egress_frame_debug_info->next_index;
1814 
1815 	if (num_frames == 0) {
1816 		num_entries_to_print = num_valid_entries;
1817 
1818 		if (egress_frame_debug_info->wrap_aroud)
1819 			start_index = egress_frame_debug_info->next_index;
1820 		else
1821 			start_index = 0;
1822 	} else {
1823 		num_entries_to_print = qdf_min(num_frames, num_valid_entries);
1824 
1825 		start_index = (egress_frame_debug_info->next_index -
1826 			       num_entries_to_print +
1827 			       egress_frame_debug_info->frame_list_size)
1828 			      % egress_frame_debug_info->frame_list_size;
1829 
1830 		qdf_assert_always(start_index >= 0 &&
1831 				  start_index < egress_frame_debug_info->frame_list_size);
1832 	}
1833 
1834 	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
1835 	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
1836 				num_frames,
1837 				egress_frame_debug_info->wrap_aroud,
1838 				egress_frame_debug_info->next_index);
1839 	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
1840 				start_index, num_entries_to_print);
1841 
1842 	if (!num_entries_to_print)
1843 		return QDF_STATUS_SUCCESS;
1844 
1845 	boarder = egress_frame_debug_info->boarder;
1846 
1847 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1848 	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%11s|%11s|%5s|%7s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1849 				"No.", "CPU", "Link", "SeqNo", "Global ts",
1850 				"Ingress ts", "Ing Insert",
1851 				"Ing Remove", "Eg Insert", "Eg Remove",
1852 				"Egress ts", "E Dur", "I W Dur", "E W Dur",
1853 				"Flags", "Rea.", "Final wait count",
1854 				"Initial wait count", "Snapshot : link 0",
1855 				"Snapshot : link 1", "Snapshot : link 2",
1856 				"Snapshot : link 3", "Snapshot : link 4",
1857 				"Snapshot : link 5");
1858 	mgmt_rx_reo_alert_no_fl("%s", boarder);
1859 
1860 	index = start_index;
1861 	for (entry = 0; entry < num_entries_to_print; entry++) {
1862 		struct reo_egress_debug_frame_info *info;
1863 		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
1864 		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
1865 		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
1866 		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
1867 		char flag_premature_delivery = ' ';
1868 		char flag_error = ' ';
1869 		uint8_t link;
1870 
1871 		info = &reo_ctx->egress_frame_debug_info.frame_list[index];
1872 
1873 		if (!info->is_delivered)
1874 			flag_error = 'E';
1875 
1876 		if (info->is_premature_delivery)
1877 			flag_premature_delivery = 'P';
1878 
1879 		snprintf(flags, sizeof(flags), "%c %c", flag_error,
1880 			 flag_premature_delivery);
1881 		snprintf(initial_wait_count, sizeof(initial_wait_count),
1882 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1883 			 info->initial_wait_count.total_count,
1884 			 info->initial_wait_count.per_link_count[0],
1885 			 info->initial_wait_count.per_link_count[1],
1886 			 info->initial_wait_count.per_link_count[2],
1887 			 info->initial_wait_count.per_link_count[3],
1888 			 info->initial_wait_count.per_link_count[4],
1889 			 info->initial_wait_count.per_link_count[5]);
1890 		snprintf(final_wait_count, sizeof(final_wait_count),
1891 			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
1892 			 info->final_wait_count.total_count,
1893 			 info->final_wait_count.per_link_count[0],
1894 			 info->final_wait_count.per_link_count[1],
1895 			 info->final_wait_count.per_link_count[2],
1896 			 info->final_wait_count.per_link_count[3],
1897 			 info->final_wait_count.per_link_count[4],
1898 			 info->final_wait_count.per_link_count[5]);
1899 
1900 		for (link = 0; link < MAX_MLO_LINKS; link++) {
1901 			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1902 			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1903 			char fw_forwarded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1904 			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
1905 			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
1906 			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
1907 			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
1908 			struct mgmt_rx_reo_snapshot_params *host_ss;
1909 
1910 			mac_hw_ss = &info->shared_snapshots
1911 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1912 			fw_consumed_ss = &info->shared_snapshots
1913 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1914 			fw_forwarded_ss = &info->shared_snapshots
1915 				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
1916 			host_ss = &info->host_snapshot[link];
1917 
1918 			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
1919 				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
1920 				 mac_hw_ss->global_timestamp);
1921 			snprintf(fw_consumed, sizeof(fw_consumed),
1922 				 "(%1u, %5u, %10u)",
1923 				 fw_consumed_ss->valid,
1924 				 fw_consumed_ss->mgmt_pkt_ctr,
1925 				 fw_consumed_ss->global_timestamp);
1926 			snprintf(fw_forwarded, sizeof(fw_forwarded),
1927 				 "(%1u, %5u, %10u)",
1928 				 fw_forwarded_ss->valid,
1929 				 fw_forwarded_ss->mgmt_pkt_ctr,
1930 				 fw_forwarded_ss->global_timestamp);
1931 			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
1932 				 host_ss->valid,
1933 				 host_ss->mgmt_pkt_ctr,
1934 				 host_ss->global_timestamp);
1935 			snprintf(snapshots[link], sizeof(snapshots[link]),
1936 				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
1937 				 fw_forwarded, host);
1938 		}
1939 
1940 		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
1941 					entry, info->cpu_id, info->link_id,
1942 					info->mgmt_pkt_ctr,
1943 					info->global_timestamp,
1944 					info->ingress_timestamp,
1945 					info->ingress_list_insertion_ts,
1946 					info->ingress_list_removal_ts,
1947 					info->egress_list_insertion_ts,
1948 					info->egress_list_removal_ts,
1949 					info->egress_timestamp,
1950 					info->egress_duration,
1951 					info->ingress_list_removal_ts -
1952 					info->ingress_list_insertion_ts,
1953 					info->egress_list_removal_ts -
1954 					info->egress_list_insertion_ts,
1955 					flags, info->release_reason,
1956 					final_wait_count, initial_wait_count,
1957 					snapshots[0], snapshots[1],
1958 					snapshots[2], snapshots[3],
1959 					snapshots[4], snapshots[5]);
1960 		mgmt_rx_reo_alert_no_fl("%s", boarder);
1961 
1962 		index++;
1963 		index %= egress_frame_debug_info->frame_list_size;
1964 	}
1965 
1966 	return QDF_STATUS_SUCCESS;
1967 }
1968 #else
1969 /**
1970  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
1971  * related to frames going out of the reorder module
1972  * @reo_ctx: Pointer to reorder context
1973  *
1974  * API to print the stats related to frames going out of the management
1975  * Rx reorder module.
1976  *
1977  * Return: QDF_STATUS
1978  */
1979 static QDF_STATUS
1980 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1981 {
1982 	return QDF_STATUS_SUCCESS;
1983 }
1984 
1985 /**
1986  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1987  * frame exiting the reorder module. Logging is done before attempting the frame
1988  * delivery to upper layers.
1989  * @reo_ctx: management rx reorder context
1990  * @entry: Pointer to reorder list entry
1991  *
1992  * Return: QDF_STATUS of operation
1993  */
1994 static QDF_STATUS
1995 mgmt_rx_reo_log_egress_frame_before_delivery(
1996 					struct mgmt_rx_reo_context *reo_ctx,
1997 					struct mgmt_rx_reo_list_entry *entry)
1998 {
1999 	return QDF_STATUS_SUCCESS;
2000 }
2001 
2002 /**
2003  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
2004  * frame exiting the reorder module. Logging is done after attempting the frame
2005  * delivery to upper layer.
2006  * @reo_ctx: management rx reorder context
2007  * @is_delivered: Flag to indicate whether the frame is delivered to upper
2008  * layers
2009  *
2010  * Return: QDF_STATUS of operation
2011  */
2012 static QDF_STATUS
2013 mgmt_rx_reo_log_egress_frame_after_delivery(
2014 					struct mgmt_rx_reo_context *reo_ctx,
2015 					bool is_delivered)
2016 {
2017 	return QDF_STATUS_SUCCESS;
2018 }
2019 
2020 /**
2021  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
2022  * the latest frames leaving the reorder module
2023  * @reo_ctx: management rx reorder context
2024  *
2025  * Return: QDF_STATUS of operation
2026  */
2027 static QDF_STATUS
2028 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2029 {
2030 	return QDF_STATUS_SUCCESS;
2031 }
2032 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2033 
2034 /**
2035  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
2036  * for releasing the reorder list entry to upper layer.
2037  * reorder list.
2038  * @entry: List entry
2039  *
2040  * This API expects the caller to acquire the spin lock protecting the reorder
2041  * list.
2042  *
2043  * Return: Reason for releasing the frame.
2044  */
2045 static uint8_t
2046 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
2047 {
2048 	uint8_t reason = 0;
2049 
2050 	if (!entry)
2051 		return 0;
2052 
2053 	if (!LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
2054 		reason |= RELEASE_REASON_ZERO_WAIT_COUNT;
2055 
2056 	if (LIST_ENTRY_IS_AGED_OUT(entry))
2057 		reason |= RELEASE_REASON_AGED_OUT;
2058 
2059 	if (LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
2060 		reason |= RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
2061 
2062 	if (LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry))
2063 		reason |= RELEASE_REASON_INGRESS_LIST_OVERFLOW;
2064 
2065 	if (LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry))
2066 		reason |= RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES;
2067 
2068 	return reason;
2069 }
2070 
2071 /**
2072  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
2073  * @reo_context: Pointer to reorder context
2074  * @entry: List entry
2075  *
2076  * API to send the frame to the upper layer. This API has to be called only
2077  * for entries which can be released to upper layer. It is the caller's
2078  * responsibility to ensure that entry can be released (by using API
2079  * mgmt_rx_reo_is_entry_ready_to_send_up). This API is called after
2080  * acquiring the lock which serializes the frame delivery to the upper layers.
2081  *
2082  * Return: QDF_STATUS
2083  */
2084 static QDF_STATUS
2085 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
2086 			       struct mgmt_rx_reo_list_entry *entry)
2087 {
2088 	uint8_t release_reason;
2089 	uint8_t link_id;
2090 	uint32_t entry_global_ts;
2091 	QDF_STATUS status;
2092 	QDF_STATUS temp;
2093 
2094 	qdf_assert_always(reo_context);
2095 	qdf_assert_always(entry);
2096 
2097 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2098 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
2099 
2100 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
2101 
2102 	qdf_assert_always(release_reason != 0);
2103 
2104 	entry->is_delivered = false;
2105 	entry->is_premature_delivery = false;
2106 	entry->release_reason = release_reason;
2107 
2108 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
2109 		entry->is_premature_delivery = true;
2110 		status = mgmt_rx_reo_handle_potential_premature_delivery(
2111 						reo_context, entry_global_ts);
2112 		if (QDF_IS_STATUS_ERROR(status))
2113 			goto exit;
2114 	}
2115 
2116 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
2117 							      entry);
2118 	if (QDF_IS_STATUS_ERROR(status))
2119 		goto exit;
2120 
2121 	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
2122 						 entry->rx_params);
2123 	/* Above call frees nbuf and rx_params, make it null explicitly */
2124 	entry->nbuf = NULL;
2125 	entry->rx_params = NULL;
2126 
2127 	if (QDF_IS_STATUS_ERROR(status))
2128 		goto exit_log;
2129 
2130 	entry->is_delivered = true;
2131 
2132 	status = QDF_STATUS_SUCCESS;
2133 
2134 exit_log:
2135 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry,
2136 							   link_id);
2137 	if (QDF_IS_STATUS_ERROR(temp))
2138 		status = temp;
2139 exit:
2140 	/**
2141 	 * Release the reference taken when the entry is inserted into
2142 	 * the reorder list
2143 	 */
2144 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2145 
2146 	return status;
2147 }
2148 
2149 /**
2150  * mgmt_rx_reo_is_entry_ready_to_send_up() - API to check whether the
2151  * list entry can be send to upper layers.
2152  * @entry: List entry
2153  *
2154  * Return: QDF_STATUS
2155  */
2156 static bool
2157 mgmt_rx_reo_is_entry_ready_to_send_up(struct mgmt_rx_reo_list_entry *entry)
2158 {
2159 	qdf_assert_always(entry);
2160 
2161 	return LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry) ||
2162 	       !LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry) ||
2163 	       LIST_ENTRY_IS_AGED_OUT(entry) ||
2164 	       LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry) ||
2165 	       LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry);
2166 }
2167 
2168 /**
2169  * mgmt_rx_reo_release_egress_list_entries() - Release entries from the
2170  * egress list
2171  * @reo_context: Pointer to management Rx reorder context
2172  *
2173  * This API releases the entries from the egress list based on the following
2174  * conditions.
2175  *   a) Entries with total wait count equal to 0
2176  *   b) Entries which are timed out or entries with global time stamp <= global
2177  *      time stamp of the latest frame which is timed out. We can only release
2178  *      the entries in the increasing order of the global time stamp.
2179  *      So all the entries with global time stamp <= global time stamp of the
2180  *      latest timed out frame has to be released.
2181  *
2182  * Return: QDF_STATUS
2183  */
2184 static QDF_STATUS
2185 mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
2186 {
2187 	QDF_STATUS status;
2188 	struct mgmt_rx_reo_egress_list *egress_list;
2189 	struct mgmt_rx_reo_list *reo_egress_list;
2190 	qdf_timer_t *egress_inactivity_timer;
2191 
2192 	if (!reo_context) {
2193 		mgmt_rx_reo_err("reo context is null");
2194 		return QDF_STATUS_E_NULL_VALUE;
2195 	}
2196 
2197 	egress_list = &reo_context->egress_list;
2198 	reo_egress_list = &egress_list->reo_list;
2199 	egress_inactivity_timer = &egress_list->egress_inactivity_timer;
2200 
2201 	qdf_spin_lock(&reo_context->frame_release_lock);
2202 
2203 	while (1) {
2204 		struct mgmt_rx_reo_list_entry *first_entry;
2205 		/* TODO yield if release_count > THRESHOLD */
2206 		uint16_t release_count = 0;
2207 		uint32_t first_entry_ts;
2208 		struct mgmt_rx_event_params *rx_params;
2209 		struct mgmt_rx_reo_frame_info *last_released_frame =
2210 					&reo_egress_list->last_released_frame;
2211 		uint32_t last_released_frame_ts;
2212 		bool deliver;
2213 
2214 		qdf_spin_lock_bh(&reo_egress_list->list_lock);
2215 
2216 		first_entry = qdf_list_first_entry_or_null(
2217 					&reo_egress_list->list,
2218 					struct mgmt_rx_reo_list_entry, node);
2219 		if (!first_entry) {
2220 			status = QDF_STATUS_SUCCESS;
2221 			goto exit_unlock_egress_list_lock;
2222 		}
2223 
2224 		deliver = mgmt_rx_reo_is_entry_ready_to_send_up(first_entry);
2225 		qdf_assert_always(deliver);
2226 
2227 		rx_params = first_entry->rx_params;
2228 
2229 		status = qdf_list_remove_node(&reo_egress_list->list,
2230 					      &first_entry->node);
2231 		if (QDF_IS_STATUS_ERROR(status)) {
2232 			status = QDF_STATUS_E_FAILURE;
2233 			goto exit_unlock_egress_list_lock;
2234 		}
2235 		first_entry->egress_list_removal_ts = qdf_get_log_timestamp();
2236 
2237 		/**
2238 		 * Last released frame global time stamp is invalid means that
2239 		 * current frame is the first frame to be released to the
2240 		 * upper layer from the egress list. Blindly update the last
2241 		 * released frame global time stamp to the current frame's
2242 		 * global time stamp and set the valid to true.
2243 		 * If the last released frame global time stamp is valid and
2244 		 * current frame's global time stamp is >= last released frame
2245 		 * global time stamp, deliver the current frame to upper layer
2246 		 * and update the last released frame global time stamp.
2247 		 */
2248 		first_entry_ts = mgmt_rx_reo_get_global_ts(rx_params);
2249 		last_released_frame_ts =
2250 			last_released_frame->reo_params.global_timestamp;
2251 
2252 		if (!last_released_frame->valid ||
2253 		    mgmt_rx_reo_compare_global_timestamps_gte(
2254 			first_entry_ts, last_released_frame_ts)) {
2255 			qdf_timer_sync_cancel(egress_inactivity_timer);
2256 
2257 			last_released_frame->reo_params =
2258 						*rx_params->reo_params;
2259 			last_released_frame->valid = true;
2260 
2261 			qdf_timer_mod(egress_inactivity_timer,
2262 				      MGMT_RX_REO_EGRESS_INACTIVITY_TIMEOUT);
2263 		} else {
2264 			/**
2265 			 * This should never happen. All the frames older than
2266 			 * the last frame released from the reorder list will be
2267 			 * discarded at the entry to reorder algorithm itself.
2268 			 */
2269 			qdf_assert_always(first_entry->is_parallel_rx);
2270 		}
2271 
2272 		qdf_spin_unlock_bh(&reo_egress_list->list_lock);
2273 
2274 		status = mgmt_rx_reo_list_entry_send_up(reo_context,
2275 							first_entry);
2276 		if (QDF_IS_STATUS_ERROR(status)) {
2277 			status = QDF_STATUS_E_FAILURE;
2278 			qdf_mem_free(first_entry);
2279 			goto exit_unlock_frame_release_lock;
2280 		}
2281 
2282 		qdf_mem_free(first_entry);
2283 		release_count++;
2284 	}
2285 
2286 	status = QDF_STATUS_SUCCESS;
2287 	goto exit_unlock_frame_release_lock;
2288 
2289 exit_unlock_egress_list_lock:
2290 	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
2291 exit_unlock_frame_release_lock:
2292 	qdf_spin_unlock(&reo_context->frame_release_lock);
2293 
2294 	return status;
2295 }
2296 
2297 /**
2298  * mgmt_rx_reo_check_sanity_list() - Check the sanity of reorder list
2299  * @reo_list: Pointer to reorder list
2300  *
2301  * Check the sanity of ingress reorder list or egress reorder list.
2302  * Ingress/Egress reorder list entries should be in the non decreasing order
2303  * of global time stamp.
2304  *
2305  * Return: QDF_STATUS
2306  */
2307 static QDF_STATUS
2308 mgmt_rx_reo_check_sanity_list(struct mgmt_rx_reo_list *reo_list)
2309 {
2310 	struct mgmt_rx_reo_list_entry *first;
2311 	struct mgmt_rx_reo_list_entry *cur;
2312 	uint32_t ts_prev;
2313 	uint32_t ts_cur;
2314 
2315 	qdf_assert_always(reo_list);
2316 
2317 	if (qdf_list_empty(&reo_list->list))
2318 		return QDF_STATUS_SUCCESS;
2319 
2320 	first = qdf_list_first_entry_or_null(&reo_list->list,
2321 					     struct mgmt_rx_reo_list_entry,
2322 					     node);
2323 	qdf_assert_always(first);
2324 
2325 	cur = first;
2326 	ts_prev = mgmt_rx_reo_get_global_ts(first->rx_params);
2327 
2328 	qdf_list_for_each_continue(&reo_list->list, cur, node) {
2329 		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
2330 
2331 		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_cur,
2332 							       ts_prev))
2333 			return QDF_STATUS_E_INVAL;
2334 
2335 		ts_prev = ts_cur;
2336 	}
2337 
2338 	return QDF_STATUS_SUCCESS;
2339 }
2340 
2341 /**
2342  * mgmt_rx_reo_check_sanity_lists() - Check the sanity of ingress and
2343  * egress reorder lists
2344  * @reo_egress_list: Pointer to egress reorder list
2345  * @reo_ingress_list: Pointer to ingress reorder list
2346  *
2347  * Check the sanity of ingress reorder list and egress reorder list.
2348  * This API does the following sanity checks.
2349  *
2350  * 1. Ingress list entries should be in the non decreasing order of global
2351  *    time stamp.
2352  * 2. Egress list entries should be in the non decreasing order of global
2353  *    time stamp.
2354  * 3. All the entries in egress list should have global time stamp less
2355  *    than or equal to all the entries in ingress list.
2356  *
2357  * Return: QDF_STATUS
2358  */
2359 static QDF_STATUS
2360 mgmt_rx_reo_check_sanity_lists(struct mgmt_rx_reo_list *reo_egress_list,
2361 			       struct mgmt_rx_reo_list *reo_ingress_list)
2362 {
2363 	QDF_STATUS status;
2364 	struct mgmt_rx_reo_list_entry *last_entry_egress_list;
2365 	uint32_t ts_egress_last_entry;
2366 	struct mgmt_rx_reo_list_entry *first_entry_ingress_list;
2367 	uint32_t ts_ingress_first_entry;
2368 
2369 	qdf_assert_always(reo_egress_list);
2370 	qdf_assert_always(reo_ingress_list);
2371 
2372 	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
2373 	if (QDF_IS_STATUS_ERROR(status)) {
2374 		mgmt_rx_reo_err("Sanity check of egress list failed");
2375 		return status;
2376 	}
2377 
2378 	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
2379 	if (QDF_IS_STATUS_ERROR(status)) {
2380 		mgmt_rx_reo_err("Sanity check of ingress list failed");
2381 		return status;
2382 	}
2383 
2384 	if (qdf_list_empty(&reo_egress_list->list) ||
2385 	    qdf_list_empty(&reo_ingress_list->list))
2386 		return QDF_STATUS_SUCCESS;
2387 
2388 	last_entry_egress_list =
2389 		qdf_list_last_entry(&reo_egress_list->list,
2390 				    struct mgmt_rx_reo_list_entry, node);
2391 	ts_egress_last_entry =
2392 		mgmt_rx_reo_get_global_ts(last_entry_egress_list->rx_params);
2393 
2394 	first_entry_ingress_list =
2395 		qdf_list_first_entry_or_null(&reo_ingress_list->list,
2396 					     struct mgmt_rx_reo_list_entry,
2397 					     node);
2398 	if (!first_entry_ingress_list) {
2399 		mgmt_rx_reo_err("Ingress list is expected to be non empty");
2400 		return QDF_STATUS_E_INVAL;
2401 	}
2402 
2403 	ts_ingress_first_entry =
2404 		mgmt_rx_reo_get_global_ts(first_entry_ingress_list->rx_params);
2405 
2406 	if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_ingress_first_entry,
2407 						       ts_egress_last_entry))
2408 		return QDF_STATUS_E_INVAL;
2409 
2410 	return QDF_STATUS_SUCCESS;
2411 }
2412 
2413 /**
2414  * mgmt_rx_reo_move_entries_ingress_to_egress_list() - Moves frames in
2415  * the ingress list which are ready to be delivered to the egress list
2416  * @ingress_list: Pointer to ingress list
2417  * @egress_list: Pointer to egress list
2418  *
2419  * This API moves frames in the ingress list which are ready to be delivered
2420  * to the egress list.
2421  *
2422  * Return: QDF_STATUS
2423  */
2424 static QDF_STATUS
2425 mgmt_rx_reo_move_entries_ingress_to_egress_list
2426 		(struct mgmt_rx_reo_ingress_list *ingress_list,
2427 		 struct mgmt_rx_reo_egress_list *egress_list)
2428 {
2429 	struct mgmt_rx_reo_list *reo_ingress_list;
2430 	struct mgmt_rx_reo_list *reo_egress_list;
2431 	QDF_STATUS status;
2432 	struct mgmt_rx_reo_list_entry *ingress_list_entry;
2433 	struct mgmt_rx_reo_list_entry *latest_frame_ready_to_deliver = NULL;
2434 	uint16_t num_frames_ready_to_deliver = 0;
2435 
2436 	if (!ingress_list) {
2437 		mgmt_rx_reo_err("Ingress list is null");
2438 		return QDF_STATUS_E_NULL_VALUE;
2439 	}
2440 	reo_ingress_list = &ingress_list->reo_list;
2441 
2442 	if (!egress_list) {
2443 		mgmt_rx_reo_err("Egress list is null");
2444 		return QDF_STATUS_E_NULL_VALUE;
2445 	}
2446 	reo_egress_list = &egress_list->reo_list;
2447 
2448 	qdf_spin_lock_bh(&reo_ingress_list->list_lock);
2449 
2450 	qdf_list_for_each(&reo_ingress_list->list, ingress_list_entry, node) {
2451 		if (!mgmt_rx_reo_is_entry_ready_to_send_up(ingress_list_entry))
2452 			break;
2453 
2454 		ingress_list_entry->ingress_list_removal_ts =
2455 							qdf_get_log_timestamp();
2456 		ingress_list_entry->egress_list_insertion_ts =
2457 							qdf_get_log_timestamp();
2458 		latest_frame_ready_to_deliver = ingress_list_entry;
2459 		num_frames_ready_to_deliver++;
2460 	}
2461 
2462 	/* Check if ingress list has at least one frame ready to be delivered */
2463 	if (num_frames_ready_to_deliver) {
2464 		qdf_list_t temp_list_frames_ready_to_deliver;
2465 
2466 		qdf_list_create(&temp_list_frames_ready_to_deliver,
2467 				INGRESS_TO_EGRESS_MOVEMENT_TEMP_LIST_MAX_SIZE);
2468 
2469 		status = qdf_list_split(&temp_list_frames_ready_to_deliver,
2470 					&reo_ingress_list->list,
2471 					&latest_frame_ready_to_deliver->node);
2472 		qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
2473 
2474 		qdf_assert_always(num_frames_ready_to_deliver ==
2475 			qdf_list_size(&temp_list_frames_ready_to_deliver));
2476 
2477 		qdf_spin_lock_bh(&reo_egress_list->list_lock);
2478 
2479 		status = qdf_list_join(&reo_egress_list->list,
2480 				       &temp_list_frames_ready_to_deliver);
2481 		qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
2482 
2483 		qdf_assert_always(qdf_list_size(&reo_egress_list->list) <=
2484 						reo_egress_list->max_list_size);
2485 
2486 		status = mgmt_rx_reo_check_sanity_lists(reo_egress_list,
2487 							reo_ingress_list);
2488 		if (QDF_IS_STATUS_ERROR(status)) {
2489 			mgmt_rx_reo_err("Sanity check of reo lists failed");
2490 			qdf_assert_always(0);
2491 		}
2492 
2493 		qdf_spin_unlock_bh(&reo_egress_list->list_lock);
2494 
2495 		qdf_list_destroy(&temp_list_frames_ready_to_deliver);
2496 	}
2497 
2498 	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);
2499 
2500 	return QDF_STATUS_SUCCESS;
2501 }
2502 
2503 /**
2504  * mgmt_rx_reo_ageout_entries_ingress_list() - Helper API to ageout entries
2505  * in the ingress list
2506  * @ingress_list: Pointer to the ingress list
2507  * @latest_aged_out_entry: Double pointer to the latest agedout entry in the
2508  * ingress list
2509  *
2510  * Helper API to ageout entries in the ingress list.
2511  *
2512  * Return: QDF_STATUS
2513  */
2514 static QDF_STATUS
2515 mgmt_rx_reo_ageout_entries_ingress_list
2516 			(struct mgmt_rx_reo_ingress_list *ingress_list,
2517 			 struct mgmt_rx_reo_list_entry **latest_aged_out_entry)
2518 {
2519 	struct mgmt_rx_reo_list *reo_ingress_list;
2520 	struct mgmt_rx_reo_list_entry *cur_entry;
2521 	uint64_t cur_ts;
2522 
2523 	qdf_assert_always(ingress_list);
2524 	qdf_assert_always(latest_aged_out_entry);
2525 
2526 	*latest_aged_out_entry = NULL;
2527 	reo_ingress_list = &ingress_list->reo_list;
2528 
2529 	qdf_spin_lock_bh(&reo_ingress_list->list_lock);
2530 
2531 	cur_ts = qdf_get_log_timestamp();
2532 
2533 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
2534 		if (cur_ts - cur_entry->ingress_list_insertion_ts >=
2535 		    ingress_list->list_entry_timeout_us) {
2536 			*latest_aged_out_entry = cur_entry;
2537 			cur_entry->status |= STATUS_AGED_OUT;
2538 		}
2539 	}
2540 
2541 	if (!*latest_aged_out_entry)
2542 		goto exit_release_list_lock;
2543 
2544 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
2545 		if (cur_entry == *latest_aged_out_entry)
2546 			break;
2547 		cur_entry->status |= STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
2548 	}
2549 
2550 exit_release_list_lock:
2551 	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);
2552 
2553 	return QDF_STATUS_SUCCESS;
2554 }
2555 
2556 /**
2557  * mgmt_rx_reo_ingress_list_ageout_timer_handler() - Periodic ageout timer
2558  * handler
2559  * @arg: Argument to timer handler
2560  *
2561  * This is the handler for periodic ageout timer used to timeout entries in the
2562  * ingress list.
2563  *
2564  * Return: void
2565  */
2566 static void
2567 mgmt_rx_reo_ingress_list_ageout_timer_handler(void *arg)
2568 {
2569 	struct mgmt_rx_reo_ingress_list *ingress_list = arg;
2570 	struct mgmt_rx_reo_egress_list *egress_list;
2571 	QDF_STATUS ret;
2572 	struct mgmt_rx_reo_context *reo_ctx;
2573 	/**
2574 	 * Stores the pointer to the entry in ingress list for the latest aged
2575 	 * out frame. Latest aged out frame is the aged out frame in reorder
2576 	 * list which has the largest global time stamp value.
2577 	 */
2578 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
2579 
2580 	qdf_assert_always(ingress_list);
2581 	reo_ctx = mgmt_rx_reo_get_context_from_ingress_list(ingress_list);
2582 	qdf_assert_always(reo_ctx);
2583 	egress_list = &reo_ctx->egress_list;
2584 
2585 	qdf_timer_mod(&ingress_list->ageout_timer,
2586 		      MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
2587 
2588 	ret = mgmt_rx_reo_ageout_entries_ingress_list(ingress_list,
2589 						      &latest_aged_out_entry);
2590 	if (QDF_IS_STATUS_ERROR(ret)) {
2591 		mgmt_rx_reo_err("Failure to ageout entries in ingress list");
2592 		return;
2593 	}
2594 
2595 	if (!latest_aged_out_entry)
2596 		return;
2597 
2598 	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
2599 							      egress_list);
2600 	if (QDF_IS_STATUS_ERROR(ret)) {
2601 		mgmt_rx_reo_err("Ingress to egress list movement failure(%d)",
2602 				ret);
2603 		return;
2604 	}
2605 
2606 	ret = mgmt_rx_reo_release_egress_list_entries(reo_ctx);
2607 	if (QDF_IS_STATUS_ERROR(ret)) {
2608 		mgmt_rx_reo_err("Failure to release entries, ret = %d", ret);
2609 		return;
2610 	}
2611 }
2612 
2613 /**
2614  * mgmt_rx_reo_egress_inactivity_timer_handler() - Timer handler
2615  * for egress inactivity timer
2616  * @arg: Argument to timer handler
2617  *
2618  * This is the timer handler for tracking management Rx inactivity
2619  * across links.
2620  *
2621  * Return: void
2622  */
2623 static void
2624 mgmt_rx_reo_egress_inactivity_timer_handler(void *arg)
2625 {
2626 	struct mgmt_rx_reo_egress_list *egress_list = arg;
2627 	struct mgmt_rx_reo_list *reo_egress_list;
2628 	struct mgmt_rx_reo_frame_info *last_delivered_frame;
2629 
2630 	qdf_assert_always(egress_list);
2631 
2632 	reo_egress_list = &egress_list->reo_list;
2633 	last_delivered_frame = &reo_egress_list->last_released_frame;
2634 
2635 	qdf_spin_lock(&reo_egress_list->list_lock);
2636 
2637 	qdf_mem_zero(last_delivered_frame, sizeof(*last_delivered_frame));
2638 
2639 	qdf_spin_unlock(&reo_egress_list->list_lock);
2640 }
2641 
2642 /**
2643  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
2644  * frame received.
2645  * @frame_desc: Pointer to the frame descriptor
2646  * @entry: Pointer to the list entry
2647  *
2648  * This API prepares the reorder list entry corresponding to a management frame
2649  * to be consumed by host. This entry would be inserted at the appropriate
2650  * position in the reorder list.
2651  *
2652  * Return: QDF_STATUS
2653  */
2654 static QDF_STATUS
2655 mgmt_rx_reo_prepare_list_entry(
2656 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
2657 		struct mgmt_rx_reo_list_entry **entry)
2658 {
2659 	struct mgmt_rx_reo_list_entry *list_entry;
2660 	struct wlan_objmgr_pdev *pdev;
2661 	uint8_t link_id;
2662 	uint8_t ml_grp_id;
2663 
2664 	if (!frame_desc) {
2665 		mgmt_rx_reo_err("frame descriptor is null");
2666 		return QDF_STATUS_E_NULL_VALUE;
2667 	}
2668 
2669 	if (!entry) {
2670 		mgmt_rx_reo_err("Pointer to list entry is null");
2671 		return QDF_STATUS_E_NULL_VALUE;
2672 	}
2673 
2674 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2675 	ml_grp_id = mgmt_rx_reo_get_mlo_grp_id(frame_desc->rx_params);
2676 
2677 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, ml_grp_id,
2678 					      WLAN_MGMT_RX_REO_ID);
2679 	if (!pdev) {
2680 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
2681 				link_id);
2682 		return QDF_STATUS_E_NULL_VALUE;
2683 	}
2684 
2685 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
2686 	if (!list_entry) {
2687 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2688 		mgmt_rx_reo_err("List entry allocation failed");
2689 		return QDF_STATUS_E_NOMEM;
2690 	}
2691 
2692 	list_entry->pdev = pdev;
2693 	list_entry->nbuf = frame_desc->nbuf;
2694 	list_entry->rx_params = frame_desc->rx_params;
2695 	list_entry->wait_count = frame_desc->wait_count;
2696 	list_entry->initial_wait_count = frame_desc->wait_count;
2697 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
2698 		     qdf_min(sizeof(list_entry->shared_snapshots),
2699 			     sizeof(frame_desc->shared_snapshots)));
2700 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
2701 		     qdf_min(sizeof(list_entry->host_snapshot),
2702 			     sizeof(frame_desc->host_snapshot)));
2703 	list_entry->status = 0;
2704 	if (list_entry->wait_count.total_count)
2705 		list_entry->status |= STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2706 
2707 	*entry = list_entry;
2708 
2709 	return QDF_STATUS_SUCCESS;
2710 }
2711 
2712 /**
2713  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
2714  * on the wait count of a frame received after that on air.
2715  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
2716  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
2717  *
2718  * This API optimizes the wait count of a frame based on the wait count of
2719  * a frame received after that on air. Old frame refers to the frame received
2720  * first on the air and new frame refers to the frame received after that.
2721  * We use the following fundamental idea. Wait counts for old frames can't be
2722  * more than wait counts for the new frame. Use this to optimize the wait count
2723  * for the old frames. Per link wait count of an old frame is minimum of the
2724  * per link wait count of the old frame and new frame.
2725  *
2726  * Return: QDF_STATUS
2727  */
2728 static QDF_STATUS
2729 mgmt_rx_reo_update_wait_count(
2730 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
2731 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
2732 {
2733 	uint8_t link_id;
2734 
2735 	qdf_assert_always(wait_count_old_frame);
2736 	qdf_assert_always(wait_count_new_frame);
2737 
2738 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
2739 		if (wait_count_old_frame->per_link_count[link_id]) {
2740 			uint32_t temp_wait_count;
2741 			uint32_t wait_count_diff;
2742 
2743 			temp_wait_count =
2744 				wait_count_old_frame->per_link_count[link_id];
2745 			wait_count_old_frame->per_link_count[link_id] =
2746 				qdf_min(wait_count_old_frame->
2747 					per_link_count[link_id],
2748 					wait_count_new_frame->
2749 					per_link_count[link_id]);
2750 			wait_count_diff = temp_wait_count -
2751 				wait_count_old_frame->per_link_count[link_id];
2752 
2753 			wait_count_old_frame->total_count -= wait_count_diff;
2754 		}
2755 	}
2756 
2757 	return QDF_STATUS_SUCCESS;
2758 }
2759 
2760 /**
2761  * mgmt_rx_reo_update_ingress_list() - Modify the reorder list when a frame is
2762  * received
2763  * @ingress_list: Pointer to ingress list
2764  * @frame_desc: Pointer to frame descriptor
2765  * @new: pointer to the list entry for the current frame
2766  * @is_queued: Whether this frame is queued in the REO list
2767  *
2768  * API to update the reorder list on every management frame reception.
2769  * This API does the following things.
2770  *   a) Update the wait counts for all the frames in the reorder list with
2771  *      global time stamp <= current frame's global time stamp. We use the
2772  *      following principle for updating the wait count in this case.
2773  *      Let A and B be two management frames with global time stamp of A <=
2774  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
2775  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
2776  *      min(WAi, WBi).
2777  *   b) If the current frame is to be consumed by host, insert it in the
2778  *      reorder list such that the list is always sorted in the increasing order
2779  *      of global time stamp. Update the wait count of the current frame based
2780  *      on the frame next to it in the reorder list (if any).
2781  *   c) Update the wait count of the frames in the reorder list with global
2782  *      time stamp > current frame's global time stamp. Let the current frame
2783  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
2784  *      all the frames in the reorder list with global time stamp > current
2785  *      frame's global time stamp.
2786  *
2787  * Return: QDF_STATUS
2788  */
2789 static QDF_STATUS
2790 mgmt_rx_reo_update_ingress_list(struct mgmt_rx_reo_ingress_list *ingress_list,
2791 				struct mgmt_rx_reo_frame_descriptor *frame_desc,
2792 				struct mgmt_rx_reo_list_entry *new,
2793 				bool *is_queued)
2794 {
2795 	struct mgmt_rx_reo_list *reo_ingress_list;
2796 	struct mgmt_rx_reo_list_entry *cur;
2797 	struct mgmt_rx_reo_list_entry *least_greater = NULL;
2798 	bool least_greater_entry_found = false;
2799 	QDF_STATUS status;
2800 	uint16_t list_insertion_pos = 0;
2801 	uint32_t ts_new;
2802 
2803 	if (!ingress_list) {
2804 		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
2805 		return QDF_STATUS_E_NULL_VALUE;
2806 	}
2807 	reo_ingress_list = &ingress_list->reo_list;
2808 
2809 	if (!frame_desc) {
2810 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
2811 		return QDF_STATUS_E_NULL_VALUE;
2812 	}
2813 
2814 	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2815 	      frame_desc->reo_required) != !new)
2816 		qdf_assert_always(0);
2817 
2818 	if (!is_queued) {
2819 		mgmt_rx_reo_err("Pointer to queued indication is null");
2820 		return QDF_STATUS_E_NULL_VALUE;
2821 	}
2822 	*is_queued = false;
2823 
2824 	ts_new = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
2825 
2826 	frame_desc->ingress_list_size_rx =
2827 				qdf_list_size(&reo_ingress_list->list);
2828 
2829 	qdf_list_for_each(&reo_ingress_list->list, cur, node) {
2830 		uint32_t ts_cur;
2831 
2832 		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
2833 
2834 		least_greater_entry_found =
2835 		     !mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_cur);
2836 		if (least_greater_entry_found) {
2837 			least_greater = cur;
2838 			break;
2839 		}
2840 
2841 		qdf_assert_always(!frame_desc->is_stale || cur->is_parallel_rx);
2842 
2843 		list_insertion_pos++;
2844 
2845 		status = mgmt_rx_reo_update_wait_count(&cur->wait_count,
2846 						       &frame_desc->wait_count);
2847 		if (QDF_IS_STATUS_ERROR(status))
2848 			return status;
2849 
2850 		if (cur->wait_count.total_count == 0)
2851 			cur->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2852 	}
2853 
2854 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
2855 	    !frame_desc->is_stale && frame_desc->reo_required &&
2856 	    (frame_desc->queued_list != MGMT_RX_REO_LIST_TYPE_EGRESS)) {
2857 		bool overflow;
2858 
2859 		if (least_greater_entry_found) {
2860 			status = mgmt_rx_reo_update_wait_count(
2861 					&new->wait_count,
2862 					&least_greater->wait_count);
2863 
2864 			if (QDF_IS_STATUS_ERROR(status))
2865 				return status;
2866 
2867 			frame_desc->wait_count = new->wait_count;
2868 
2869 			if (new->wait_count.total_count == 0)
2870 				new->status &=
2871 					~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2872 		}
2873 
2874 		new->ingress_list_insertion_ts = qdf_get_log_timestamp();
2875 		new->ingress_timestamp = frame_desc->ingress_timestamp;
2876 		new->is_parallel_rx = frame_desc->is_parallel_rx;
2877 		frame_desc->ingress_list_insertion_pos = list_insertion_pos;
2878 
2879 		if (least_greater_entry_found)
2880 			status = qdf_list_insert_before(
2881 					&reo_ingress_list->list, &new->node,
2882 					&least_greater->node);
2883 		else
2884 			status = qdf_list_insert_back(
2885 					&reo_ingress_list->list, &new->node);
2886 
2887 		if (QDF_IS_STATUS_ERROR(status))
2888 			return status;
2889 
2890 		*is_queued = true;
2891 		frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_INGRESS;
2892 
2893 		overflow = (qdf_list_size(&reo_ingress_list->list) >
2894 					reo_ingress_list->max_list_size);
2895 		qdf_assert_always(!overflow);
2896 
2897 		if (new->wait_count.total_count == 0)
2898 			frame_desc->zero_wait_count_rx = true;
2899 
2900 		if (frame_desc->zero_wait_count_rx &&
2901 		    qdf_list_first_entry_or_null(&reo_ingress_list->list,
2902 						 struct mgmt_rx_reo_list_entry,
2903 						 node) == new)
2904 			frame_desc->immediate_delivery = true;
2905 	}
2906 
2907 	if (least_greater_entry_found) {
2908 		cur = least_greater;
2909 
2910 		qdf_list_for_each_from(&reo_ingress_list->list, cur, node) {
2911 			uint8_t frame_link_id;
2912 			struct mgmt_rx_reo_wait_count *wait_count;
2913 
2914 			frame_link_id =
2915 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
2916 			wait_count = &cur->wait_count;
2917 			if (wait_count->per_link_count[frame_link_id]) {
2918 				uint32_t old_wait_count;
2919 				uint32_t new_wait_count;
2920 				uint32_t wait_count_diff;
2921 				uint16_t pkt_ctr_delta;
2922 
2923 				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
2924 				old_wait_count =
2925 				      wait_count->per_link_count[frame_link_id];
2926 
2927 				if (old_wait_count >= pkt_ctr_delta)
2928 					new_wait_count = old_wait_count -
2929 							 pkt_ctr_delta;
2930 				else
2931 					new_wait_count = 0;
2932 
2933 				wait_count_diff = old_wait_count -
2934 						  new_wait_count;
2935 
2936 				wait_count->per_link_count[frame_link_id] =
2937 								new_wait_count;
2938 				wait_count->total_count -= wait_count_diff;
2939 
2940 				if (wait_count->total_count == 0)
2941 					cur->status &=
2942 					  ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
2943 			}
2944 		}
2945 	}
2946 
2947 	return QDF_STATUS_SUCCESS;
2948 }
2949 
/**
 * mgmt_rx_reo_update_egress_list() - Update the egress list when a frame
 * is received
 * @egress_list: Pointer to egress list
 * @frame_desc: Pointer to frame descriptor
 * @new: Pointer to the list entry for the current frame (NULL for frames
 * which are not host-consumed/reo-required)
 * @is_queued: Out parameter; set to true if the frame got queued to the
 * egress list
 *
 * Checks whether the current frame is stale with respect to the last
 * frame released from the egress list and handles it if so. If the frame
 * is a host-consumed frame which is older than the newest entry in the
 * egress list, it is inserted into the egress list (kept sorted by global
 * time stamp) and marked for immediate delivery.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_egress_list(struct mgmt_rx_reo_egress_list *egress_list,
			       struct mgmt_rx_reo_frame_descriptor *frame_desc,
			       struct mgmt_rx_reo_list_entry *new,
			       bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_egress_list;
	struct mgmt_rx_reo_list_entry *cur;
	struct mgmt_rx_reo_list_entry *last;
	/* First list entry with global time stamp > current frame's */
	struct mgmt_rx_reo_list_entry *least_greater = NULL;
	bool least_greater_entry_found = false;
	uint32_t ts_last;
	uint32_t ts_new;
	uint16_t list_insertion_pos = 0;
	QDF_STATUS ret;

	if (!egress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/*
	 * A list entry (@new) must be supplied if and only if this is a
	 * host-consumed frame that requires reordering.
	 */
	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	      frame_desc->reo_required) != !new)
		qdf_assert_always(0);

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	ts_new = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
	/* List size at ingress, recorded for debug purposes */
	frame_desc->egress_list_size_rx = qdf_list_size(&reo_egress_list->list);

	/* Mark the frame stale if it predates the last released frame */
	ret = mgmt_rx_reo_is_stale_frame(&reo_egress_list->last_released_frame,
					 frame_desc);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	if (frame_desc->is_stale) {
		ret = mgmt_rx_reo_handle_stale_frame(reo_egress_list,
						     frame_desc);
		if (QDF_IS_STATUS_ERROR(ret))
			return ret;

		/*
		 * Any egress entry older than a stale frame is expected
		 * to be a parallel Rx across links.
		 */
		qdf_list_for_each(&reo_egress_list->list, cur, node) {
			uint32_t ts_cur;

			ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

			if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
								       ts_cur))
				break;

			qdf_assert_always(cur->is_parallel_rx);
		}
	}

	/* Nothing to queue for frames without a prepared list entry */
	if (!new)
		return QDF_STATUS_SUCCESS;

	if (qdf_list_empty(&reo_egress_list->list))
		return QDF_STATUS_SUCCESS;

	last = qdf_list_last_entry(&reo_egress_list->list,
				   struct mgmt_rx_reo_list_entry, node);
	qdf_assert_always(last);

	ts_last = mgmt_rx_reo_get_global_ts(last->rx_params);

	/*
	 * Frames newer than everything in the egress list belong in the
	 * ingress list instead; leave them for the ingress list update.
	 */
	if (mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_last))
		return QDF_STATUS_SUCCESS;

	/* Find the insertion point that keeps the list time-sorted */
	qdf_list_for_each(&reo_egress_list->list, cur, node) {
		uint32_t ts_cur;

		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
							       ts_cur)) {
			least_greater = cur;
			least_greater_entry_found = true;
			break;
		}

		list_insertion_pos++;
	}
	/* Must exist, since ts_new < ts_last was established above */
	qdf_assert_always(least_greater_entry_found);

	ret = mgmt_rx_reo_update_wait_count(&new->wait_count,
					    &least_greater->wait_count);

	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	frame_desc->wait_count = new->wait_count;

	if (new->wait_count.total_count == 0)
		new->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;

	new->egress_list_insertion_ts = qdf_get_log_timestamp();
	new->ingress_timestamp = frame_desc->ingress_timestamp;
	new->is_parallel_rx = frame_desc->is_parallel_rx;
	frame_desc->egress_list_insertion_pos = list_insertion_pos;

	ret = qdf_list_insert_before(&reo_egress_list->list, &new->node,
				     &least_greater->node);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	*is_queued = true;
	frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_EGRESS;

	if (frame_desc->wait_count.total_count == 0)
		frame_desc->zero_wait_count_rx = true;
	/* Egress-queued frames are candidates for immediate delivery */
	frame_desc->immediate_delivery = true;

	return QDF_STATUS_SUCCESS;
}
3075 
/**
 * mgmt_rx_reo_update_lists() - Update the ingress and egress lists upon
 * reception of a management frame
 * @ingress_list: Pointer to ingress list
 * @egress_list: Pointer to egress list
 * @frame_desc: Pointer to frame descriptor
 * @is_queued: Out parameter; set to true if the frame got queued to
 * either the ingress or the egress list
 *
 * Prepares a reorder list entry for host-consumed frames which require
 * reordering, then updates the egress list followed by the ingress list.
 * The ingress list lock is taken first and held across the egress list
 * update. A frame can be queued to at most one of the two lists; if the
 * prepared entry ends up queued to neither, it is freed here and its
 * pdev reference is released.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_lists(struct mgmt_rx_reo_ingress_list *ingress_list,
			 struct mgmt_rx_reo_egress_list *egress_list,
			 struct mgmt_rx_reo_frame_descriptor *frame_desc,
			 bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_ingress_list;
	struct mgmt_rx_reo_list *reo_egress_list;
	bool is_queued_to_ingress_list = false;
	bool is_queued_to_egress_list = false;
	QDF_STATUS status;
	struct mgmt_rx_reo_list_entry *new_entry = NULL;
	enum mgmt_rx_reo_list_type queued_list;

	if (!ingress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_ingress_list = &ingress_list->reo_list;

	if (!egress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	/* Prepare the list entry before acquiring lock */
	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	    frame_desc->reo_required) {
		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to prepare list entry");
			return QDF_STATUS_E_FAILURE;
		}
	}

	/* Lock order: ingress list lock first, then egress list lock */
	qdf_spin_lock_bh(&reo_ingress_list->list_lock);

	qdf_spin_lock_bh(&reo_egress_list->list_lock);

	status = mgmt_rx_reo_update_egress_list(egress_list, frame_desc,
						new_entry,
						&is_queued_to_egress_list);
	if (QDF_IS_STATUS_ERROR(status))
		goto exit_release_egress_list_lock;

	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of egress list failed");
		qdf_assert_always(0);
	}

	qdf_spin_unlock_bh(&reo_egress_list->list_lock);

	status = mgmt_rx_reo_update_ingress_list(ingress_list, frame_desc,
						 new_entry,
						 &is_queued_to_ingress_list);
	if (QDF_IS_STATUS_ERROR(status))
		goto exit_release_ingress_list_lock;

	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of ingress list failed");
		qdf_assert_always(0);
	}

	status = QDF_STATUS_SUCCESS;
	goto exit_release_ingress_list_lock;

exit_release_egress_list_lock:
	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
exit_release_ingress_list_lock:
	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);

	/* A frame can be queued to at most one of the two lists */
	qdf_assert_always(!is_queued_to_ingress_list ||
			  !is_queued_to_egress_list);

	*is_queued = is_queued_to_ingress_list || is_queued_to_egress_list;

	queued_list = frame_desc->queued_list;
	qdf_assert_always(!(*is_queued &&
			    queued_list == MGMT_RX_REO_LIST_TYPE_INVALID));

	qdf_assert_always(new_entry || !*is_queued);

	/* Cleanup the entry if it is not queued */
	if (new_entry && !*is_queued) {
		/*
		 * New entry created is not inserted to reorder list, free
		 * the entry and release the reference
		 */
		wlan_objmgr_pdev_release_ref(new_entry->pdev,
					     WLAN_MGMT_RX_REO_ID);
		qdf_mem_free(new_entry);
	}

	return status;
}
3185 
3186 /**
3187  * mgmt_rx_reo_ingress_list_init() - Initialize the management rx-reorder
3188  * ingress list
3189  * @ingress_list: Pointer to ingress list
3190  *
3191  * API to initialize the management rx-reorder ingress list.
3192  *
3193  * Return: QDF_STATUS
3194  */
3195 static QDF_STATUS
3196 mgmt_rx_reo_ingress_list_init(struct mgmt_rx_reo_ingress_list *ingress_list)
3197 {
3198 	QDF_STATUS status;
3199 	struct mgmt_rx_reo_list *reo_ingress_list;
3200 
3201 	if (!ingress_list) {
3202 		mgmt_rx_reo_err("Ingress list is null");
3203 		return QDF_STATUS_E_NULL_VALUE;
3204 	}
3205 
3206 	reo_ingress_list = &ingress_list->reo_list;
3207 
3208 	reo_ingress_list->max_list_size = MGMT_RX_REO_INGRESS_LIST_MAX_SIZE;
3209 	qdf_list_create(&reo_ingress_list->list,
3210 			reo_ingress_list->max_list_size);
3211 	qdf_spinlock_create(&reo_ingress_list->list_lock);
3212 	qdf_mem_zero(&reo_ingress_list->last_inserted_frame,
3213 		     sizeof(reo_ingress_list->last_inserted_frame));
3214 	qdf_mem_zero(&reo_ingress_list->last_released_frame,
3215 		     sizeof(reo_ingress_list->last_released_frame));
3216 
3217 	ingress_list->list_entry_timeout_us =
3218 					MGMT_RX_REO_INGRESS_LIST_TIMEOUT_US;
3219 
3220 	status = qdf_timer_init(NULL, &ingress_list->ageout_timer,
3221 				mgmt_rx_reo_ingress_list_ageout_timer_handler,
3222 				ingress_list, QDF_TIMER_TYPE_WAKE_APPS);
3223 	if (QDF_IS_STATUS_ERROR(status)) {
3224 		mgmt_rx_reo_err("Failed to initialize ingress ageout timer");
3225 		return status;
3226 	}
3227 	qdf_timer_start(&ingress_list->ageout_timer,
3228 			MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
3229 
3230 	return QDF_STATUS_SUCCESS;
3231 }
3232 
3233 /**
3234  * mgmt_rx_reo_egress_list_init() - Initialize the management rx-reorder
3235  * egress list
3236  * @egress_list: Pointer to egress list
3237  *
3238  * API to initialize the management rx-reorder egress list.
3239  *
3240  * Return: QDF_STATUS
3241  */
3242 static QDF_STATUS
3243 mgmt_rx_reo_egress_list_init(struct mgmt_rx_reo_egress_list *egress_list)
3244 {
3245 	struct mgmt_rx_reo_list *reo_egress_list;
3246 	QDF_STATUS status;
3247 
3248 	if (!egress_list) {
3249 		mgmt_rx_reo_err("Egress list is null");
3250 		return QDF_STATUS_E_NULL_VALUE;
3251 	}
3252 
3253 	reo_egress_list = &egress_list->reo_list;
3254 
3255 	reo_egress_list->max_list_size = MGMT_RX_REO_EGRESS_LIST_MAX_SIZE;
3256 	qdf_list_create(&reo_egress_list->list, reo_egress_list->max_list_size);
3257 	qdf_spinlock_create(&reo_egress_list->list_lock);
3258 	qdf_mem_zero(&reo_egress_list->last_inserted_frame,
3259 		     sizeof(reo_egress_list->last_inserted_frame));
3260 	qdf_mem_zero(&reo_egress_list->last_released_frame,
3261 		     sizeof(reo_egress_list->last_released_frame));
3262 
3263 	status = qdf_timer_init(NULL, &egress_list->egress_inactivity_timer,
3264 				mgmt_rx_reo_egress_inactivity_timer_handler,
3265 				egress_list, QDF_TIMER_TYPE_WAKE_APPS);
3266 	if (QDF_IS_STATUS_ERROR(status)) {
3267 		mgmt_rx_reo_err("Failed to initialize egress inactivity timer");
3268 		return status;
3269 	}
3270 
3271 	return QDF_STATUS_SUCCESS;
3272 }
3273 
3274 /**
3275  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
3276  * Rx REO parameters.
3277  * @pdev: pdev extracted from the WMI event
3278  * @desc: pointer to frame descriptor
3279  *
3280  * Return: QDF_STATUS of operation
3281  */
3282 static QDF_STATUS
3283 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
3284 				      struct mgmt_rx_reo_frame_descriptor *desc)
3285 {
3286 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
3287 	struct mgmt_rx_reo_snapshot_params *host_ss;
3288 	struct mgmt_rx_reo_params *reo_params;
3289 	int pkt_ctr_delta;
3290 	struct wlan_objmgr_psoc *psoc;
3291 	uint16_t pkt_ctr_delta_thresh;
3292 
3293 	if (!desc) {
3294 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
3295 		return QDF_STATUS_E_NULL_VALUE;
3296 	}
3297 
3298 	if (!desc->rx_params) {
3299 		mgmt_rx_reo_err("Mgmt Rx params null");
3300 		return QDF_STATUS_E_NULL_VALUE;
3301 	}
3302 
3303 	reo_params = desc->rx_params->reo_params;
3304 	if (!reo_params) {
3305 		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
3306 		return QDF_STATUS_E_NULL_VALUE;
3307 	}
3308 
3309 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
3310 	if (!rx_reo_pdev_ctx) {
3311 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
3312 		return QDF_STATUS_E_FAILURE;
3313 	}
3314 
3315 	psoc = wlan_pdev_get_psoc(pdev);
3316 
3317 	/* FW should send valid REO parameters */
3318 	if (!reo_params->valid) {
3319 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
3320 		return QDF_STATUS_E_FAILURE;
3321 	}
3322 
3323 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
3324 
3325 	if (!host_ss->valid) {
3326 		desc->pkt_ctr_delta = 1;
3327 		goto update_host_ss;
3328 	}
3329 
3330 	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
3331 					     reo_params->mgmt_pkt_ctr)) {
3332 		mgmt_rx_reo_err("Cur frame ctr > last frame ctr for link = %u",
3333 				reo_params->link_id);
3334 		goto failure_debug;
3335 	}
3336 
3337 	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
3338 						      host_ss->mgmt_pkt_ctr);
3339 	qdf_assert_always(pkt_ctr_delta > 0);
3340 	desc->pkt_ctr_delta = pkt_ctr_delta;
3341 
3342 	if (pkt_ctr_delta == 1)
3343 		goto update_host_ss;
3344 
3345 	/*
3346 	 * Under back pressure scenarios, FW may drop management Rx frame
3347 	 * WMI events. So holes in the management packet counter is expected.
3348 	 * Add a debug print and optional assert to track the holes.
3349 	 */
3350 	mgmt_rx_reo_debug("pkt_ctr_delta = %u", pkt_ctr_delta);
3351 	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
3352 			  reo_params->valid, reo_params->mgmt_pkt_ctr,
3353 			  reo_params->global_timestamp);
3354 	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts =%u",
3355 			  host_ss->valid, host_ss->mgmt_pkt_ctr,
3356 			  host_ss->global_timestamp);
3357 
3358 	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);
3359 
3360 	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
3361 		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
3362 				pkt_ctr_delta, pkt_ctr_delta_thresh,
3363 				reo_params->link_id);
3364 		goto failure_debug;
3365 	}
3366 
3367 update_host_ss:
3368 	host_ss->valid = true;
3369 	host_ss->global_timestamp = reo_params->global_timestamp;
3370 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
3371 
3372 	return QDF_STATUS_SUCCESS;
3373 
3374 failure_debug:
3375 	mgmt_rx_reo_err("Cur frame valid = %u, pkt_ctr = %u, ts =%u",
3376 			reo_params->valid, reo_params->mgmt_pkt_ctr,
3377 			reo_params->global_timestamp);
3378 	mgmt_rx_reo_err("Last frame vailid = %u, pkt_ctr = %u, ts =%u",
3379 			host_ss->valid, host_ss->mgmt_pkt_ctr,
3380 			host_ss->global_timestamp);
3381 	qdf_assert_always(0);
3382 
3383 	return QDF_STATUS_E_FAILURE;
3384 }
3385 
3386 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
3387 /**
3388  * mgmt_rx_reo_ingress_frame_debug_info_enabled() - API to check whether ingress
3389  * frame info debug feaure is enabled
3390  * @ingress_frame_debug_info: Pointer to ingress frame debug info object
3391  *
3392  * Return: true or false
3393  */
3394 static bool
3395 mgmt_rx_reo_ingress_frame_debug_info_enabled
3396 		(struct reo_ingress_debug_info *ingress_frame_debug_info)
3397 {
3398 	return ingress_frame_debug_info->frame_list_size;
3399 }
3400 
3401 /**
3402  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
3403  * related to frames going into the reorder module
3404  * @reo_ctx: Pointer to reorder context
3405  *
3406  * API to print the stats related to frames going into the management
3407  * Rx reorder module.
3408  *
3409  * Return: QDF_STATUS
3410  */
3411 static QDF_STATUS
3412 mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
3413 {
3414 	struct reo_ingress_frame_stats *stats;
3415 	uint8_t link_id;
3416 	uint8_t desc_type;
3417 	uint8_t list_type;
3418 	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
3419 	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
3420 	uint64_t total_ingress_count = 0;
3421 	uint64_t reo_count_per_link[MAX_MLO_LINKS] = {0};
3422 	uint64_t reo_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
3423 	uint64_t total_reo_count = 0;
3424 	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
3425 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
3426 	uint64_t total_stale_count = 0;
3427 	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
3428 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
3429 	uint64_t total_error_count = 0;
3430 	uint64_t total_queued = 0;
3431 	uint64_t queued_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
3432 	uint64_t queued_per_link[MAX_MLO_LINKS] = {0};
3433 	uint64_t total_zero_wait_count_rx = 0;
3434 	uint64_t zero_wait_count_rx_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
3435 	uint64_t zero_wait_count_rx_per_link[MAX_MLO_LINKS] = {0};
3436 	uint64_t total_immediate_delivery = 0;
3437 	uint64_t immediate_delivery_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
3438 	uint64_t immediate_delivery_per_link[MAX_MLO_LINKS] = {0};
3439 
3440 	if (!reo_ctx)
3441 		return QDF_STATUS_E_NULL_VALUE;
3442 
3443 	stats = &reo_ctx->ingress_frame_debug_info.stats;
3444 
3445 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3446 		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
3447 		     desc_type++) {
3448 			ingress_count_per_link[link_id] +=
3449 				stats->ingress_count[link_id][desc_type];
3450 			reo_count_per_link[link_id] +=
3451 				stats->reo_count[link_id][desc_type];
3452 			stale_count_per_link[link_id] +=
3453 					stats->stale_count[link_id][desc_type];
3454 			error_count_per_link[link_id] +=
3455 					stats->error_count[link_id][desc_type];
3456 		}
3457 
3458 		total_ingress_count += ingress_count_per_link[link_id];
3459 		total_reo_count += reo_count_per_link[link_id];
3460 		total_stale_count += stale_count_per_link[link_id];
3461 		total_error_count += error_count_per_link[link_id];
3462 	}
3463 
3464 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
3465 	     desc_type++) {
3466 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3467 			ingress_count_per_desc_type[desc_type] +=
3468 				stats->ingress_count[link_id][desc_type];
3469 			reo_count_per_desc_type[desc_type] +=
3470 				stats->reo_count[link_id][desc_type];
3471 			stale_count_per_desc_type[desc_type] +=
3472 					stats->stale_count[link_id][desc_type];
3473 			error_count_per_desc_type[desc_type] +=
3474 					stats->error_count[link_id][desc_type];
3475 		}
3476 	}
3477 
3478 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3479 		for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
3480 		     list_type++) {
3481 			queued_per_link[link_id] +=
3482 				stats->queued_count[link_id][list_type];
3483 			zero_wait_count_rx_per_link[link_id] +=
3484 			    stats->zero_wait_count_rx_count[link_id][list_type];
3485 			immediate_delivery_per_link[link_id] +=
3486 			    stats->immediate_delivery_count[link_id][list_type];
3487 		}
3488 
3489 		total_queued += queued_per_link[link_id];
3490 		total_zero_wait_count_rx +=
3491 					zero_wait_count_rx_per_link[link_id];
3492 		total_immediate_delivery +=
3493 					immediate_delivery_per_link[link_id];
3494 	}
3495 
3496 	for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
3497 	     list_type++) {
3498 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3499 			queued_per_list[list_type] +=
3500 				stats->queued_count[link_id][list_type];
3501 			zero_wait_count_rx_per_list[list_type] +=
3502 			    stats->zero_wait_count_rx_count[link_id][list_type];
3503 			immediate_delivery_per_list[list_type] +=
3504 			    stats->immediate_delivery_count[link_id][list_type];
3505 		}
3506 	}
3507 
3508 	mgmt_rx_reo_alert("Ingress Frame Stats:");
3509 	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
3510 	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
3511 	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
3512 	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
3513 	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
3514 	mgmt_rx_reo_alert("\t------------------------------------");
3515 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
3516 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
3517 	mgmt_rx_reo_alert("\t-------------------------------------------");
3518 
3519 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3520 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
3521 				  stats->ingress_count[link_id][0],
3522 				  stats->ingress_count[link_id][1],
3523 				  stats->ingress_count[link_id][2],
3524 				  ingress_count_per_link[link_id]);
3525 		mgmt_rx_reo_alert("\t-------------------------------------------");
3526 	}
3527 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
3528 			  ingress_count_per_desc_type[0],
3529 			  ingress_count_per_desc_type[1],
3530 			  ingress_count_per_desc_type[2],
3531 			  total_ingress_count);
3532 
3533 	mgmt_rx_reo_alert("\t2) Reo required Frame Count:");
3534 	mgmt_rx_reo_alert("\t------------------------------------");
3535 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
3536 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
3537 	mgmt_rx_reo_alert("\t-------------------------------------------");
3538 
3539 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3540 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
3541 				  stats->reo_count[link_id][0],
3542 				  stats->reo_count[link_id][1],
3543 				  stats->reo_count[link_id][2],
3544 				  reo_count_per_link[link_id]);
3545 		mgmt_rx_reo_alert("\t-------------------------------------------");
3546 	}
3547 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
3548 			  reo_count_per_desc_type[0],
3549 			  reo_count_per_desc_type[1],
3550 			  reo_count_per_desc_type[2],
3551 			  total_reo_count);
3552 
3553 	mgmt_rx_reo_alert("\t3) Stale Frame Count:");
3554 	mgmt_rx_reo_alert("\t------------------------------------");
3555 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
3556 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
3557 	mgmt_rx_reo_alert("\t-------------------------------------------");
3558 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3559 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
3560 				  stats->stale_count[link_id][0],
3561 				  stats->stale_count[link_id][1],
3562 				  stats->stale_count[link_id][2],
3563 				  stale_count_per_link[link_id]);
3564 		mgmt_rx_reo_alert("\t-------------------------------------------");
3565 	}
3566 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
3567 			  stale_count_per_desc_type[0],
3568 			  stale_count_per_desc_type[1],
3569 			  stale_count_per_desc_type[2],
3570 			  total_stale_count);
3571 
3572 	mgmt_rx_reo_alert("\t4) Error Frame Count:");
3573 	mgmt_rx_reo_alert("\t------------------------------------");
3574 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
3575 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
3576 	mgmt_rx_reo_alert("\t-------------------------------------------");
3577 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3578 		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
3579 				  stats->error_count[link_id][0],
3580 				  stats->error_count[link_id][1],
3581 				  stats->error_count[link_id][2],
3582 				  error_count_per_link[link_id]);
3583 		mgmt_rx_reo_alert("\t-------------------------------------------");
3584 	}
3585 	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
3586 			  error_count_per_desc_type[0],
3587 			  error_count_per_desc_type[1],
3588 			  error_count_per_desc_type[2],
3589 			  total_error_count);
3590 
3591 	mgmt_rx_reo_alert("\t5) Host consumed frames related stats:");
3592 	mgmt_rx_reo_alert("\tOverall:");
3593 	mgmt_rx_reo_alert("\t------------------------------------------------");
3594 	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
3595 	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
3596 	mgmt_rx_reo_alert("\t------------------------------------------------");
3597 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3598 		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
3599 				  queued_per_link[link_id],
3600 				  zero_wait_count_rx_per_link[link_id],
3601 				  immediate_delivery_per_link[link_id]);
3602 		mgmt_rx_reo_alert("\t------------------------------------------------");
3603 	}
3604 	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
3605 			  total_queued,
3606 			  total_zero_wait_count_rx,
3607 			  total_immediate_delivery);
3608 
3609 	mgmt_rx_reo_alert("\t\ta) Ingress List:");
3610 	mgmt_rx_reo_alert("\t\t------------------------------------------------");
3611 	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
3612 	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
3613 	mgmt_rx_reo_alert("\t\t------------------------------------------------");
3614 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3615 		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
3616 				  stats->queued_count[link_id][0],
3617 				  stats->zero_wait_count_rx_count[link_id][0],
3618 				  stats->immediate_delivery_count[link_id][0]);
3619 		mgmt_rx_reo_alert("\t\t------------------------------------------------");
3620 	}
3621 	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
3622 			  queued_per_list[0],
3623 			  zero_wait_count_rx_per_list[0],
3624 			  immediate_delivery_per_list[0]);
3625 
3626 	mgmt_rx_reo_alert("\t\tb) Egress List:");
3627 	mgmt_rx_reo_alert("\t\t------------------------------------------------");
3628 	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
3629 	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
3630 	mgmt_rx_reo_alert("\t\t------------------------------------------------");
3631 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3632 		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
3633 				  stats->queued_count[link_id][1],
3634 				  stats->zero_wait_count_rx_count[link_id][1],
3635 				  stats->immediate_delivery_count[link_id][1]);
3636 		mgmt_rx_reo_alert("\t\t------------------------------------------------");
3637 	}
3638 	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
3639 			  queued_per_list[1],
3640 			  zero_wait_count_rx_per_list[1],
3641 			  immediate_delivery_per_list[1]);
3642 
3643 	return QDF_STATUS_SUCCESS;
3644 }
3645 
3646 /**
3647  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
3648  * the reorder algorithm.
3649  * @reo_ctx: management rx reorder context
3650  * @desc: Pointer to frame descriptor
3651  * @is_queued: Indicates whether this frame is queued to reorder list
3652  * @is_error: Indicates whether any error occurred during processing this frame
3653  *
3654  * Return: QDF_STATUS of operation
3655  */
3656 static QDF_STATUS
3657 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
3658 			      struct mgmt_rx_reo_frame_descriptor *desc,
3659 			      bool is_queued, bool is_error)
3660 {
3661 	struct reo_ingress_debug_info *ingress_frame_debug_info;
3662 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
3663 	struct reo_ingress_frame_stats *stats;
3664 	uint8_t link_id;
3665 	enum mgmt_rx_reo_list_type queued_list;
3666 
3667 	if (!reo_ctx || !desc)
3668 		return QDF_STATUS_E_NULL_VALUE;
3669 
3670 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
3671 
3672 	stats = &ingress_frame_debug_info->stats;
3673 	link_id = mgmt_rx_reo_get_link_id(desc->rx_params);
3674 	queued_list = desc->queued_list;
3675 	stats->ingress_count[link_id][desc->type]++;
3676 	if (desc->reo_required)
3677 		stats->reo_count[link_id][desc->type]++;
3678 	if (is_queued)
3679 		stats->queued_count[link_id][queued_list]++;
3680 	if (desc->zero_wait_count_rx)
3681 		stats->zero_wait_count_rx_count[link_id][queued_list]++;
3682 	if (desc->immediate_delivery)
3683 		stats->immediate_delivery_count[link_id][queued_list]++;
3684 	if (is_error)
3685 		stats->error_count[link_id][desc->type]++;
3686 	if (desc->is_stale)
3687 		stats->stale_count[link_id][desc->type]++;
3688 
3689 	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
3690 						(ingress_frame_debug_info))
3691 		return QDF_STATUS_SUCCESS;
3692 
3693 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
3694 			[ingress_frame_debug_info->next_index];
3695 
3696 	cur_frame_debug_info->link_id = link_id;
3697 	cur_frame_debug_info->mgmt_pkt_ctr =
3698 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
3699 	cur_frame_debug_info->global_timestamp =
3700 				mgmt_rx_reo_get_global_ts(desc->rx_params);
3701 	cur_frame_debug_info->start_timestamp =
3702 				mgmt_rx_reo_get_start_ts(desc->rx_params);
3703 	cur_frame_debug_info->end_timestamp =
3704 				mgmt_rx_reo_get_end_ts(desc->rx_params);
3705 	cur_frame_debug_info->duration_us =
3706 				mgmt_rx_reo_get_duration_us(desc->rx_params);
3707 	cur_frame_debug_info->desc_type = desc->type;
3708 	cur_frame_debug_info->frame_type = desc->frame_type;
3709 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
3710 	cur_frame_debug_info->wait_count = desc->wait_count;
3711 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
3712 		     desc->shared_snapshots,
3713 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
3714 			     sizeof(desc->shared_snapshots)));
3715 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
3716 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
3717 			     sizeof(desc->host_snapshot)));
3718 	cur_frame_debug_info->is_queued = is_queued;
3719 	cur_frame_debug_info->is_stale = desc->is_stale;
3720 	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
3721 	cur_frame_debug_info->queued_list = desc->queued_list;
3722 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
3723 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
3724 	cur_frame_debug_info->is_error = is_error;
3725 	cur_frame_debug_info->last_delivered_frame = desc->last_delivered_frame;
3726 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
3727 	cur_frame_debug_info->ingress_duration =
3728 			qdf_get_log_timestamp() - desc->ingress_timestamp;
3729 	cur_frame_debug_info->ingress_list_size_rx =
3730 					desc->ingress_list_size_rx;
3731 	cur_frame_debug_info->ingress_list_insertion_pos =
3732 					desc->ingress_list_insertion_pos;
3733 	cur_frame_debug_info->egress_list_size_rx =
3734 					desc->egress_list_size_rx;
3735 	cur_frame_debug_info->egress_list_insertion_pos =
3736 					desc->egress_list_insertion_pos;
3737 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
3738 	cur_frame_debug_info->reo_required = desc->reo_required;
3739 
3740 	ingress_frame_debug_info->next_index++;
3741 	ingress_frame_debug_info->next_index %=
3742 				ingress_frame_debug_info->frame_list_size;
3743 	if (ingress_frame_debug_info->next_index == 0)
3744 		ingress_frame_debug_info->wrap_aroud = true;
3745 
3746 	return QDF_STATUS_SUCCESS;
3747 }
3748 
/**
 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
 * about the latest frames entered the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be printed.
 *
 * Dumps, as a formatted table, the entries recorded by
 * mgmt_rx_reo_log_ingress_frame() in the ingress debug ring buffer,
 * oldest first.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					   uint16_t num_frames)
{
	struct reo_ingress_debug_info *ingress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;	/* horizontal rule string ("boarder" is the struct field name) */

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;

	/*
	 * The history is a ring buffer of frame_list_size slots; once
	 * "wrap_aroud" (sic) is set every slot holds a valid entry,
	 * otherwise only slots [0, next_index) are valid.
	 */
	if (ingress_frame_debug_info->wrap_aroud)
		num_valid_entries = ingress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = ingress_frame_debug_info->next_index;

	if (num_frames == 0) {
		/* Print every valid entry, starting from the oldest one */
		num_entries_to_print = num_valid_entries;

		if (ingress_frame_debug_info->wrap_aroud)
			start_index = ingress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		/* Print only the most recent num_frames valid entries */
		num_entries_to_print = qdf_min(num_frames, num_valid_entries);

		start_index = (ingress_frame_debug_info->next_index -
			       num_entries_to_print +
			       ingress_frame_debug_info->frame_list_size)
			      % ingress_frame_debug_info->frame_list_size;

		qdf_assert_always(start_index >= 0 &&
				  start_index < ingress_frame_debug_info->frame_list_size);
	}

	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				ingress_frame_debug_info->wrap_aroud,
				ingress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	boarder = ingress_frame_debug_info->boarder;

	/*
	 * NOTE(review): the header row reserves %94s for each snapshot
	 * column while the data rows below use %70s — confirm the intended
	 * column widths match.
	 */
	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%4s|%11s|%6s|%5s|%6s|%5s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"Index", "CPU", "D.type", "F.type", "F.subtype",
				"Link", "SeqNo", "Global ts",
				"Start ts", "End ts", "Dur", "Last ts",
				"Ingress ts", "Flags", "List", "Ingress Dur",
				"I Size", "I Pos", "E Size",
				"E Pos", "Wait Count", "Snapshot : link 0",
				"Snapshot : link 1", "Snapshot : link 2",
				"Snapshot : link 3", "Snapshot : link 4",
				"Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_ingress_debug_frame_info *info;
		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
		/*
		 * Flags column legend: E - error, S - stale, P - parallel rx,
		 * Q - queued, Z - zero wait count rx, I - immediate delivery,
		 * N - reorder not required. A space means the flag is unset.
		 */
		char flag_queued = ' ';
		char flag_stale = ' ';
		char flag_parallel_rx = ' ';
		char flag_error = ' ';
		char flag_zero_wait_count_rx = ' ';
		char flag_immediate_delivery = ' ';
		char flag_reo_required = ' ';
		int64_t ts_last_delivered_frame = -1;	/* -1 = no frame delivered yet */
		uint8_t link;

		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];

		if (info->last_delivered_frame.valid) {
			struct mgmt_rx_reo_params *reo_params;

			reo_params = &info->last_delivered_frame.reo_params;
			ts_last_delivered_frame = reo_params->global_timestamp;
		}

		if (info->is_queued)
			flag_queued = 'Q';

		if (info->is_stale)
			flag_stale = 'S';

		if (info->is_parallel_rx)
			flag_parallel_rx = 'P';

		if (info->is_error)
			flag_error = 'E';

		if (info->zero_wait_count_rx)
			flag_zero_wait_count_rx = 'Z';

		if (info->immediate_delivery)
			flag_immediate_delivery = 'I';

		if (!info->reo_required)
			flag_reo_required = 'N';

		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c",flag_error,
			 flag_stale, flag_parallel_rx, flag_queued,
			 flag_zero_wait_count_rx, flag_immediate_delivery,
			 flag_reo_required);
		snprintf(wait_count, sizeof(wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->wait_count.total_count,
			 info->wait_count.per_link_count[0],
			 info->wait_count.per_link_count[1],
			 info->wait_count.per_link_count[2],
			 info->wait_count.per_link_count[3],
			 info->wait_count.per_link_count[4],
			 info->wait_count.per_link_count[5]);

		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			/* Each snapshot is rendered as (valid, pkt_ctr, global_ts) */
			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%11s|%4u|%11llu|%6d|%5d|%6d|%5d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
					entry, info->cpu_id, info->desc_type,
					info->frame_type, info->frame_subtype,
					info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->start_timestamp,
					info->end_timestamp,
					info->duration_us,
					ts_last_delivered_frame,
					info->ingress_timestamp, flags,
					info->queued_list,
					info->ingress_duration,
					info->ingress_list_size_rx,
					info->ingress_list_insertion_pos,
					info->egress_list_size_rx,
					info->egress_list_insertion_pos,
					wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		/* Walk the ring buffer with wrap-around */
		index++;
		index %= ingress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
3955 #else
/**
 * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
 * related to frames going into the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * No-op stub compiled when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is not defined;
 * ingress frame stats are not collected in that configuration, so there is
 * nothing to print.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
3971 
/**
 * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
 * the reorder algorithm.
 * @reo_ctx: management rx reorder context
 * @desc: Pointer to frame descriptor
 * @is_queued: Indicates whether this frame is queued to reorder list
 * @is_error: Indicates whether any error occurred during processing this frame
 *
 * No-op stub compiled when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is not defined;
 * no ingress history or stats are recorded in that configuration.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
			      struct mgmt_rx_reo_frame_descriptor *desc,
			      bool is_queued, bool is_error)
{
	return QDF_STATUS_SUCCESS;
}
3989 
/**
 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
 * the latest frames entering the reorder module
 * @reo_ctx: management rx reorder context
 *
 * No-op stub compiled when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is not defined.
 *
 * NOTE(review): this stub's signature differs from the debug variant, which
 * additionally takes a "num_frames" argument — confirm that every caller of
 * this function is compiled only when debug support is enabled.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
4002 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
4003 
/*
 * wlan_mgmt_rx_reo_algo_entry() - Entry point of the management Rx reorder
 * algorithm for a single incoming frame/event. Updates the host snapshot,
 * computes the per-link wait counts, updates the ingress/egress reorder
 * lists (all under reo_algo_entry_lock — see the critical-section note
 * below), then moves ready entries to the egress list and delivers them.
 * On success *is_queued reports whether the frame was queued to a reorder
 * list.
 */
QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	struct mgmt_rx_reo_ingress_list *ingress_list;
	struct mgmt_rx_reo_egress_list *egress_list;
	QDF_STATUS ret;

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}
	ingress_list = &reo_ctx->ingress_list;
	egress_list = &reo_ctx->egress_list;

	/*
	 * Critical Section = Host snapshot update + Calculation of wait
	 * counts + Update reorder list. Following section describes the
	 * motivation for making this a critical section.
	 * Let's take an example of 2 links (Link A & B) and each has received
	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
	 * MLO global time stamp of B1. Host is concurrently executing
	 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs.
	 *
	 * A lockless version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is
	 * as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     Host snapshot update
	 *     Calculation of wait counts
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * We may run into race conditions under the following sequence of
	 * operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1
	 * 2. Host snapshot update for link B in context of frame B1
	 * 3. Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 6. Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This leads to incorrect behaviour as B1 goes to upper layer before
	 * A1.
	 *
	 * To prevent this let's make Host snapshot update + Calculate wait
	 * count a critical section by adding locks. The updated version of the
	 * API ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *     UNLOCK
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * Even with this API we may run into race conditions under the
	 * following sequence of operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1 +
	 *    Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 2. Host snapshot update for link B in context of frame B1 +
	 *    Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This also leads to incorrect behaviour as B1 goes to upper layer
	 * before A1.
	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count + Update reorder list a critical section by adding locks.
	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
	 * is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *         Update reorder list
	 *     UNLOCK
	 *     Release to upper layer
	 * }
	 */
	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);

	qdf_assert_always(desc->rx_params->reo_params->valid);
	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);

	/* Host/FW consumed frames must carry a non-zero duration */
	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
	    desc->type == MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME)
		qdf_assert_always(desc->rx_params->reo_params->duration_us);

	/* Update the Host snapshot */
	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Compute wait count for this frame/event */
	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Update ingress and egress list */
	ret = mgmt_rx_reo_update_lists(ingress_list, egress_list, desc,
				       is_queued);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/*
	 * Logged with is_error = false; a logging failure here must not go
	 * through the "failure" label, which would log the frame again with
	 * is_error = true.
	 */
	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
					    *is_queued, false);
	if (QDF_IS_STATUS_ERROR(ret)) {
		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
		return ret;
	}

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
							      egress_list);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	/* Finally, release the entries for which pending frame is received */
	return mgmt_rx_reo_release_egress_list_entries(reo_ctx);

failure:
	/*
	 * Ignore the return value of this function call, return
	 * the actual reason for failure.
	 */
	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	return ret;
}
4174 
4175 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
/**
 * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * No-op stub compiled when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined.
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}
4188 
/**
 * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * No-op stub compiled when WLAN_MGMT_RX_REO_SIM_SUPPORT is not defined.
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}
4201 
/* No-op stub: reorder simulation support is compiled out */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
4207 
/* No-op stub: reorder simulation support is compiled out */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
4213 #else
4214 /**
4215  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
4216  * master management frame list
4217  * @master_frame_list: pointer to master management frame list
4218  * @frame: pointer to management frame parameters
4219  *
4220  * This API removes frames from the master management frame list. This API is
4221  * used in case of FW consumed management frames or management frames which
4222  * are dropped at host due to any error.
4223  *
4224  * Return: QDF_STATUS of operation
4225  */
4226 static QDF_STATUS
4227 mgmt_rx_reo_sim_remove_frame_from_master_list(
4228 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
4229 		const struct mgmt_rx_frame_params *frame)
4230 {
4231 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
4232 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
4233 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
4234 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
4235 	QDF_STATUS status;
4236 
4237 	if (!master_frame_list) {
4238 		mgmt_rx_reo_err("Mgmt master frame list is null");
4239 		return QDF_STATUS_E_NULL_VALUE;
4240 	}
4241 
4242 	if (!frame) {
4243 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
4244 		return QDF_STATUS_E_NULL_VALUE;
4245 	}
4246 
4247 	qdf_spin_lock(&master_frame_list->lock);
4248 
4249 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
4250 			  node) {
4251 		if (pending_entry->params.link_id == frame->link_id &&
4252 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4253 		    pending_entry->params.global_timestamp ==
4254 		    frame->global_timestamp) {
4255 			matching_pend_entry = pending_entry;
4256 			break;
4257 		}
4258 	}
4259 
4260 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
4261 		if (stale_entry->params.link_id == frame->link_id &&
4262 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4263 		    stale_entry->params.global_timestamp ==
4264 		    frame->global_timestamp) {
4265 			matching_stale_entry = stale_entry;
4266 			break;
4267 		}
4268 	}
4269 
4270 	/* Found in pending and stale list. Duplicate entries, assert */
4271 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
4272 
4273 	if (!matching_pend_entry && !matching_stale_entry) {
4274 		qdf_spin_unlock(&master_frame_list->lock);
4275 		mgmt_rx_reo_err("No matching frame in pend/stale list");
4276 		return QDF_STATUS_E_FAILURE;
4277 	}
4278 
4279 	if (matching_pend_entry) {
4280 		status = qdf_list_remove_node(&master_frame_list->pending_list,
4281 					      &matching_pend_entry->node);
4282 		if (QDF_IS_STATUS_ERROR(status)) {
4283 			qdf_spin_unlock(&master_frame_list->lock);
4284 			mgmt_rx_reo_err("Failed to remove the matching entry");
4285 			return status;
4286 		}
4287 
4288 		qdf_mem_free(matching_pend_entry);
4289 	}
4290 
4291 	if (matching_stale_entry) {
4292 		status = qdf_list_remove_node(&master_frame_list->stale_list,
4293 					      &matching_stale_entry->node);
4294 		if (QDF_IS_STATUS_ERROR(status)) {
4295 			qdf_spin_unlock(&master_frame_list->lock);
4296 			mgmt_rx_reo_err("Failed to remove the matching entry");
4297 			return status;
4298 		}
4299 
4300 		qdf_mem_free(matching_stale_entry);
4301 	}
4302 
4303 	qdf_spin_unlock(&master_frame_list->lock);
4304 
4305 	return QDF_STATUS_SUCCESS;
4306 }
4307 
4308 /**
4309  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
4310  * pending management frame list
4311  * @master_frame_list: pointer to master management frame list
4312  * @frame: pointer to management frame parameters
4313  *
4314  * This API removes frames from the pending management frame list. This API is
4315  * used in case of FW consumed management frames or management frames which
4316  * are dropped at host due to any error.
4317  *
4318  * Return: QDF_STATUS of operation
4319  */
4320 static QDF_STATUS
4321 mgmt_rx_reo_sim_remove_frame_from_pending_list(
4322 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
4323 		const struct mgmt_rx_frame_params *frame)
4324 {
4325 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
4326 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
4327 	QDF_STATUS status;
4328 
4329 	if (!master_frame_list) {
4330 		mgmt_rx_reo_err("Mgmt master frame list is null");
4331 		return QDF_STATUS_E_NULL_VALUE;
4332 	}
4333 
4334 	if (!frame) {
4335 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
4336 		return QDF_STATUS_E_NULL_VALUE;
4337 	}
4338 
4339 	qdf_spin_lock(&master_frame_list->lock);
4340 
4341 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
4342 		if (cur_entry->params.link_id == frame->link_id &&
4343 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
4344 		    cur_entry->params.global_timestamp ==
4345 		    frame->global_timestamp) {
4346 			matching_entry = cur_entry;
4347 			break;
4348 		}
4349 	}
4350 
4351 	if (!matching_entry) {
4352 		qdf_spin_unlock(&master_frame_list->lock);
4353 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
4354 		return QDF_STATUS_E_FAILURE;
4355 	}
4356 
4357 	status = qdf_list_remove_node(&master_frame_list->pending_list,
4358 				      &matching_entry->node);
4359 	if (QDF_IS_STATUS_ERROR(status)) {
4360 		qdf_spin_unlock(&master_frame_list->lock);
4361 		mgmt_rx_reo_err("Failed to remove the matching entry");
4362 		return status;
4363 	}
4364 
4365 	qdf_mem_free(matching_entry);
4366 
4367 	qdf_spin_unlock(&master_frame_list->lock);
4368 
4369 
4370 	return QDF_STATUS_SUCCESS;
4371 }
4372 
4373 /**
4374  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
4375  * pending management frame list
4376  * @master_frame_list: pointer to master management frame list
4377  * @frame: pointer to management frame parameters
4378  *
4379  * This API inserts frames to the pending management frame list. This API is
4380  * used to insert frames generated by the MAC HW to the pending frame list.
4381  *
4382  * Return: QDF_STATUS of operation
4383  */
4384 static QDF_STATUS
4385 mgmt_rx_reo_sim_add_frame_to_pending_list(
4386 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
4387 		const struct mgmt_rx_frame_params *frame)
4388 {
4389 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
4390 	QDF_STATUS status;
4391 
4392 	if (!master_frame_list) {
4393 		mgmt_rx_reo_err("Mgmt master frame list is null");
4394 		return QDF_STATUS_E_NULL_VALUE;
4395 	}
4396 
4397 	if (!frame) {
4398 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
4399 		return QDF_STATUS_E_NULL_VALUE;
4400 	}
4401 
4402 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
4403 	if (!new_entry) {
4404 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
4405 		return QDF_STATUS_E_NOMEM;
4406 	}
4407 
4408 	new_entry->params = *frame;
4409 
4410 	qdf_spin_lock(&master_frame_list->lock);
4411 
4412 	status = qdf_list_insert_back(&master_frame_list->pending_list,
4413 				      &new_entry->node);
4414 
4415 	qdf_spin_unlock(&master_frame_list->lock);
4416 
4417 	if (QDF_IS_STATUS_ERROR(status)) {
4418 		mgmt_rx_reo_err("Failed to add frame to pending list");
4419 		qdf_mem_free(new_entry);
4420 		return status;
4421 	}
4422 
4423 	return QDF_STATUS_SUCCESS;
4424 }
4425 
4426 QDF_STATUS
4427 mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
4428 				 struct mgmt_rx_event_params *mgmt_rx_params)
4429 {
4430 	struct mgmt_rx_reo_context *reo_context;
4431 	struct mgmt_rx_reo_sim_context *sim_context;
4432 	QDF_STATUS status;
4433 	struct mgmt_rx_reo_params *reo_params;
4434 
4435 	if (!mgmt_rx_params) {
4436 		mgmt_rx_reo_err("Mgmt rx params null");
4437 		return QDF_STATUS_E_NULL_VALUE;
4438 	}
4439 
4440 	reo_params = mgmt_rx_params->reo_params;
4441 
4442 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
4443 	if (!reo_context) {
4444 		mgmt_rx_reo_err("Mgmt reo context is null");
4445 		return QDF_STATUS_E_NULL_VALUE;
4446 	}
4447 
4448 	sim_context = &reo_context->sim_context;
4449 
4450 	qdf_spin_lock(&sim_context->master_frame_list.lock);
4451 
4452 	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
4453 		qdf_spin_unlock(&sim_context->master_frame_list.lock);
4454 		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
4455 		qdf_assert_always(0);
4456 	} else {
4457 		struct mgmt_rx_frame_params *cur_entry_params;
4458 		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
4459 		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
4460 
4461 		/**
4462 		 * Make sure the frames delivered to upper layer are in the
4463 		 * increasing order of global time stamp. For that the frame
4464 		 * which is being delivered should be present at the head of the
4465 		 * pending frame list. There could be multiple frames with the
4466 		 * same global time stamp in the pending frame list. Search
4467 		 * among all the frames at the head of the list which has the
4468 		 * same global time stamp as the frame which is being delivered.
4469 		 * To find matching frame, check whether packet counter,
4470 		 * global time stamp and link id are same.
4471 		 */
4472 		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
4473 				  cur_entry, node) {
4474 			cur_entry_params = &cur_entry->params;
4475 
4476 			if (cur_entry_params->global_timestamp !=
4477 			    reo_params->global_timestamp)
4478 				break;
4479 
4480 			if (cur_entry_params->link_id == reo_params->link_id &&
4481 			    cur_entry_params->mgmt_pkt_ctr ==
4482 			    reo_params->mgmt_pkt_ctr) {
4483 				matching_entry = cur_entry;
4484 				break;
4485 			}
4486 		}
4487 
4488 		if (!matching_entry) {
4489 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
4490 			mgmt_rx_reo_err("reo sim failure: mismatch");
4491 			qdf_assert_always(0);
4492 		}
4493 
4494 		status = qdf_list_remove_node(
4495 				&sim_context->master_frame_list.pending_list,
4496 				&matching_entry->node);
4497 		qdf_mem_free(matching_entry);
4498 
4499 		if (QDF_IS_STATUS_ERROR(status)) {
4500 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
4501 			mgmt_rx_reo_err("Failed to remove matching entry");
4502 			return status;
4503 		}
4504 	}
4505 
4506 	qdf_spin_unlock(&sim_context->master_frame_list.lock);
4507 
4508 	mgmt_rx_reo_debug("Successfully processed mgmt frame");
4509 	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
4510 			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
4511 			  reo_params->global_timestamp);
4512 
4513 	return QDF_STATUS_SUCCESS;
4514 }
4515 
4516 /**
4517  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
4518  * @percentage_true: probability (in percentage) of true
4519  *
4520  * API to generate true with probability @percentage_true % and false with
4521  * probability (100 - @percentage_true) %.
4522  *
4523  * Return: true with probability @percentage_true % and false with probability
4524  * (100 - @percentage_true) %
4525  */
4526 static bool
4527 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
4528 {
4529 	uint32_t rand;
4530 
4531 	if (percentage_true > 100) {
4532 		mgmt_rx_reo_err("Invalid probability value for true, %u",
4533 				percentage_true);
4534 		return -EINVAL;
4535 	}
4536 
4537 	get_random_bytes(&rand, sizeof(rand));
4538 
4539 	return ((rand % 100) < percentage_true);
4540 }
4541 
4542 /**
4543  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
4544  * value in the range [0, max)
4545  * @max: upper limit for the output
4546  *
4547  * API to generate random unsigned integer value in the range [0, max).
4548  *
4549  * Return: unsigned integer value in the range [0, max)
4550  */
4551 static uint32_t
4552 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
4553 {
4554 	uint32_t rand;
4555 
4556 	get_random_bytes(&rand, sizeof(rand));
4557 
4558 	return (rand % max);
4559 }
4560 
4561 /**
4562  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
4563  * @sleeptime_us: Sleep time in micro seconds
4564  *
4565  * This API uses msleep() internally. So the granularity is limited to
4566  * milliseconds.
4567  *
4568  * Return: none
4569  */
4570 static void
4571 mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
4572 {
4573 	msleep(sleeptime_us / USEC_PER_MSEC);
4574 }
4575 
4576 /**
4577  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
4578  * layer
4579  * @arg: Argument
4580  *
4581  * This API handles the management frame at the host layer. This is applicable
4582  * for simulation alone.
4583  *
4584  * Return: none
4585  */
4586 static void
4587 mgmt_rx_reo_sim_frame_handler_host(void *arg)
4588 {
4589 	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
4590 	uint32_t fw_to_host_delay_us;
4591 	bool is_error_frame = false;
4592 	int8_t link_id = -1;
4593 	struct mgmt_rx_event_params *rx_params;
4594 	QDF_STATUS status;
4595 	struct mgmt_rx_reo_sim_context *sim_context;
4596 	struct wlan_objmgr_pdev *pdev;
4597 	uint8_t ml_grp_id;
4598 
4599 	if (!frame_fw) {
4600 		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
4601 				link_id);
4602 		goto error_print;
4603 	}
4604 
4605 	link_id = frame_fw->params.link_id;
4606 
4607 	sim_context = frame_fw->sim_context;
4608 	if (!sim_context) {
4609 		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
4610 				link_id);
4611 		goto error_free_fw_frame;
4612 	}
4613 
4614 	ml_grp_id = sim_context->mlo_grp_id;
4615 
4616 	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
4617 			      mgmt_rx_reo_sim_get_random_unsigned_int(
4618 			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);
4619 
4620 	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);
4621 
4622 	if (!frame_fw->is_consumed_by_fw) {
4623 		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
4624 				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);
4625 
4626 		/**
4627 		 * This frame should be present in pending/stale list of the
4628 		 * master frame list. Error frames need not be reordered
4629 		 * by reorder algorithm. It is just used for book
4630 		 * keeping purposes. Hence remove it from the master list.
4631 		 */
4632 		if (is_error_frame) {
4633 			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
4634 					&sim_context->master_frame_list,
4635 					&frame_fw->params);
4636 
4637 			if (QDF_IS_STATUS_ERROR(status)) {
4638 				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
4639 						link_id);
4640 				qdf_assert_always(0);
4641 			}
4642 		}
4643 	}
4644 
4645 	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
4646 			  link_id, frame_fw->params.global_timestamp,
4647 			  frame_fw->params.mgmt_pkt_ctr,
4648 			  frame_fw->is_consumed_by_fw, is_error_frame);
4649 
4650 	rx_params = alloc_mgmt_rx_event_params();
4651 	if (!rx_params) {
4652 		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
4653 				link_id);
4654 		goto error_free_fw_frame;
4655 	}
4656 
4657 	rx_params->reo_params->link_id = frame_fw->params.link_id;
4658 	rx_params->reo_params->global_timestamp =
4659 					frame_fw->params.global_timestamp;
4660 	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
4661 	rx_params->reo_params->valid = true;
4662 
4663 	pdev = wlan_get_pdev_from_mlo_link_id(
4664 			link_id, ml_grp_id, WLAN_MGMT_RX_REO_SIM_ID);
4665 	if (!pdev) {
4666 		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
4667 		goto error_free_mgmt_rx_event_params;
4668 	}
4669 
4670 	if (is_error_frame) {
4671 		status = tgt_mgmt_rx_reo_host_drop_handler(
4672 						pdev, rx_params->reo_params);
4673 		free_mgmt_rx_event_params(rx_params);
4674 	} else if (frame_fw->is_consumed_by_fw) {
4675 		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
4676 						pdev, rx_params->reo_params);
4677 		free_mgmt_rx_event_params(rx_params);
4678 	} else {
4679 		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
4680 	}
4681 
4682 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
4683 
4684 	if (QDF_IS_STATUS_ERROR(status)) {
4685 		mgmt_rx_reo_err("Failed to execute reo algorithm");
4686 		goto error_free_fw_frame;
4687 	}
4688 
4689 	qdf_mem_free(frame_fw);
4690 
4691 	return;
4692 
4693 error_free_mgmt_rx_event_params:
4694 	free_mgmt_rx_event_params(rx_params);
4695 error_free_fw_frame:
4696 	qdf_mem_free(frame_fw);
4697 error_print:
4698 	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
4699 			link_id);
4700 }
4701 
4702 /**
4703  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
4704  * frame reordering
4705  * @link_id: link id
4706  * @id: snapshot id
4707  * @value: snapshot value
4708  * @ml_grp_id: MLO group id which it belongs to
4709  *
4710  * This API writes the snapshots used for management frame reordering. MAC HW
4711  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
4712  * snapshots.
4713  *
4714  * Return: QDF_STATUS
4715  */
4716 static QDF_STATUS
4717 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id, uint8_t ml_grp_id,
4718 			       enum mgmt_rx_reo_shared_snapshot_id id,
4719 			       struct mgmt_rx_reo_shared_snapshot value)
4720 {
4721 	struct wlan_objmgr_pdev *pdev;
4722 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
4723 	QDF_STATUS status;
4724 
4725 	pdev = wlan_get_pdev_from_mlo_link_id(
4726 			link_id, ml_grp_id,
4727 			WLAN_MGMT_RX_REO_SIM_ID);
4728 
4729 	if (!pdev) {
4730 		mgmt_rx_reo_err("pdev is null");
4731 		return QDF_STATUS_E_NULL_VALUE;
4732 	}
4733 
4734 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
4735 						      &snapshot_address);
4736 
4737 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
4738 
4739 	if (QDF_IS_STATUS_ERROR(status)) {
4740 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
4741 				id, pdev);
4742 		return QDF_STATUS_E_FAILURE;
4743 	}
4744 
4745 	snapshot_address->mgmt_rx_reo_snapshot_low =
4746 						value.mgmt_rx_reo_snapshot_low;
4747 	snapshot_address->mgmt_rx_reo_snapshot_high =
4748 						value.mgmt_rx_reo_snapshot_high;
4749 
4750 	return QDF_STATUS_SUCCESS;
4751 }
4752 
/*
 * Bit layout of the simulated shared snapshot.
 * Low word : valid(1 bit) | mgmt_pkt_ctr(16 bits) | global_timestamp[14:0]
 * High word: global_timestamp[31:15](17 bits) | redundant mgmt_pkt_ctr
 *            (lower 15 bits of the packet counter)
 */
#define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
#define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
#define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
#define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
#define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
#define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)

#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
4764 
4765 /**
4766  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
4767  * management frame
4768  * @global_timestamp: global time stamp
4769  * @mgmt_pkt_ctr: management packet counter
4770  *
4771  * This API gets the snapshot value for a frame with time stamp
4772  * @global_timestamp and sequence number @mgmt_pkt_ctr.
4773  *
4774  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
4775  */
4776 static struct mgmt_rx_reo_shared_snapshot
4777 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
4778 				   uint16_t mgmt_pkt_ctr)
4779 {
4780 	struct mgmt_rx_reo_shared_snapshot snapshot = {0};
4781 
4782 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4783 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
4784 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
4785 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4786 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
4787 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
4788 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
4789 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
4790 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
4791 		     global_timestamp);
4792 
4793 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4794 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
4795 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
4796 		     global_timestamp >> 15);
4797 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
4798 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
4799 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
4800 		     mgmt_pkt_ctr);
4801 
4802 	return snapshot;
4803 }
4804 
4805 /**
4806  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
4807  * @arg: Argument
4808  *
4809  * This API handles the management frame at the fw layer. This is applicable
4810  * for simulation alone.
4811  *
4812  * Return: none
4813  */
4814 static void
4815 mgmt_rx_reo_sim_frame_handler_fw(void *arg)
4816 {
4817 	struct mgmt_rx_frame_mac_hw *frame_hw =
4818 					(struct mgmt_rx_frame_mac_hw *)arg;
4819 	uint32_t mac_hw_to_fw_delay_us;
4820 	bool is_consumed_by_fw;
4821 	struct  mgmt_rx_frame_fw *frame_fw;
4822 	int8_t link_id = -1;
4823 	QDF_STATUS status;
4824 	struct mgmt_rx_reo_sim_context *sim_context;
4825 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4826 	struct mgmt_rx_reo_shared_snapshot snapshot_value;
4827 	bool ret;
4828 	uint8_t ml_grp_id;
4829 
4830 	if (!frame_hw) {
4831 		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
4832 				link_id);
4833 		qdf_assert_always(0);
4834 	}
4835 
4836 	link_id = frame_hw->params.link_id;
4837 
4838 	sim_context = frame_hw->sim_context;
4839 	if (!sim_context) {
4840 		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
4841 				link_id);
4842 		goto error_free_mac_hw_frame;
4843 	}
4844 
4845 	ml_grp_id = sim_context->mlo_grp_id;
4846 
4847 	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
4848 			mgmt_rx_reo_sim_get_random_unsigned_int(
4849 			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
4850 	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);
4851 
4852 	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
4853 			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);
4854 
4855 	if (is_consumed_by_fw) {
4856 		/**
4857 		 * This frame should be present in pending/stale list of the
4858 		 * master frame list. FW consumed frames need not be reordered
4859 		 * by reorder algorithm. It is just used for book
4860 		 * keeping purposes. Hence remove it from the master list.
4861 		 */
4862 		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
4863 					&sim_context->master_frame_list,
4864 					&frame_hw->params);
4865 
4866 		if (QDF_IS_STATUS_ERROR(status)) {
4867 			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
4868 					link_id);
4869 			qdf_assert_always(0);
4870 		}
4871 	}
4872 
4873 	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
4874 			  link_id, frame_hw->params.global_timestamp,
4875 			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);
4876 
4877 	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
4878 	if (!frame_fw) {
4879 		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
4880 				link_id);
4881 		goto error_free_mac_hw_frame;
4882 	}
4883 
4884 	frame_fw->params = frame_hw->params;
4885 	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
4886 	frame_fw->sim_context = frame_hw->sim_context;
4887 
4888 	snapshot_id = is_consumed_by_fw ?
4889 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
4890 		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED;
4891 
4892 	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
4893 					frame_hw->params.global_timestamp,
4894 					frame_hw->params.mgmt_pkt_ctr);
4895 
4896 	status = mgmt_rx_reo_sim_write_snapshot(
4897 			link_id, ml_grp_id,
4898 			snapshot_id, snapshot_value);
4899 
4900 	if (QDF_IS_STATUS_ERROR(status)) {
4901 		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
4902 				link_id, snapshot_id);
4903 		goto error_free_fw_frame;
4904 	}
4905 
4906 	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
4907 				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
4908 	if (QDF_IS_STATUS_ERROR(status)) {
4909 		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
4910 		goto error_free_fw_frame;
4911 	}
4912 
4913 	ret = qdf_queue_work(
4914 			NULL, sim_context->host_mgmt_frame_handler[link_id],
4915 			&frame_fw->frame_handler_host);
4916 	if (!ret) {
4917 		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
4918 				link_id);
4919 		goto error_free_fw_frame;
4920 	}
4921 
4922 	qdf_mem_free(frame_hw);
4923 
4924 	return;
4925 
4926 error_free_fw_frame:
4927 	qdf_mem_free(frame_fw);
4928 error_free_mac_hw_frame:
4929 	qdf_mem_free(frame_hw);
4930 
4931 	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
4932 			link_id);
4933 }
4934 
4935 /**
4936  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
4937  * from the index to the valid link list
4938  * @valid_link_list_index: Index to list of valid links
4939  *
4940  * Return: link id
4941  */
4942 static int8_t
4943 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
4944 {
4945 	struct mgmt_rx_reo_sim_context *sim_context;
4946 
4947 	if (valid_link_list_index >= MAX_MLO_LINKS) {
4948 		mgmt_rx_reo_err("Invalid index %u to valid link list",
4949 				valid_link_list_index);
4950 		return MGMT_RX_REO_INVALID_LINK_ID;
4951 	}
4952 
4953 	sim_context = mgmt_rx_reo_sim_get_context();
4954 	if (!sim_context) {
4955 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
4956 		return MGMT_RX_REO_INVALID_LINK_ID;
4957 	}
4958 
4959 	return sim_context->link_id_to_pdev_map.valid_link_list
4960 						[valid_link_list_index];
4961 }
4962 
4963 /**
4964  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
4965  * the air
4966  * @mac_hw: pointer to structure representing MAC HW
4967  * @num_mlo_links: number of MLO HW links
4968  * @frame: pointer to management frame parameters
4969  *
4970  * This API simulates the management frame reception from air.
4971  *
4972  * Return: QDF_STATUS
4973  */
4974 static QDF_STATUS
4975 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
4976 				 uint8_t num_mlo_links,
4977 				 struct mgmt_rx_frame_params *frame)
4978 {
4979 	uint8_t valid_link_list_index;
4980 	int8_t link_id;
4981 
4982 	if (!mac_hw) {
4983 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
4984 		return QDF_STATUS_E_NULL_VALUE;
4985 	}
4986 
4987 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
4988 		mgmt_rx_reo_err("Invalid number of MLO links %u",
4989 				num_mlo_links);
4990 		return QDF_STATUS_E_INVAL;
4991 	}
4992 
4993 	if (!frame) {
4994 		mgmt_rx_reo_err("pointer to frame parameters is null");
4995 		return QDF_STATUS_E_NULL_VALUE;
4996 	}
4997 
4998 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
4999 							num_mlo_links);
5000 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
5001 	qdf_assert_always(link_id >= 0);
5002 	qdf_assert_always(link_id < MAX_MLO_LINKS);
5003 
5004 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
5005 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
5006 	frame->link_id = link_id;
5007 
5008 	return QDF_STATUS_SUCCESS;
5009 }
5010 
5011 /**
5012  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
5013  * HW in case of any Rx error.
5014  * @mac_hw: pointer to structure representing MAC HW
5015  * @frame: pointer to management frame parameters
5016  *
5017  * Return: QDF_STATUS
5018  */
5019 static QDF_STATUS
5020 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
5021 				      struct mgmt_rx_frame_params *frame)
5022 {
5023 	if (!mac_hw) {
5024 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
5025 		return QDF_STATUS_E_NULL_VALUE;
5026 	}
5027 
5028 	if (!frame) {
5029 		mgmt_rx_reo_err("pointer to frame parameters is null");
5030 		return QDF_STATUS_E_NULL_VALUE;
5031 	}
5032 
5033 	if (frame->link_id >= MAX_MLO_LINKS) {
5034 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
5035 		return QDF_STATUS_E_INVAL;
5036 	}
5037 
5038 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
5039 
5040 	return QDF_STATUS_SUCCESS;
5041 }
5042 
5043 /**
5044  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
5045  * @data: pointer to data input
5046  *
5047  * kthread handler to simulate MAC HW.
5048  *
5049  * Return: 0 for success, else failure
5050  */
5051 static int
5052 mgmt_rx_reo_sim_mac_hw_thread(void *data)
5053 {
5054 	struct mgmt_rx_reo_sim_context *sim_context = data;
5055 	struct mgmt_rx_reo_sim_mac_hw *mac_hw;
5056 
5057 	if (!sim_context) {
5058 		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
5059 		return -EINVAL;
5060 	}
5061 
5062 	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;
5063 
5064 	while (!qdf_thread_should_stop()) {
5065 		uint32_t inter_frame_delay_us;
5066 		struct mgmt_rx_frame_params frame;
5067 		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
5068 		int8_t link_id = -1;
5069 		QDF_STATUS status;
5070 		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
5071 		struct mgmt_rx_reo_shared_snapshot snapshot_value;
5072 		int8_t num_mlo_links;
5073 		bool ret;
5074 		uint8_t ml_grp_id;
5075 
5076 		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
5077 		if (num_mlo_links < 0 ||
5078 		    num_mlo_links > MAX_MLO_LINKS) {
5079 			mgmt_rx_reo_err("Invalid number of MLO links %d",
5080 					num_mlo_links);
5081 			qdf_assert_always(0);
5082 		}
5083 
5084 		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
5085 							  &frame);
5086 		if (QDF_IS_STATUS_ERROR(status)) {
5087 			mgmt_rx_reo_err("Receive from the air failed");
5088 			/**
5089 			 * Frame reception failed and we are not sure about the
5090 			 * link id. Without link id there is no way to restore
5091 			 * the mac hw state. Hence assert unconditionally.
5092 			 */
5093 			qdf_assert_always(0);
5094 		}
5095 		link_id = frame.link_id;
5096 
5097 		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
5098 				  link_id, frame.global_timestamp,
5099 				  frame.mgmt_pkt_ctr);
5100 
5101 		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
5102 		if (!frame_mac_hw) {
5103 			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
5104 					link_id);
5105 
5106 			/* Cleanup */
5107 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5108 								mac_hw, &frame);
5109 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5110 
5111 			continue;
5112 		}
5113 
5114 		frame_mac_hw->params = frame;
5115 		frame_mac_hw->sim_context = sim_context;
5116 		ml_grp_id = sim_context->ml_grp_id;
5117 
5118 		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
5119 				&sim_context->master_frame_list, &frame);
5120 		if (QDF_IS_STATUS_ERROR(status)) {
5121 			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
5122 					link_id);
5123 
5124 			/* Cleanup */
5125 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5126 								mac_hw, &frame);
5127 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5128 
5129 			qdf_mem_free(frame_mac_hw);
5130 
5131 			continue;
5132 		}
5133 
5134 		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
5135 		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
5136 						frame.global_timestamp,
5137 						frame.mgmt_pkt_ctr);
5138 
5139 		status = mgmt_rx_reo_sim_write_snapshot(
5140 				link_id, ml_grp_id
5141 				snapshot_id, snapshot_value);
5142 		if (QDF_IS_STATUS_ERROR(status)) {
5143 			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
5144 					link_id, snapshot_id);
5145 
5146 			/* Cleanup */
5147 			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
5148 				&sim_context->master_frame_list, &frame);
5149 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5150 
5151 			status = mgmt_rx_reo_sim_undo_receive_from_air(
5152 								mac_hw, &frame);
5153 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
5154 
5155 			qdf_mem_free(frame_mac_hw);
5156 
5157 			continue;
5158 		}
5159 
5160 		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
5161 					 mgmt_rx_reo_sim_frame_handler_fw,
5162 					 frame_mac_hw);
5163 		if (QDF_IS_STATUS_ERROR(status)) {
5164 			mgmt_rx_reo_err("HW-%d : Failed to create work",
5165 					link_id);
5166 			qdf_assert_always(0);
5167 		}
5168 
5169 		ret = qdf_queue_work(
5170 			NULL, sim_context->fw_mgmt_frame_handler[link_id],
5171 			&frame_mac_hw->frame_handler_fw);
5172 		if (!ret) {
5173 			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
5174 					link_id);
5175 			qdf_assert_always(0);
5176 		}
5177 
5178 		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
5179 			mgmt_rx_reo_sim_get_random_unsigned_int(
5180 			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);
5181 
5182 		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
5183 	}
5184 
5185 	return 0;
5186 }
5187 
5188 /**
5189  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
5190  * management frame list
5191  * @master_frame_list: Pointer to master frame list
5192  *
5193  * This API initializes the master management frame list
5194  *
5195  * Return: QDF_STATUS
5196  */
5197 static QDF_STATUS
5198 mgmt_rx_reo_sim_init_master_frame_list(
5199 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
5200 {
5201 	qdf_spinlock_create(&master_frame_list->lock);
5202 
5203 	qdf_list_create(&master_frame_list->pending_list,
5204 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
5205 	qdf_list_create(&master_frame_list->stale_list,
5206 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
5207 
5208 	return QDF_STATUS_SUCCESS;
5209 }
5210 
5211 /**
5212  * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
5213  * management frame list
5214  * @master_frame_list: Pointer to master frame list
5215  *
5216  * This API de initializes the master management frame list
5217  *
5218  * Return: QDF_STATUS
5219  */
5220 static QDF_STATUS
5221 mgmt_rx_reo_sim_deinit_master_frame_list(
5222 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
5223 {
5224 	qdf_spin_lock(&master_frame_list->lock);
5225 	qdf_list_destroy(&master_frame_list->stale_list);
5226 	qdf_list_destroy(&master_frame_list->pending_list);
5227 	qdf_spin_unlock(&master_frame_list->lock);
5228 
5229 	qdf_spinlock_destroy(&master_frame_list->lock);
5230 
5231 	return QDF_STATUS_SUCCESS;
5232 }
5233 
5234 /**
5235  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
5236  * unique link id values
5237  * @link_id_to_pdev_map: pointer to link id to pdev map
5238  * @link_id: Pointer to unique link id
5239  *
5240  * This API generates unique link id values for each pdev. This API should be
5241  * called after acquiring the spin lock protecting link id to pdev map.
5242  *
5243  * Return: QDF_STATUS
5244  */
5245 static QDF_STATUS
5246 mgmt_rx_reo_sim_generate_unique_link_id(
5247 		struct wlan_objmgr_pdev **link_id_to_pdev_map, uint8_t *link_id)
5248 {
5249 	uint8_t random_link_id;
5250 	uint8_t link;
5251 
5252 	if (!link_id_to_pdev_map || !link_id)
5253 		return QDF_STATUS_E_NULL_VALUE;
5254 
5255 	for (link = 0; link < MAX_MLO_LINKS; link++)
5256 		if (!link_id_to_pdev_map[link])
5257 			break;
5258 
5259 	if (link == MAX_MLO_LINKS) {
5260 		mgmt_rx_reo_err("All link ids are already allocated");
5261 		return QDF_STATUS_E_FAILURE;
5262 	}
5263 
5264 	while (1) {
5265 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
5266 							MAX_MLO_LINKS);
5267 
5268 		if (!link_id_to_pdev_map[random_link_id])
5269 			break;
5270 	}
5271 
5272 	*link_id = random_link_id;
5273 
5274 	return QDF_STATUS_SUCCESS;
5275 }
5276 
5277 /**
5278  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
5279  * to pdev map
5280  * @link_id_to_pdev_map: pointer to link id to pdev map
5281  * @pdev: pointer to pdev object
5282  *
5283  * This API incrementally builds the MLO HW link id to pdev map. This API is
5284  * used only for simulation.
5285  *
5286  * Return: QDF_STATUS
5287  */
5288 static QDF_STATUS
5289 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
5290 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
5291 		struct wlan_objmgr_pdev *pdev)
5292 {
5293 	uint8_t link_id;
5294 	QDF_STATUS status;
5295 
5296 	if (!link_id_to_pdev_map) {
5297 		mgmt_rx_reo_err("Link id to pdev map is null");
5298 		return QDF_STATUS_E_NULL_VALUE;
5299 	}
5300 
5301 	if (!pdev) {
5302 		mgmt_rx_reo_err("pdev is null");
5303 		return QDF_STATUS_E_NULL_VALUE;
5304 	}
5305 
5306 	qdf_spin_lock(&link_id_to_pdev_map->lock);
5307 
5308 	status = mgmt_rx_reo_sim_generate_unique_link_id(
5309 					link_id_to_pdev_map->map, &link_id);
5310 	if (QDF_IS_STATUS_ERROR(status)) {
5311 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
5312 		return QDF_STATUS_E_FAILURE;
5313 	}
5314 	qdf_assert_always(link_id < MAX_MLO_LINKS);
5315 
5316 	link_id_to_pdev_map->map[link_id] = pdev;
5317 	link_id_to_pdev_map->valid_link_list
5318 			[link_id_to_pdev_map->num_mlo_links] = link_id;
5319 	link_id_to_pdev_map->num_mlo_links++;
5320 
5321 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
5322 
5323 	return QDF_STATUS_SUCCESS;
5324 }
5325 
5326 /**
5327  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
5328  * id to pdev map
5329  * @link_id_to_pdev_map: pointer to link id to pdev map
5330  * @pdev: pointer to pdev object
5331  *
5332  * This API incrementally destroys the MLO HW link id to pdev map. This API is
5333  * used only for simulation.
5334  *
5335  * Return: QDF_STATUS
5336  */
5337 static QDF_STATUS
5338 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
5339 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
5340 		struct wlan_objmgr_pdev *pdev)
5341 {
5342 	uint8_t link_id;
5343 
5344 	if (!link_id_to_pdev_map) {
5345 		mgmt_rx_reo_err("Link id to pdev map is null");
5346 		return QDF_STATUS_E_NULL_VALUE;
5347 	}
5348 
5349 	if (!pdev) {
5350 		mgmt_rx_reo_err("pdev is null");
5351 		return QDF_STATUS_E_NULL_VALUE;
5352 	}
5353 
5354 	qdf_spin_lock(&link_id_to_pdev_map->lock);
5355 
5356 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
5357 		if (link_id_to_pdev_map->map[link_id] == pdev) {
5358 			link_id_to_pdev_map->map[link_id] = NULL;
5359 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
5360 
5361 			return QDF_STATUS_SUCCESS;
5362 		}
5363 	}
5364 
5365 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
5366 
5367 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
5368 
5369 	return QDF_STATUS_E_FAILURE;
5370 }
5371 
5372 QDF_STATUS
5373 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
5374 {
5375 	struct mgmt_rx_reo_sim_context *sim_context;
5376 	QDF_STATUS status;
5377 
5378 	sim_context = mgmt_rx_reo_sim_get_context();
5379 	if (!sim_context) {
5380 		mgmt_rx_reo_err("Mgmt simulation context is null");
5381 		return QDF_STATUS_E_NULL_VALUE;
5382 	}
5383 
5384 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
5385 				&sim_context->link_id_to_pdev_map, pdev);
5386 
5387 	if (QDF_IS_STATUS_ERROR(status)) {
5388 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
5389 		return status;
5390 	}
5391 
5392 	return QDF_STATUS_SUCCESS;
5393 }
5394 
5395 QDF_STATUS
5396 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
5397 {
5398 	struct mgmt_rx_reo_sim_context *sim_context;
5399 	QDF_STATUS status;
5400 
5401 	sim_context = mgmt_rx_reo_sim_get_context();
5402 	if (!sim_context) {
5403 		mgmt_rx_reo_err("Mgmt simulation context is null");
5404 		return QDF_STATUS_E_NULL_VALUE;
5405 	}
5406 
5407 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
5408 				&sim_context->link_id_to_pdev_map, pdev);
5409 
5410 	if (QDF_IS_STATUS_ERROR(status)) {
5411 		mgmt_rx_reo_err("Failed to remove pdev from the map");
5412 		return status;
5413 	}
5414 
5415 	return QDF_STATUS_SUCCESS;
5416 }
5417 
5418 QDF_STATUS
5419 mgmt_rx_reo_sim_start(uint8_t ml_grp_id)
5420 {
5421 	struct mgmt_rx_reo_context *reo_context;
5422 	struct mgmt_rx_reo_sim_context *sim_context;
5423 	qdf_thread_t *mac_hw_thread;
5424 	uint8_t link_id;
5425 	uint8_t id;
5426 	QDF_STATUS status;
5427 
5428 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
5429 	if (!reo_context) {
5430 		mgmt_rx_reo_err("reo context is null");
5431 		return QDF_STATUS_E_NULL_VALUE;
5432 	}
5433 
5434 	reo_context->simulation_in_progress = true;
5435 
5436 	sim_context = &reo_context->sim_context;
5437 
5438 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
5439 		struct workqueue_struct *wq;
5440 
5441 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
5442 					     link_id);
5443 		if (!wq) {
5444 			mgmt_rx_reo_err("Host workqueue creation failed");
5445 			status = QDF_STATUS_E_FAILURE;
5446 			goto error_destroy_fw_and_host_work_queues_till_last_link;
5447 		}
5448 		sim_context->host_mgmt_frame_handler[link_id] = wq;
5449 
5450 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
5451 					     link_id);
5452 		if (!wq) {
5453 			mgmt_rx_reo_err("FW workqueue creation failed");
5454 			status = QDF_STATUS_E_FAILURE;
5455 			goto error_destroy_host_work_queue_of_last_link;
5456 		}
5457 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
5458 	}
5459 
5460 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
5461 					  sim_context, "MAC_HW_thread");
5462 	if (!mac_hw_thread) {
5463 		mgmt_rx_reo_err("MAC HW thread creation failed");
5464 		status = QDF_STATUS_E_FAILURE;
5465 		goto error_destroy_fw_and_host_work_queues_of_last_link;
5466 	}
5467 
5468 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
5469 
5470 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
5471 
5472 	return QDF_STATUS_SUCCESS;
5473 
5474 error_destroy_fw_and_host_work_queues_of_last_link:
5475 	drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
5476 	destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
5477 
5478 error_destroy_host_work_queue_of_last_link:
5479 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
5480 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
5481 
5482 error_destroy_fw_and_host_work_queues_till_last_link:
5483 	for (id = 0; id < link_id; id++) {
5484 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
5485 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
5486 
5487 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
5488 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
5489 	}
5490 
5491 	return status;
5492 }
5493 
QDF_STATUS
mgmt_rx_reo_sim_stop(uint8_t ml_grp_id)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;
	uint8_t link_id;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/*
	 * Stop frame generation first by waiting for the MAC HW simulation
	 * thread to exit.
	 * NOTE(review): mac_hw_thread is not NULL-checked here; presumably
	 * callers only stop a simulation that was successfully started --
	 * confirm against the callers.
	 */
	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to stop the thread");
		return status;
	}

	sim_context->mac_hw_sim.mac_hw_thread = NULL;

	/* Flush and destroy the per-link FW and host work queues */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		/* Wait for all the pending frames to be processed by FW */
		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);

		/* Wait for all the pending frames to be processed by host */
		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
		destroy_workqueue(
				sim_context->host_mgmt_frame_handler[link_id]);
	}

	status = mgmt_rx_reo_print_ingress_frame_info
			(MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame debug info");
		return status;
	}

	status = mgmt_rx_reo_print_egress_frame_info
			(MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame debug info");
		return status;
	}

	/*
	 * The simulation passes only if every frame has been released:
	 * both the pending and the stale master frame lists must be empty
	 * once all work queues are drained. A non-empty list is a reorder
	 * failure and triggers an assert after dumping the reorder list.
	 */
	master_frame_list = &sim_context->master_frame_list;
	if (!qdf_list_empty(&master_frame_list->pending_list) ||
	    !qdf_list_empty(&master_frame_list->stale_list)) {
		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");

		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to print reorder list");
			return status;
		}

		qdf_assert_always(0);
	} else {
		mgmt_rx_reo_err("reo sim passed");
	}

	reo_context->simulation_in_progress = false;

	return QDF_STATUS_SUCCESS;
}
5564 
5565 /**
5566  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
5567  * context.
5568  * @reo_context: Pointer to reo context
5569  *
5570  * Return: QDF_STATUS of operation
5571  */
5572 static QDF_STATUS
5573 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
5574 {
5575 	QDF_STATUS status;
5576 	struct mgmt_rx_reo_sim_context *sim_context;
5577 	uint8_t link_id;
5578 
5579 	if (!reo_context) {
5580 		mgmt_rx_reo_err("reo context is null");
5581 		return QDF_STATUS_E_NULL_VALUE;
5582 	}
5583 
5584 	sim_context = &reo_context->sim_context;
5585 
5586 	qdf_mem_zero(sim_context, sizeof(*sim_context));
5587 	sim_context->mlo_grp_id = reo_context->mlo_grp_id;
5588 
5589 	status = mgmt_rx_reo_sim_init_master_frame_list(
5590 					&sim_context->master_frame_list);
5591 	if (QDF_IS_STATUS_ERROR(status)) {
5592 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
5593 		return status;
5594 	}
5595 
5596 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
5597 
5598 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
5599 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
5600 					MGMT_RX_REO_INVALID_LINK_ID;
5601 
5602 	return QDF_STATUS_SUCCESS;
5603 }
5604 
5605 /**
5606  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
5607  * context.
5608  * @reo_context: Pointer to reo context
5609  *
5610  * Return: QDF_STATUS of operation
5611  */
5612 static QDF_STATUS
5613 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
5614 {
5615 	QDF_STATUS status;
5616 	struct mgmt_rx_reo_sim_context *sim_context;
5617 
5618 	if (!reo_context) {
5619 		mgmt_rx_reo_err("reo context is null");
5620 		return QDF_STATUS_E_NULL_VALUE;
5621 	}
5622 
5623 	sim_context = &reo_context->sim_context;
5624 
5625 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
5626 
5627 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
5628 					&sim_context->master_frame_list);
5629 	if (QDF_IS_STATUS_ERROR(status)) {
5630 		mgmt_rx_reo_err("Failed to destroy master frame list");
5631 		return status;
5632 	}
5633 
5634 	return QDF_STATUS_SUCCESS;
5635 }
5636 
5637 QDF_STATUS
5638 mgmt_rx_reo_sim_get_snapshot_address(
5639 			struct wlan_objmgr_pdev *pdev,
5640 			enum mgmt_rx_reo_shared_snapshot_id id,
5641 			struct mgmt_rx_reo_shared_snapshot **address)
5642 {
5643 	int8_t link_id;
5644 	struct mgmt_rx_reo_sim_context *sim_context;
5645 
5646 	sim_context = mgmt_rx_reo_sim_get_context();
5647 	if (!sim_context) {
5648 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
5649 		return QDF_STATUS_E_NULL_VALUE;
5650 	}
5651 
5652 	if (!pdev) {
5653 		mgmt_rx_reo_err("pdev is NULL");
5654 		return QDF_STATUS_E_NULL_VALUE;
5655 	}
5656 
5657 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
5658 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
5659 		return QDF_STATUS_E_INVAL;
5660 	}
5661 
5662 	if (!address) {
5663 		mgmt_rx_reo_err("Pointer to snapshot address is null");
5664 		return QDF_STATUS_E_NULL_VALUE;
5665 	}
5666 
5667 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
5668 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
5669 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
5670 				pdev);
5671 		return QDF_STATUS_E_INVAL;
5672 	}
5673 
5674 	*address = &sim_context->snapshot[link_id][id];
5675 
5676 	return QDF_STATUS_SUCCESS;
5677 }
5678 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
5679 
5680 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
5681 /**
5682  * mgmt_rx_reo_ingress_debug_info_init() - Initialize the management rx-reorder
5683  * ingress frame debug info
5684  * @psoc: Pointer to psoc
5685  * @ingress_debug_info_init_count: Initialization count
5686  * @ingress_frame_debug_info: Ingress frame debug info object
5687  *
5688  * API to initialize the management rx-reorder ingress frame debug info.
5689  *
5690  * Return: QDF_STATUS
5691  */
5692 static QDF_STATUS
5693 mgmt_rx_reo_ingress_debug_info_init
5694 		(struct wlan_objmgr_psoc *psoc,
5695 		 qdf_atomic_t *ingress_debug_info_init_count,
5696 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
5697 {
5698 	if (!psoc) {
5699 		mgmt_rx_reo_err("psoc is null");
5700 		return QDF_STATUS_E_NULL_VALUE;
5701 	}
5702 
5703 	if (!ingress_frame_debug_info) {
5704 		mgmt_rx_reo_err("Ingress frame debug info is null");
5705 		return QDF_STATUS_E_NULL_VALUE;
5706 	}
5707 
5708 	/* We need to initialize only for the first invocation */
5709 	if (qdf_atomic_read(ingress_debug_info_init_count))
5710 		goto success;
5711 
5712 	ingress_frame_debug_info->frame_list_size =
5713 		wlan_mgmt_rx_reo_get_ingress_frame_debug_list_size(psoc);
5714 
5715 	if (ingress_frame_debug_info->frame_list_size) {
5716 		ingress_frame_debug_info->frame_list = qdf_mem_malloc
5717 			(ingress_frame_debug_info->frame_list_size *
5718 			 sizeof(*ingress_frame_debug_info->frame_list));
5719 
5720 		if (!ingress_frame_debug_info->frame_list) {
5721 			mgmt_rx_reo_err("Failed to allocate debug info");
5722 			return QDF_STATUS_E_NOMEM;
5723 		}
5724 	}
5725 
5726 	/* Initialize the string for storing the debug info table boarder */
5727 	qdf_mem_set(ingress_frame_debug_info->boarder,
5728 		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5729 
5730 success:
5731 	qdf_atomic_inc(ingress_debug_info_init_count);
5732 	return QDF_STATUS_SUCCESS;
5733 }
5734 
5735 /**
5736  * mgmt_rx_reo_egress_debug_info_init() - Initialize the management rx-reorder
5737  * egress frame debug info
5738  * @psoc: Pointer to psoc
5739  * @egress_debug_info_init_count: Initialization count
5740  * @egress_frame_debug_info: Egress frame debug info object
5741  *
5742  * API to initialize the management rx-reorder egress frame debug info.
5743  *
5744  * Return: QDF_STATUS
5745  */
5746 static QDF_STATUS
5747 mgmt_rx_reo_egress_debug_info_init
5748 		(struct wlan_objmgr_psoc *psoc,
5749 		 qdf_atomic_t *egress_debug_info_init_count,
5750 		 struct reo_egress_debug_info *egress_frame_debug_info)
5751 {
5752 	if (!psoc) {
5753 		mgmt_rx_reo_err("psoc is null");
5754 		return QDF_STATUS_E_NULL_VALUE;
5755 	}
5756 
5757 	if (!egress_frame_debug_info) {
5758 		mgmt_rx_reo_err("Egress frame debug info is null");
5759 		return QDF_STATUS_E_NULL_VALUE;
5760 	}
5761 
5762 	/* We need to initialize only for the first invocation */
5763 	if (qdf_atomic_read(egress_debug_info_init_count))
5764 		goto success;
5765 
5766 	egress_frame_debug_info->frame_list_size =
5767 		wlan_mgmt_rx_reo_get_egress_frame_debug_list_size(psoc);
5768 
5769 	if (egress_frame_debug_info->frame_list_size) {
5770 		egress_frame_debug_info->frame_list = qdf_mem_malloc
5771 				(egress_frame_debug_info->frame_list_size *
5772 				 sizeof(*egress_frame_debug_info->frame_list));
5773 
5774 		if (!egress_frame_debug_info->frame_list) {
5775 			mgmt_rx_reo_err("Failed to allocate debug info");
5776 			return QDF_STATUS_E_NOMEM;
5777 		}
5778 	}
5779 
5780 	/* Initialize the string for storing the debug info table boarder */
5781 	qdf_mem_set(egress_frame_debug_info->boarder,
5782 		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
5783 
5784 success:
5785 	qdf_atomic_inc(egress_debug_info_init_count);
5786 	return QDF_STATUS_SUCCESS;
5787 }
5788 
5789 /**
5790  * mgmt_rx_reo_debug_info_init() - Initialize the management rx-reorder debug
5791  * info
5792  * @pdev: pointer to pdev object
5793  *
5794  * API to initialize the management rx-reorder debug info.
5795  *
5796  * Return: QDF_STATUS
5797  */
5798 static QDF_STATUS
5799 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
5800 {
5801 	struct mgmt_rx_reo_context *reo_context;
5802 	QDF_STATUS status;
5803 	struct wlan_objmgr_psoc *psoc;
5804 
5805 	psoc = wlan_pdev_get_psoc(pdev);
5806 
5807 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
5808 		return QDF_STATUS_SUCCESS;
5809 
5810 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
5811 	if (!reo_context) {
5812 		mgmt_rx_reo_err("reo context is null");
5813 		return QDF_STATUS_E_NULL_VALUE;
5814 	}
5815 
5816 	status = mgmt_rx_reo_ingress_debug_info_init
5817 			(psoc, &reo_context->ingress_debug_info_init_count,
5818 			 &reo_context->ingress_frame_debug_info);
5819 	if (QDF_IS_STATUS_ERROR(status)) {
5820 		mgmt_rx_reo_err("Failed to initialize ingress debug info");
5821 		return QDF_STATUS_E_FAILURE;
5822 	}
5823 
5824 	status = mgmt_rx_reo_egress_debug_info_init
5825 			(psoc, &reo_context->egress_debug_info_init_count,
5826 			 &reo_context->egress_frame_debug_info);
5827 	if (QDF_IS_STATUS_ERROR(status)) {
5828 		mgmt_rx_reo_err("Failed to initialize egress debug info");
5829 		return QDF_STATUS_E_FAILURE;
5830 	}
5831 
5832 	return QDF_STATUS_SUCCESS;
5833 }
5834 
5835 /**
5836  * mgmt_rx_reo_ingress_debug_info_deinit() - De initialize the management
5837  * rx-reorder ingress frame debug info
5838  * @psoc: Pointer to psoc
5839  * @ingress_debug_info_init_count: Initialization count
5840  * @ingress_frame_debug_info: Ingress frame debug info object
5841  *
5842  * API to de initialize the management rx-reorder ingress frame debug info.
5843  *
5844  * Return: QDF_STATUS
5845  */
5846 static QDF_STATUS
5847 mgmt_rx_reo_ingress_debug_info_deinit
5848 		(struct wlan_objmgr_psoc *psoc,
5849 		 qdf_atomic_t *ingress_debug_info_init_count,
5850 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
5851 {
5852 	if (!psoc) {
5853 		mgmt_rx_reo_err("psoc is null");
5854 		return QDF_STATUS_E_NULL_VALUE;
5855 	}
5856 
5857 	if (!ingress_frame_debug_info) {
5858 		mgmt_rx_reo_err("Ingress frame debug info is null");
5859 		return QDF_STATUS_E_NULL_VALUE;
5860 	}
5861 
5862 	if (!qdf_atomic_read(ingress_debug_info_init_count)) {
5863 		mgmt_rx_reo_err("Ingress debug info ref cnt is 0");
5864 		return QDF_STATUS_E_FAILURE;
5865 	}
5866 
5867 	/* We need to de-initialize only for the last invocation */
5868 	if (qdf_atomic_dec_and_test(ingress_debug_info_init_count))
5869 		goto success;
5870 
5871 	if (ingress_frame_debug_info->frame_list) {
5872 		qdf_mem_free(ingress_frame_debug_info->frame_list);
5873 		ingress_frame_debug_info->frame_list = NULL;
5874 	}
5875 	ingress_frame_debug_info->frame_list_size = 0;
5876 
5877 	qdf_mem_zero(ingress_frame_debug_info->boarder,
5878 		     MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
5879 
5880 success:
5881 	return QDF_STATUS_SUCCESS;
5882 }
5883 
5884 /**
5885  * mgmt_rx_reo_egress_debug_info_deinit() - De initialize the management
5886  * rx-reorder egress frame debug info
5887  * @psoc: Pointer to psoc
5888  * @egress_debug_info_init_count: Initialization count
5889  * @egress_frame_debug_info: Egress frame debug info object
5890  *
5891  * API to de initialize the management rx-reorder egress frame debug info.
5892  *
5893  * Return: QDF_STATUS
5894  */
5895 static QDF_STATUS
5896 mgmt_rx_reo_egress_debug_info_deinit
5897 		(struct wlan_objmgr_psoc *psoc,
5898 		 qdf_atomic_t *egress_debug_info_init_count,
5899 		 struct reo_egress_debug_info *egress_frame_debug_info)
5900 {
5901 	if (!psoc) {
5902 		mgmt_rx_reo_err("psoc is null");
5903 		return QDF_STATUS_E_NULL_VALUE;
5904 	}
5905 
5906 	if (!egress_frame_debug_info) {
5907 		mgmt_rx_reo_err("Egress frame debug info is null");
5908 		return QDF_STATUS_E_NULL_VALUE;
5909 	}
5910 
5911 	if (!qdf_atomic_read(egress_debug_info_init_count)) {
5912 		mgmt_rx_reo_err("Egress debug info ref cnt is 0");
5913 		return QDF_STATUS_E_FAILURE;
5914 	}
5915 
5916 	/* We need to de-initialize only for the last invocation */
5917 	if (qdf_atomic_dec_and_test(egress_debug_info_init_count))
5918 		goto success;
5919 
5920 	if (egress_frame_debug_info->frame_list) {
5921 		qdf_mem_free(egress_frame_debug_info->frame_list);
5922 		egress_frame_debug_info->frame_list = NULL;
5923 	}
5924 	egress_frame_debug_info->frame_list_size = 0;
5925 
5926 	qdf_mem_zero(egress_frame_debug_info->boarder,
5927 		     MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
5928 
5929 success:
5930 	return QDF_STATUS_SUCCESS;
5931 }
5932 
5933 /**
5934  * mgmt_rx_reo_debug_info_deinit() - De initialize the management rx-reorder
5935  * debug info
5936  * @pdev: Pointer to pdev object
5937  *
5938  * API to de initialize the management rx-reorder debug info.
5939  *
5940  * Return: QDF_STATUS
5941  */
5942 static QDF_STATUS
5943 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
5944 {
5945 	struct mgmt_rx_reo_context *reo_context;
5946 	QDF_STATUS status;
5947 	struct wlan_objmgr_psoc *psoc;
5948 
5949 	psoc = wlan_pdev_get_psoc(pdev);
5950 
5951 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
5952 		return QDF_STATUS_SUCCESS;
5953 
5954 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
5955 	if (!reo_context) {
5956 		mgmt_rx_reo_err("reo context is null");
5957 		return QDF_STATUS_E_NULL_VALUE;
5958 	}
5959 
5960 	status = mgmt_rx_reo_ingress_debug_info_deinit
5961 			(psoc, &reo_context->ingress_debug_info_init_count,
5962 			 &reo_context->ingress_frame_debug_info);
5963 	if (QDF_IS_STATUS_ERROR(status)) {
5964 		mgmt_rx_reo_err("Failed to deinitialize ingress debug info");
5965 		return QDF_STATUS_E_FAILURE;
5966 	}
5967 
5968 	status = mgmt_rx_reo_egress_debug_info_deinit
5969 			(psoc, &reo_context->egress_debug_info_init_count,
5970 			 &reo_context->egress_frame_debug_info);
5971 	if (QDF_IS_STATUS_ERROR(status)) {
5972 		mgmt_rx_reo_err("Failed to deinitialize egress debug info");
5973 		return QDF_STATUS_E_FAILURE;
5974 	}
5975 
5976 	return QDF_STATUS_SUCCESS;
5977 }
5978 #else
5979 static QDF_STATUS
5980 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_psoc *psoc)
5981 {
5982 	return QDF_STATUS_SUCCESS;
5983 }
5984 
5985 static QDF_STATUS
5986 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_psoc *psoc)
5987 {
5988 	return QDF_STATUS_SUCCESS;
5989 }
5990 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5991 
5992 /**
5993  * mgmt_rx_reo_flush_list() - Flush all entries in the reorder list
5994  * @reo_list: Pointer to reorder list
5995  *
5996  * API to flush all the entries of the reorder list. This API would acquire
5997  * the lock protecting the list.
5998  *
5999  * Return: QDF_STATUS
6000  */
6001 static QDF_STATUS
6002 mgmt_rx_reo_flush_list(struct mgmt_rx_reo_list *reo_list)
6003 {
6004 	struct mgmt_rx_reo_list_entry *cur_entry;
6005 	struct mgmt_rx_reo_list_entry *temp;
6006 
6007 	if (!reo_list) {
6008 		mgmt_rx_reo_err("reorder list is null");
6009 		return QDF_STATUS_E_NULL_VALUE;
6010 	}
6011 
6012 	qdf_spin_lock_bh(&reo_list->list_lock);
6013 
6014 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
6015 		free_mgmt_rx_event_params(cur_entry->rx_params);
6016 
6017 		/**
6018 		 * Release the reference taken when the entry is inserted into
6019 		 * the reorder list.
6020 		 */
6021 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
6022 					     WLAN_MGMT_RX_REO_ID);
6023 
6024 		qdf_mem_free(cur_entry);
6025 	}
6026 
6027 	qdf_spin_unlock_bh(&reo_list->list_lock);
6028 
6029 	return QDF_STATUS_SUCCESS;
6030 }
6031 
6032 /**
6033  * mgmt_rx_reo_ingress_list_deinit() - De initialize the management rx-reorder
6034  * ingress list
6035  * @ingress_list: Pointer to ingress reorder list
6036  *
6037  * API to de initialize the management rx-reorder ingress list.
6038  *
6039  * Return: QDF_STATUS
6040  */
6041 static QDF_STATUS
6042 mgmt_rx_reo_ingress_list_deinit(struct mgmt_rx_reo_ingress_list *ingress_list)
6043 {
6044 	QDF_STATUS status;
6045 	struct mgmt_rx_reo_list *reo_ingress_list;
6046 
6047 	if (!ingress_list) {
6048 		mgmt_rx_reo_err("Ingress list is null");
6049 		return QDF_STATUS_E_NULL_VALUE;
6050 	}
6051 	reo_ingress_list = &ingress_list->reo_list;
6052 
6053 	qdf_timer_sync_cancel(&ingress_list->ageout_timer);
6054 	qdf_timer_free(&ingress_list->ageout_timer);
6055 
6056 	status = mgmt_rx_reo_flush_list(reo_ingress_list);
6057 	if (QDF_IS_STATUS_ERROR(status)) {
6058 		mgmt_rx_reo_err("Failed to flush the ingress list");
6059 		return status;
6060 	}
6061 	qdf_spinlock_destroy(&reo_ingress_list->list_lock);
6062 	qdf_list_destroy(&reo_ingress_list->list);
6063 
6064 	return QDF_STATUS_SUCCESS;
6065 }
6066 
6067 /**
6068  * mgmt_rx_reo_egress_list_deinit() - De initialize the management rx-reorder
6069  * egress list
6070  * @egress_list: Pointer to egress reorder list
6071  *
6072  * API to de initialize the management rx-reorder egress list.
6073  *
6074  * Return: QDF_STATUS
6075  */
6076 static QDF_STATUS
6077 mgmt_rx_reo_egress_list_deinit(struct mgmt_rx_reo_egress_list *egress_list)
6078 {
6079 	QDF_STATUS status;
6080 	struct mgmt_rx_reo_list *reo_egress_list;
6081 
6082 	if (!egress_list) {
6083 		mgmt_rx_reo_err("Egress list is null");
6084 		return QDF_STATUS_E_NULL_VALUE;
6085 	}
6086 	reo_egress_list = &egress_list->reo_list;
6087 
6088 	qdf_timer_sync_cancel(&egress_list->egress_inactivity_timer);
6089 	qdf_timer_free(&egress_list->egress_inactivity_timer);
6090 
6091 	status = mgmt_rx_reo_flush_list(reo_egress_list);
6092 	if (QDF_IS_STATUS_ERROR(status)) {
6093 		mgmt_rx_reo_err("Failed to flush the egress list");
6094 		return QDF_STATUS_E_FAILURE;
6095 	}
6096 	qdf_spinlock_destroy(&reo_egress_list->list_lock);
6097 	qdf_list_destroy(&reo_egress_list->list);
6098 
6099 	return QDF_STATUS_SUCCESS;
6100 }
6101 
6102 QDF_STATUS
6103 mgmt_rx_reo_deinit_context(uint8_t ml_grp_id)
6104 {
6105 	QDF_STATUS status;
6106 	struct mgmt_rx_reo_context *reo_context;
6107 
6108 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6109 	if (!reo_context) {
6110 		mgmt_rx_reo_err("reo context is null");
6111 		return QDF_STATUS_E_NULL_VALUE;
6112 	}
6113 
6114 	qdf_spinlock_destroy(&reo_context->frame_release_lock);
6115 	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);
6116 
6117 	status = mgmt_rx_reo_sim_deinit(reo_context);
6118 	if (QDF_IS_STATUS_ERROR(status)) {
6119 		mgmt_rx_reo_err("Failed to de initialize reo sim context");
6120 		qdf_mem_free(reo_context);
6121 		return QDF_STATUS_E_FAILURE;
6122 	}
6123 
6124 	status = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
6125 	if (QDF_IS_STATUS_ERROR(status)) {
6126 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
6127 		qdf_mem_free(reo_context);
6128 		return status;
6129 	}
6130 
6131 	status = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
6132 	if (QDF_IS_STATUS_ERROR(status)) {
6133 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
6134 		qdf_mem_free(reo_context);
6135 		return status;
6136 	}
6137 
6138 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
6139 	qdf_mem_free(reo_context);
6140 
6141 	return QDF_STATUS_SUCCESS;
6142 }
6143 
6144 QDF_STATUS
6145 mgmt_rx_reo_init_context(uint8_t ml_grp_id)
6146 {
6147 	QDF_STATUS status;
6148 	QDF_STATUS temp;
6149 	struct mgmt_rx_reo_context *reo_context;
6150 
6151 	reo_context = qdf_mem_malloc(sizeof(struct mgmt_rx_reo_context));
6152 	if (!reo_context) {
6153 		mgmt_rx_reo_err("Failed to allocate reo context");
6154 		return QDF_STATUS_E_NULL_VALUE;
6155 	}
6156 	reo_context->mlo_grp_id = ml_grp_id;
6157 
6158 	mgmt_rx_reo_set_context(ml_grp_id, reo_context);
6159 
6160 	status = mgmt_rx_reo_ingress_list_init(&reo_context->ingress_list);
6161 	if (QDF_IS_STATUS_ERROR(status)) {
6162 		mgmt_rx_reo_err("Failed to initialize Rx reo ingress list");
6163 		goto free_reo_context;
6164 	}
6165 
6166 	status = mgmt_rx_reo_egress_list_init(&reo_context->egress_list);
6167 	if (QDF_IS_STATUS_ERROR(status)) {
6168 		mgmt_rx_reo_err("Failed to initialize Rx reo egress list");
6169 		goto deinit_reo_ingress_list;
6170 	}
6171 
6172 	status = mgmt_rx_reo_sim_init(reo_context);
6173 	if (QDF_IS_STATUS_ERROR(status)) {
6174 		mgmt_rx_reo_err("Failed to initialize reo simulation context");
6175 		goto deinit_reo_egress_list;
6176 	}
6177 
6178 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
6179 	qdf_spinlock_create(&reo_context->frame_release_lock);
6180 
6181 	return QDF_STATUS_SUCCESS;
6182 
6183 deinit_reo_egress_list:
6184 	temp = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
6185 	if (QDF_IS_STATUS_ERROR(temp)) {
6186 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
6187 		return temp;
6188 	}
6189 deinit_reo_ingress_list:
6190 	temp = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
6191 	if (QDF_IS_STATUS_ERROR(temp)) {
6192 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
6193 		return temp;
6194 	}
6195 free_reo_context:
6196 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
6197 	qdf_mem_free(reo_context);
6198 
6199 	return status;
6200 }
6201 
6202 /**
6203  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
6204  * params object
6205  * @snapshot_params: Pointer to snapshot params object
6206  *
6207  * Return: void
6208  */
6209 static void
6210 wlan_mgmt_rx_reo_initialize_snapshot_params(
6211 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
6212 {
6213 	snapshot_params->valid = false;
6214 	snapshot_params->mgmt_pkt_ctr = 0;
6215 	snapshot_params->global_timestamp = 0;
6216 }
6217 
6218 /**
6219  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
6220  * snapshot addresses for a given pdev
6221  * @pdev: pointer to pdev object
6222  *
6223  * Return: QDF_STATUS
6224  */
6225 static QDF_STATUS
6226 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
6227 {
6228 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
6229 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
6230 	QDF_STATUS status;
6231 
6232 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
6233 	if (!mgmt_rx_reo_pdev_ctx) {
6234 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
6235 		return QDF_STATUS_E_NULL_VALUE;
6236 	}
6237 
6238 	snapshot_id = 0;
6239 
6240 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
6241 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
6242 
6243 		snapshot_info =
6244 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
6245 			[snapshot_id];
6246 		status = wlan_mgmt_rx_reo_get_snapshot_info
6247 					(pdev, snapshot_id, snapshot_info);
6248 		if (QDF_IS_STATUS_ERROR(status)) {
6249 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
6250 					snapshot_id);
6251 			return status;
6252 		}
6253 
6254 		snapshot_id++;
6255 	}
6256 
6257 	return QDF_STATUS_SUCCESS;
6258 }
6259 
6260 /**
6261  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
6262  * snapshot values for a given pdev
6263  * @pdev: pointer to pdev object
6264  *
6265  * Return: QDF_STATUS
6266  */
6267 static QDF_STATUS
6268 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
6269 {
6270 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
6271 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
6272 
6273 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
6274 	if (!mgmt_rx_reo_pdev_ctx) {
6275 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
6276 		return QDF_STATUS_E_NULL_VALUE;
6277 	}
6278 
6279 	snapshot_id = 0;
6280 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
6281 		wlan_mgmt_rx_reo_initialize_snapshot_params
6282 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
6283 			 [snapshot_id]);
6284 		snapshot_id++;
6285 	}
6286 
6287 	/* Initialize Host snapshot params */
6288 	wlan_mgmt_rx_reo_initialize_snapshot_params
6289 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
6290 
6291 	return QDF_STATUS_SUCCESS;
6292 }
6293 
6294 /**
6295  * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
6296  * for management Rx REO pdev component private object
6297  * @pdev: pointer to pdev object
6298  *
6299  * Return: QDF_STATUS
6300  */
6301 static QDF_STATUS
6302 mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
6303 {
6304 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
6305 
6306 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
6307 	if (!mgmt_rx_reo_pdev_ctx) {
6308 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
6309 		return QDF_STATUS_E_NULL_VALUE;
6310 	}
6311 
6312 	mgmt_rx_reo_pdev_ctx->init_complete = true;
6313 
6314 	return QDF_STATUS_SUCCESS;
6315 }
6316 
6317 /**
6318  * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
6319  * for management Rx REO pdev component private object
6320  * @pdev: pointer to pdev object
6321  *
6322  * Return: QDF_STATUS
6323  */
6324 static QDF_STATUS
6325 mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
6326 {
6327 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
6328 
6329 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
6330 	if (!mgmt_rx_reo_pdev_ctx) {
6331 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
6332 		return QDF_STATUS_E_NULL_VALUE;
6333 	}
6334 
6335 	mgmt_rx_reo_pdev_ctx->init_complete = false;
6336 
6337 	return QDF_STATUS_SUCCESS;
6338 }
6339 
6340 /**
6341  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
6342  * snapshot related data structures for a given pdev
6343  * @pdev: pointer to pdev object
6344  *
6345  * Return: QDF_STATUS
6346  */
6347 static QDF_STATUS
6348 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
6349 {
6350 	QDF_STATUS status;
6351 
6352 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
6353 	if (QDF_IS_STATUS_ERROR(status)) {
6354 		mgmt_rx_reo_err("Failed to initialize snapshot value");
6355 		return status;
6356 	}
6357 
6358 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
6359 	if (QDF_IS_STATUS_ERROR(status)) {
6360 		mgmt_rx_reo_err("Failed to initialize snapshot address");
6361 		return status;
6362 	}
6363 
6364 	return QDF_STATUS_SUCCESS;
6365 }
6366 
6367 /**
6368  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
6369  * data structures for a given pdev
6370  * @pdev: pointer to pdev object
6371  *
6372  * Return: QDF_STATUS
6373  */
6374 static QDF_STATUS
6375 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
6376 {
6377 	QDF_STATUS status;
6378 
6379 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
6380 	if (QDF_IS_STATUS_ERROR(status)) {
6381 		mgmt_rx_reo_err("Failed to initialize snapshot value");
6382 		return status;
6383 	}
6384 
6385 	return QDF_STATUS_SUCCESS;
6386 }
6387 
6388 QDF_STATUS
6389 mgmt_rx_reo_attach(struct wlan_objmgr_pdev *pdev)
6390 {
6391 	QDF_STATUS status;
6392 
6393 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
6394 		return QDF_STATUS_SUCCESS;
6395 
6396 	status = mgmt_rx_reo_initialize_snapshots(pdev);
6397 	if (QDF_IS_STATUS_ERROR(status)) {
6398 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
6399 		return status;
6400 	}
6401 
6402 	status = mgmt_rx_reo_set_initialization_complete(pdev);
6403 	if (QDF_IS_STATUS_ERROR(status)) {
6404 		mgmt_rx_reo_err("Failed to set initialization complete");
6405 		return status;
6406 	}
6407 
6408 	return QDF_STATUS_SUCCESS;
6409 }
6410 
6411 QDF_STATUS
6412 mgmt_rx_reo_detach(struct wlan_objmgr_pdev *pdev)
6413 {
6414 	QDF_STATUS status;
6415 
6416 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
6417 		return QDF_STATUS_SUCCESS;
6418 
6419 	status = mgmt_rx_reo_clear_initialization_complete(pdev);
6420 	if (QDF_IS_STATUS_ERROR(status)) {
6421 		mgmt_rx_reo_err("Failed to clear initialization complete");
6422 		return status;
6423 	}
6424 
6425 	status = mgmt_rx_reo_clear_snapshots(pdev);
6426 	if (QDF_IS_STATUS_ERROR(status)) {
6427 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
6428 		return status;
6429 	}
6430 
6431 	return QDF_STATUS_SUCCESS;
6432 }
6433 
6434 QDF_STATUS
6435 mgmt_rx_reo_pdev_obj_create_notification(
6436 	struct wlan_objmgr_pdev *pdev,
6437 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
6438 {
6439 	QDF_STATUS status;
6440 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
6441 
6442 	if (!pdev) {
6443 		mgmt_rx_reo_err("pdev is null");
6444 		status = QDF_STATUS_E_NULL_VALUE;
6445 		goto failure;
6446 	}
6447 
6448 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
6449 		status = QDF_STATUS_SUCCESS;
6450 		goto failure;
6451 	}
6452 
6453 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
6454 	if (QDF_IS_STATUS_ERROR(status)) {
6455 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
6456 		goto failure;
6457 	}
6458 
6459 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
6460 	if (!mgmt_rx_reo_pdev_ctx) {
6461 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
6462 		status = QDF_STATUS_E_NOMEM;
6463 		goto failure;
6464 	}
6465 
6466 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
6467 
6468 	status = mgmt_rx_reo_debug_info_init(pdev);
6469 	if (QDF_IS_STATUS_ERROR(status)) {
6470 		mgmt_rx_reo_err("Failed to initialize debug info");
6471 		status = QDF_STATUS_E_NOMEM;
6472 		goto failure;
6473 	}
6474 
6475 	return QDF_STATUS_SUCCESS;
6476 
6477 failure:
6478 	if (mgmt_rx_reo_pdev_ctx)
6479 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
6480 
6481 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
6482 
6483 	return status;
6484 }
6485 
6486 QDF_STATUS
6487 mgmt_rx_reo_pdev_obj_destroy_notification(
6488 	struct wlan_objmgr_pdev *pdev,
6489 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
6490 {
6491 	QDF_STATUS status;
6492 
6493 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
6494 		return QDF_STATUS_SUCCESS;
6495 
6496 	status = mgmt_rx_reo_debug_info_deinit(pdev);
6497 	if (QDF_IS_STATUS_ERROR(status)) {
6498 		mgmt_rx_reo_err("Failed to de-initialize debug info");
6499 		return status;
6500 	}
6501 
6502 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
6503 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
6504 
6505 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
6506 	if (QDF_IS_STATUS_ERROR(status)) {
6507 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
6508 		return status;
6509 	}
6510 
6511 	return QDF_STATUS_SUCCESS;
6512 }
6513 
QDF_STATUS
mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc)
{
	/* Placeholder: no psoc-level Rx REO state is set up at present */
	return QDF_STATUS_SUCCESS;
}
6519 
QDF_STATUS
mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc)
{
	/* Placeholder: no psoc-level Rx REO state needs teardown at present */
	return QDF_STATUS_SUCCESS;
}
6525 
6526 bool
6527 mgmt_rx_reo_is_simulation_in_progress(uint8_t ml_grp_id)
6528 {
6529 	struct mgmt_rx_reo_context *reo_context;
6530 
6531 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6532 	if (!reo_context) {
6533 		mgmt_rx_reo_err("reo context is null");
6534 		return false;
6535 	}
6536 
6537 	return reo_context->simulation_in_progress;
6538 }
6539 
6540 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
6541 QDF_STATUS
6542 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
6543 {
6544 	struct mgmt_rx_reo_context *reo_context;
6545 	QDF_STATUS status;
6546 
6547 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6548 	if (!reo_context) {
6549 		mgmt_rx_reo_err("reo context is null");
6550 		return QDF_STATUS_E_NULL_VALUE;
6551 	}
6552 
6553 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
6554 	if (QDF_IS_STATUS_ERROR(status)) {
6555 		mgmt_rx_reo_err("Failed to print ingress frame stats");
6556 		return status;
6557 	}
6558 
6559 	return QDF_STATUS_SUCCESS;
6560 }
6561 
6562 QDF_STATUS
6563 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
6564 {
6565 	struct mgmt_rx_reo_context *reo_context;
6566 	QDF_STATUS status;
6567 
6568 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6569 	if (!reo_context) {
6570 		mgmt_rx_reo_err("reo context is null");
6571 		return QDF_STATUS_E_NULL_VALUE;
6572 	}
6573 
6574 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
6575 							    num_frames);
6576 	if (QDF_IS_STATUS_ERROR(status)) {
6577 		mgmt_rx_reo_err("Failed to print ingress frame info");
6578 		return status;
6579 	}
6580 
6581 	return QDF_STATUS_SUCCESS;
6582 }
6583 
6584 QDF_STATUS
6585 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
6586 {
6587 	struct mgmt_rx_reo_context *reo_context;
6588 	QDF_STATUS status;
6589 
6590 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6591 	if (!reo_context) {
6592 		mgmt_rx_reo_err("reo context is null");
6593 		return QDF_STATUS_E_NULL_VALUE;
6594 	}
6595 
6596 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
6597 	if (QDF_IS_STATUS_ERROR(status)) {
6598 		mgmt_rx_reo_err("Failed to print egress frame stats");
6599 		return status;
6600 	}
6601 
6602 	return QDF_STATUS_SUCCESS;
6603 }
6604 
6605 QDF_STATUS
6606 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
6607 {
6608 	struct mgmt_rx_reo_context *reo_context;
6609 	QDF_STATUS status;
6610 
6611 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6612 	if (!reo_context) {
6613 		mgmt_rx_reo_err("reo context is null");
6614 		return QDF_STATUS_E_NULL_VALUE;
6615 	}
6616 
6617 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
6618 							   num_frames);
6619 	if (QDF_IS_STATUS_ERROR(status)) {
6620 		mgmt_rx_reo_err("Failed to print egress frame info");
6621 		return status;
6622 	}
6623 
6624 	return QDF_STATUS_SUCCESS;
6625 }
6626 #else
/* Stub implementations used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is not
 * compiled in: each returns success without printing anything.
 */
QDF_STATUS
mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
{
	return QDF_STATUS_SUCCESS;
}
6650 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
6651