xref: /wlan-dirver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision edf9fd0441a5a3b63c14b7bb754f301dd8d5e57c)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 
/* Singleton mgmt rx reorder context shared across all pdevs/MLO links */
static struct mgmt_rx_reo_context g_rx_reo_ctx;

/* Accessor for the global reorder context; always returns a valid pointer */
#define mgmt_rx_reo_get_context()        (&g_rx_reo_ctx)
31 
#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)

/**
 * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
 * @ctr1: Management packet counter1
 * @ctr2: Management packet counter2
 *
 * A plain relational operator is not usable here because the counters wrap
 * around. The counters are guaranteed to never differ by more than half the
 * range of the data type, so the ordering can be inferred from the wrapped
 * difference instead.
 *
 * Return: true if @ctr1 is greater than or equal to @ctr2, else false
 */
static inline bool
mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
{
	/* Wrapped difference; <= half range means ctr1 is not behind ctr2 */
	return (uint16_t)(ctr1 - ctr2) <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
}
54 
55 /**
56  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
57  * @ctr1: Management packet counter1
58  * @ctr2: Management packet counter2
59  *
60  * We can't directly use the subtract operator here because the counters can
61  * overflow. But these counters have a property that the difference between
62  * them can never be greater than half the range of the data type.
63  * We can make use of this condition to detect whichone is actually greater and
64  * return the difference accordingly.
65  *
66  * Return: Difference between @ctr1 and @crt2
67  */
68 static inline int
69 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
70 {
71 	uint16_t delta = ctr1 - ctr2;
72 
73 	/**
74 	 * if delta is greater than half the range (i.e, ctr1 is actually
75 	 * smaller than ctr2), then the result should be a negative number.
76 	 * subtracting the entire range should give the correct value.
77 	 */
78 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
79 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
80 
81 	return delta;
82 }
83 
#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte()-Compare given global timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * A plain relational operator is not usable here because the timestamps wrap
 * around. The timestamps are guaranteed to never differ by more than half the
 * range of the data type, so the ordering can be inferred from the wrapped
 * difference instead.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	/* Wrapped difference; <= half range means ts1 is not behind ts2 */
	return (uint32_t)(ts1 - ts2) <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
}
104 
105 /**
106  * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame
107  * is stale
108  * @ts_last_released_frame: pointer to global time stamp of the last frame
109  * removed from the reorder list
110  * @global_timestamp: global time stamp of the management frame
111  *
112  * This API checks whether the management frame with global time stamp
113  * @global_timestamp is stale. Any frame older than the last frame delivered to
114  * upper layer is a stale frame. This could happen when we have to deliver
115  * frames out of order due to time out or list size limit. The frames which
116  * arrive late at host and with time stamp lesser than the last delivered frame
117  * are stale frames and they need to be handled differently.
118  *
119  * Return: true if the given management frame is stale.
120  */
121 static bool
122 mgmt_rx_reo_is_stale_frame(
123 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame,
124 		uint32_t global_timestamp)
125 {
126 	return ts_last_released_frame->valid &&
127 	       !mgmt_rx_reo_compare_global_timestamps_gte(global_timestamp,
128 					ts_last_released_frame->global_ts);
129 }
130 
131 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
132 /**
133  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links from the reo
134  * context object
135  * @reo_context: Pointer to reo context object
136  *
137  * Return: On success returns number of MLO HW links. On failure
138  * returns MGMT_RX_REO_INVALID_NUM_LINKS.
139  */
140 static int8_t
141 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
142 	if (!reo_context) {
143 		mgmt_rx_reo_err("Mgmt reo context is null");
144 		return MGMT_RX_REO_INVALID_NUM_LINKS;
145 	}
146 
147 	return reo_context->num_mlo_links;
148 }
149 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Handle premature
 * delivery of a management frame
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * No-op stub used when mgmt rx reorder simulation support is compiled out;
 * the sim build tracks prematurely delivered frames instead.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	return QDF_STATUS_SUCCESS;
}
157 
/**
 * mgmt_rx_reo_handle_stale_frame() - Handle a stale management frame
 * @reo_list: Pointer to reorder list
 * @desc: Pointer to frame descriptor
 *
 * No-op stub used when mgmt rx reorder simulation support is compiled out;
 * the sim build removes the frame from its stale master frame list.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
164 #else
165 /**
166  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
167  * simulation context object
168  * @sim_context: Pointer to reo simulation context object
169  *
170  * Number of MLO links will be equal to number of pdevs in the
171  * system. In case of simulation all the pdevs are assumed
172  * to have MLO capability.
173  *
174  * Return: On success returns number of MLO HW links. On failure
175  * returns -1.
176  */
177 static int8_t
178 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
179 {
180 	uint8_t num_mlo_links;
181 
182 	if (!sim_context) {
183 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
184 		return MGMT_RX_REO_INVALID_NUM_LINKS;
185 	}
186 
187 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
188 
189 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
190 
191 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
192 
193 	return num_mlo_links;
194 }
195 
196 /**
197  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
198  * context object
199  * @reo_context: Pointer to reo context object
200  *
201  * Return: On success returns number of MLO HW links. On failure
202  * returns -1.
203  */
204 static int8_t
205 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context) {
206 	if (!reo_context) {
207 		mgmt_rx_reo_err("Mgmt reo context is null");
208 		return MGMT_RX_REO_INVALID_NUM_LINKS;
209 	}
210 
211 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
212 }
213 
214 /**
215  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
216  * rx reorder simulation context
217  *
218  * Return: On success returns the pointer to management rx reorder
219  * simulation context. On failure returns NULL.
220  */
221 static struct mgmt_rx_reo_sim_context *
222 mgmt_rx_reo_sim_get_context(void)
223 {
224 	struct mgmt_rx_reo_context *reo_context;
225 
226 	reo_context = mgmt_rx_reo_get_context();
227 	if (!reo_context) {
228 		mgmt_rx_reo_err("Mgmt reo context is null");
229 		return NULL;
230 	}
231 
232 	return &reo_context->sim_context;
233 }
234 
235 int8_t
236 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
237 {
238 	struct mgmt_rx_reo_sim_context *sim_context;
239 	int8_t link_id;
240 
241 	sim_context = mgmt_rx_reo_sim_get_context();
242 	if (!sim_context) {
243 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
244 		return MGMT_RX_REO_INVALID_LINK_ID;
245 	}
246 
247 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
248 
249 	for (link_id = 0; link_id <
250 	     sim_context->link_id_to_pdev_map.num_mlo_links; link_id++)
251 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
252 			break;
253 
254 	/* pdev is not found in map */
255 	if (link_id == sim_context->link_id_to_pdev_map.num_mlo_links)
256 		link_id = -1;
257 
258 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
259 
260 	return link_id;
261 }
262 
263 struct wlan_objmgr_pdev *
264 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
265 					  wlan_objmgr_ref_dbgid refdbgid)
266 {
267 	struct mgmt_rx_reo_sim_context *sim_context;
268 	struct wlan_objmgr_pdev *pdev;
269 	int8_t num_mlo_links;
270 	QDF_STATUS status;
271 
272 	sim_context = mgmt_rx_reo_sim_get_context();
273 	if (!sim_context) {
274 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
275 		return NULL;
276 	}
277 
278 	num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
279 	if (num_mlo_links <= 0) {
280 		mgmt_rx_reo_err("invalid number of MLO links %d",
281 				num_mlo_links);
282 		return NULL;
283 	}
284 
285 	if (mlo_link_id >= num_mlo_links) {
286 		mgmt_rx_reo_err("Invalid link id %u, total links = %d",
287 				mlo_link_id, num_mlo_links);
288 		return NULL;
289 	}
290 
291 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
292 
293 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
294 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
295 	if (QDF_IS_STATUS_ERROR(status)) {
296 		mgmt_rx_reo_err("Failed to get pdev reference");
297 		return NULL;
298 	}
299 
300 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
301 
302 	return pdev;
303 }
304 
305 /**
306  * mgmt_rx_reo_handle_potential_premature_delivery - Helper API to handle
307  * premature delivery.
308  * @reo_context: Pointer to reorder list
309  * @global_timestamp: Global time stamp of the current management frame
310  *
311  * Sometimes we have to deliver a management frame to the upper layers even
312  * before its wait count reaching zero. This is called premature delivery.
313  * Premature delivery could happen due to time out or reorder list overflow.
314  *
315  * Return: QDF_STATUS
316  */
317 static QDF_STATUS
318 mgmt_rx_reo_handle_potential_premature_delivery(
319 				struct mgmt_rx_reo_context *reo_context,
320 				uint32_t global_timestamp)
321 {
322 	qdf_list_t stale_frame_list_temp;
323 	QDF_STATUS status;
324 	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
325 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
326 	struct mgmt_rx_reo_sim_context *sim_context;
327 	struct mgmt_rx_reo_master_frame_list *master_frame_list;
328 
329 	if (!reo_context)
330 		return QDF_STATUS_E_NULL_VALUE;
331 
332 	sim_context = &reo_context->sim_context;
333 	master_frame_list = &sim_context->master_frame_list;
334 
335 	qdf_spin_lock(&master_frame_list->lock);
336 
337 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
338 		if (cur_entry->params.global_timestamp == global_timestamp)
339 			break;
340 
341 		latest_stale_frame = cur_entry;
342 	}
343 
344 	if (latest_stale_frame) {
345 		qdf_list_create(&stale_frame_list_temp,
346 				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);
347 
348 		status = qdf_list_split(&stale_frame_list_temp,
349 					&master_frame_list->pending_list,
350 					&latest_stale_frame->node);
351 		if (QDF_IS_STATUS_ERROR(status))
352 			goto exit_unlock_master_frame_list;
353 
354 		status = qdf_list_join(&master_frame_list->stale_list,
355 				       &stale_frame_list_temp);
356 		if (QDF_IS_STATUS_ERROR(status))
357 			goto exit_unlock_master_frame_list;
358 	}
359 
360 	status = QDF_STATUS_SUCCESS;
361 
362 exit_unlock_master_frame_list:
363 	qdf_spin_unlock(&master_frame_list->lock);
364 
365 	return status;
366 }
367 
368 /**
369  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
370  * stale management frame list
371  * @master_frame_list: pointer to master management frame list
372  * @reo_params: pointer to reo params
373  *
374  * This API removes frames from the stale management frame list.
375  *
376  * Return: QDF_STATUS of operation
377  */
378 static QDF_STATUS
379 mgmt_rx_reo_sim_remove_frame_from_stale_list(
380 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
381 		const struct mgmt_rx_reo_params *reo_params)
382 {
383 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
384 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
385 	QDF_STATUS status;
386 
387 	if (!master_frame_list || !reo_params)
388 		return QDF_STATUS_E_NULL_VALUE;
389 
390 	qdf_spin_lock(&master_frame_list->lock);
391 
392 	/**
393 	 * Stale frames can come in any order at host. Do a linear search and
394 	 * remove the matching entry.
395 	 */
396 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
397 		if (cur_entry->params.link_id == reo_params->link_id &&
398 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
399 		    cur_entry->params.global_timestamp ==
400 		    reo_params->global_timestamp) {
401 			matching_entry = cur_entry;
402 			break;
403 		}
404 	}
405 
406 	if (!matching_entry) {
407 		qdf_spin_unlock(&master_frame_list->lock);
408 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
409 		qdf_assert_always(0);
410 	}
411 
412 	status = qdf_list_remove_node(&master_frame_list->stale_list,
413 				      &matching_entry->node);
414 
415 	if (QDF_IS_STATUS_ERROR(status)) {
416 		qdf_spin_unlock(&master_frame_list->lock);
417 		return status;
418 	}
419 
420 	qdf_mem_free(matching_entry);
421 
422 	qdf_spin_unlock(&master_frame_list->lock);
423 
424 	return QDF_STATUS_SUCCESS;
425 }
426 
427 /**
428  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
429  * @reo_list: Pointer to reorder list
430  * @desc: Pointer to frame descriptor
431  *
432  * Return: QDF_STATUS of operation
433  */
434 static QDF_STATUS
435 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
436 			       struct mgmt_rx_reo_frame_descriptor *desc)
437 {
438 	QDF_STATUS status;
439 	struct mgmt_rx_reo_context *reo_context;
440 	struct mgmt_rx_reo_sim_context *sim_context;
441 	struct mgmt_rx_reo_params *reo_params;
442 
443 	if (!reo_list || !desc)
444 		return QDF_STATUS_E_NULL_VALUE;
445 
446 	/* FW consumed/Error frames are already removed */
447 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
448 		return QDF_STATUS_SUCCESS;
449 
450 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
451 	if (!reo_context)
452 		return QDF_STATUS_E_NULL_VALUE;
453 
454 	sim_context = &reo_context->sim_context;
455 
456 	reo_params = desc->rx_params->reo_params;
457 	if (!reo_params)
458 		return QDF_STATUS_E_NULL_VALUE;
459 
460 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
461 				&sim_context->master_frame_list, reo_params);
462 
463 	return status;
464 }
465 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
466 
467 /**
468  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
469  * whether the current frame getting delivered to upper layer is a premature
470  * delivery
471  * @release_reason: release reason
472  *
473  * Return: true for a premature delivery
474  */
475 static bool
476 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
477 {
478 	return !(release_reason &
479 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
480 }
481 
482 /**
483  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
484  * MGMT Rx REO module
485  * @pdev: pointer to pdev object
486  *
487  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
488  * else NULL
489  */
490 static struct mgmt_rx_reo_pdev_info *
491 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
492 {
493 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
494 
495 	if (!pdev) {
496 		mgmt_rx_reo_err("pdev is null");
497 		return NULL;
498 	}
499 
500 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
501 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
502 						      WLAN_UMAC_COMP_MGMT_TXRX);
503 
504 	if (!mgmt_txrx_pdev_ctx) {
505 		mgmt_rx_reo_err("mgmt txrx context is NULL");
506 		return NULL;
507 	}
508 
509 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
510 }
511 
512 /**
513  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
514  * frames an incoming frame should wait for before it gets delivered.
515  * @in_frame_pdev: pdev on which this frame is received
516  * @in_frame_params: Pointer to MGMT REO parameters of this frame
517  * @num_mlo_links: Number of MLO links
518  * @wait_count: Pointer to wait count data structure to fill the calculated
519  * wait count
520  *
521  * Each frame carrys a MGMT pkt number which is local to that link, and a
522  * timestamp which is global across all the links. MAC HW and FW also captures
523  * the same details of the last frame that they have seen. Host also maintains
524  * the details of the last frame it has seen. In total, there are 4 snapshots.
525  * 1. MAC HW snapshot - latest frame seen at MAC HW
526  * 2. FW forwarded snapshot- latest frame forwarded to the Host
527  * 3. FW consumed snapshot - latest frame consumed by the FW
528  * 4. Host/FW consumed snapshot - latest frame seen by the Host
529  * By using all these snapshots, this function tries to compute the wait count
530  * for a given incoming frame on all links.
531  *
532  * Return: QDF_STATUS of operation
533  */
534 static QDF_STATUS
535 wlan_mgmt_rx_reo_algo_calculate_wait_count(
536 	struct wlan_objmgr_pdev *in_frame_pdev,
537 	struct mgmt_rx_reo_params *in_frame_params,
538 	uint8_t num_mlo_links,
539 	struct mgmt_rx_reo_wait_count *wait_count)
540 {
541 	QDF_STATUS status;
542 	uint8_t link;
543 	int8_t in_frame_link;
544 	int frames_pending, delta_fwd_host;
545 	uint8_t snapshot_id;
546 	struct wlan_objmgr_pdev *pdev;
547 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
548 	struct mgmt_rx_reo_snapshot *address;
549 	struct mgmt_rx_reo_snapshot_params snapshot_params
550 		[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
551 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
552 					    *fw_consumed_ss, *host_ss;
553 
554 	if (!in_frame_params) {
555 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
556 		return QDF_STATUS_E_NULL_VALUE;
557 	}
558 
559 	if (!wait_count) {
560 		mgmt_rx_reo_err("wait count pointer to be filled is NULL");
561 		return QDF_STATUS_E_NULL_VALUE;
562 	}
563 
564 	if (!in_frame_pdev) {
565 		mgmt_rx_reo_err("pdev is null");
566 		return QDF_STATUS_E_NULL_VALUE;
567 	}
568 
569 	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);
570 
571 	/* Get the MLO link ID of incoming frame */
572 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
573 	qdf_assert_always(in_frame_link >= 0);
574 
575 	/* Iterate over all the MLO links */
576 	for (link = 0; link < num_mlo_links; link++) {
577 		/* No need wait for any frames on the same link */
578 		if (link == in_frame_link) {
579 			frames_pending = 0;
580 			goto update_pending_frames;
581 		}
582 
583 		pdev = wlan_get_pdev_from_mlo_link_id(link,
584 						      WLAN_MGMT_RX_REO_ID);
585 
586 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
587 		if (!rx_reo_pdev_ctx) {
588 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
589 					pdev);
590 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
591 			return QDF_STATUS_E_FAILURE;
592 		}
593 
594 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
595 
596 		mgmt_rx_reo_debug("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
597 				  link, host_ss->valid, host_ss->mgmt_pkt_ctr,
598 				  host_ss->global_timestamp);
599 
600 		/**
601 		 * Ideally, the incoming frame has to wait for only those frames
602 		 * (on other links) which meet all the below criterion.
603 		 * 1. Frame's timestamp is less than incoming frame's
604 		 * 2. Frame is supposed to be consumed by the Host
605 		 * 3. Frame is not yet seen by the Host.
606 		 * We may not be able to compute the exact optimal wait count
607 		 * because HW/FW provides a limited assist.
608 		 * This algorithm tries to get the best estimate of wait count
609 		 * by not waiting for those frames where we have a conclusive
610 		 * evidence that we don't have to wait for those frames.
611 		 */
612 
613 		/**
614 		 * If this link has already seen a frame whose timestamp is
615 		 * greater than or equal to incoming frame's timestamp,
616 		 * then no need to wait for any frames on this link.
617 		 * If the totalt wait count becomes zero, then the policy on
618 		 * whether to deliver such a frame to upper layers is handled
619 		 * separately.
620 		 */
621 		if (host_ss->valid &&
622 		    mgmt_rx_reo_compare_global_timestamps_gte(
623 				host_ss->global_timestamp,
624 				in_frame_params->global_timestamp)) {
625 			frames_pending = 0;
626 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
627 			goto update_pending_frames;
628 		}
629 
630 		snapshot_id = 0;
631 		/* Read all the shared snapshots */
632 		while (snapshot_id <
633 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
634 			address = rx_reo_pdev_ctx->
635 				   host_target_shared_snapshot[snapshot_id],
636 
637 			qdf_mem_zero(&snapshot_params[snapshot_id],
638 				     sizeof(snapshot_params[snapshot_id]));
639 
640 			status = tgt_mgmt_rx_reo_read_snapshot(
641 						pdev, address, snapshot_id,
642 						&snapshot_params[snapshot_id]);
643 
644 			/* Read operation shouldn't fail */
645 			if (QDF_IS_STATUS_ERROR(status)) {
646 				mgmt_rx_reo_err("snapshot(%d) read failed on"
647 						"link (%d)", snapshot_id, link);
648 				wlan_objmgr_pdev_release_ref(
649 						pdev, WLAN_MGMT_RX_REO_ID);
650 				return status;
651 			}
652 
653 			/* If snapshot is valid, save it in the pdev context */
654 			if (snapshot_params[snapshot_id].valid) {
655 				rx_reo_pdev_ctx->
656 				   last_valid_shared_snapshot[snapshot_id] =
657 				   snapshot_params[snapshot_id];
658 			}
659 			snapshot_id++;
660 		}
661 
662 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
663 
664 		mac_hw_ss = &snapshot_params
665 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
666 		fw_forwarded_ss = &snapshot_params
667 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
668 		fw_consumed_ss = &snapshot_params
669 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
670 
671 		mgmt_rx_reo_debug("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
672 				  link, mac_hw_ss->valid,
673 				  mac_hw_ss->mgmt_pkt_ctr,
674 				  mac_hw_ss->global_timestamp);
675 		mgmt_rx_reo_debug("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
676 				  link, fw_forwarded_ss->valid,
677 				  fw_forwarded_ss->mgmt_pkt_ctr,
678 				  fw_forwarded_ss->global_timestamp);
679 		mgmt_rx_reo_debug("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
680 				  link, fw_consumed_ss->valid,
681 				  fw_consumed_ss->mgmt_pkt_ctr,
682 				  fw_consumed_ss->global_timestamp);
683 
684 		/**
685 		 * If MAC HW snapshot is invalid, we need to assume the worst
686 		 * and wait for UINT_MAX frames, but this should not be a
687 		 * concern because if subsequent frames read a valid snapshot,
688 		 * the REO algorithm will take care of updating the wait count
689 		 * of this frame as well.
690 		 * There may be more optimal ways to handle invalid snapshot
691 		 * reads.  For e.g., making use of previously read valid
692 		 * snapshot, but they come with complex logic.
693 		 * Keeping this simple for now.
694 		 */
695 		if (!mac_hw_ss->valid) {
696 			wait_count->per_link_count[link] = UINT_MAX;
697 			wait_count->total_count += UINT_MAX;
698 			mgmt_rx_reo_debug("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
699 					  link,
700 					  wait_count->per_link_count[link],
701 					  wait_count->total_count);
702 			continue;
703 		}
704 
705 		/**
706 		 * For starters, we only have to wait for the frames that are
707 		 * seen by MAC HW but not yet seen by Host. The frames which
708 		 * reach MAC HW later are guaranteed to have a timestamp
709 		 * greater than incoming frame's timestamp.
710 		 */
711 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
712 					mac_hw_ss->mgmt_pkt_ctr,
713 					host_ss->mgmt_pkt_ctr);
714 		qdf_assert_always(frames_pending >= 0);
715 
716 		if (mgmt_rx_reo_compare_global_timestamps_gte(
717 					mac_hw_ss->global_timestamp,
718 					in_frame_params->global_timestamp)) {
719 			/**
720 			 * Last frame seen at MAC HW has timestamp greater than
721 			 * or equal to incoming frame's timestamp. So no need to
722 			 * wait for that last frame, but we can't conclusively
723 			 * say anything about timestamp of frames before the
724 			 * last frame, so try to wait for all of those frames.
725 			 */
726 			frames_pending--;
727 			qdf_assert_always(frames_pending >= 0);
728 
729 			if (fw_consumed_ss->valid &&
730 			    mgmt_rx_reo_compare_global_timestamps_gte(
731 				fw_consumed_ss->global_timestamp,
732 				in_frame_params->global_timestamp)) {
733 				/**
734 				 * Last frame consumed by the FW has timestamp
735 				 * greater than or equal to incoming frame's.
736 				 * That means all the frames from
737 				 * fw_consumed_ss->mgmt_pkt_ctr to
738 				 * mac_hw->mgmt_pkt_ctr will have timestamp
739 				 * greater than or equal to incoming frame's and
740 				 * hence, no need to wait for those frames.
741 				 * We just need to wait for frames from
742 				 * host_ss->mgmt_pkt_ctr to
743 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
744 				 * better estimate over the above estimate,
745 				 * so update frames_pending.
746 				 */
747 				frames_pending =
748 				  mgmt_rx_reo_subtract_pkt_ctrs(
749 				      fw_consumed_ss->mgmt_pkt_ctr,
750 				      host_ss->mgmt_pkt_ctr) - 1;
751 
752 				qdf_assert_always(frames_pending >= 0);
753 
754 				/**
755 				 * Last frame forwarded to Host has timestamp
756 				 * less than incoming frame's. That means all
757 				 * the frames starting from
758 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
759 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
760 				 * the FW and hence, no need to wait for those
761 				 * frames. We just need to wait for frames
762 				 * from host_ss->mgmt_pkt_ctr to
763 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
764 				 * better estimate over the above estimate,
765 				 * so update frames_pending.
766 				 */
767 				if (fw_forwarded_ss->valid &&
768 				    !mgmt_rx_reo_compare_global_timestamps_gte(
769 					fw_forwarded_ss->global_timestamp,
770 					in_frame_params->global_timestamp)) {
771 					frames_pending =
772 					  mgmt_rx_reo_subtract_pkt_ctrs(
773 					      fw_forwarded_ss->mgmt_pkt_ctr,
774 					      host_ss->mgmt_pkt_ctr);
775 
776 					/**
777 					 * frames_pending can be negative in
778 					 * cases whene there are no frames
779 					 * getting forwarded to the Host. No
780 					 * need to wait for any frames in that
781 					 * case.
782 					 */
783 					if (frames_pending < 0)
784 						frames_pending = 0;
785 				}
786 			}
787 
788 			/**
789 			 * Last frame forwarded to Host has timestamp greater
790 			 * than or equal to incoming frame's. That means all the
791 			 * frames from fw_forwarded->mgmt_pkt_ctr to
792 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
793 			 * or equal to incoming frame's and hence, no need to
794 			 * wait for those frames. We may have to just wait for
795 			 * frames from host_ss->mgmt_pkt_ctr to
796 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
797 			 */
798 			if (fw_forwarded_ss->valid &&
799 			    mgmt_rx_reo_compare_global_timestamps_gte(
800 				fw_forwarded_ss->global_timestamp,
801 				in_frame_params->global_timestamp)) {
802 				delta_fwd_host =
803 				  mgmt_rx_reo_subtract_pkt_ctrs(
804 				    fw_forwarded_ss->mgmt_pkt_ctr,
805 				    host_ss->mgmt_pkt_ctr) - 1;
806 
807 				qdf_assert_always(delta_fwd_host >= 0);
808 
809 				/**
810 				 * This will be a better estimate over the one
811 				 * we computed using mac_hw_ss but this may or
812 				 * may not be a better estimate over the
813 				 * one we computed using fw_consumed_ss.
814 				 * When timestamps of both fw_consumed_ss and
815 				 * fw_forwarded_ss are greater than incoming
816 				 * frame's but timestamp of fw_consumed_ss is
817 				 * smaller than fw_forwarded_ss, then
818 				 * frames_pending will be smaller than
819 				 * delta_fwd_host, the reverse will be true in
820 				 * other cases. Instead of checking for all
821 				 * those cases, just waiting for the minimum
822 				 * among these two should be sufficient.
823 				 */
824 				frames_pending = qdf_min(frames_pending,
825 							 delta_fwd_host);
826 				qdf_assert_always(frames_pending >= 0);
827 			}
828 		}
829 
830 update_pending_frames:
831 			qdf_assert_always(frames_pending >= 0);
832 
833 			wait_count->per_link_count[link] = frames_pending;
834 			wait_count->total_count += frames_pending;
835 
836 			mgmt_rx_reo_debug("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
837 					  link,
838 					  wait_count->per_link_count[link],
839 					  wait_count->total_count);
840 	}
841 
842 	return QDF_STATUS_SUCCESS;
843 }
844 
/**
 * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
 * information about a reo list entry for debug purposes.
 * @link_id: link id
 * @mgmt_pkt_ctr: management packet counter
 * @global_timestamp: global time stamp
 * @wait_count: wait count values
 * @status: status of the entry in the list
 * @entry: pointer to reo list entry
 */
struct mgmt_rx_reo_list_entry_debug_info {
	uint8_t link_id;
	uint16_t mgmt_pkt_ctr;
	uint32_t global_timestamp;
	struct mgmt_rx_reo_wait_count wait_count;
	uint32_t status;
	struct mgmt_rx_reo_list_entry *entry;
};
863 
864 /**
865  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
866  * @reo_list: Pointer to reorder list
867  * @num_mlo_links: Number of MLO HW links
868  *
869  * Return: QDF_STATUS
870  */
871 static QDF_STATUS
872 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list,
873 			 uint8_t num_mlo_links)
874 {
875 	uint32_t reo_list_size;
876 	uint32_t index;
877 	struct mgmt_rx_reo_list_entry *cur_entry;
878 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
879 
880 	if (!reo_list) {
881 		mgmt_rx_reo_err("Pointer to reo list is null");
882 		return QDF_STATUS_E_NULL_VALUE;
883 	}
884 
885 	if (num_mlo_links == 0 || num_mlo_links > MGMT_RX_REO_MAX_LINKS) {
886 		mgmt_rx_reo_err("Invalid number of links %u", num_mlo_links);
887 		return QDF_STATUS_E_INVAL;
888 	}
889 
890 	qdf_spin_lock_bh(&reo_list->list_lock);
891 
892 	reo_list_size = qdf_list_size(&reo_list->list);
893 
894 	if (reo_list_size == 0) {
895 		qdf_spin_unlock_bh(&reo_list->list_lock);
896 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
897 				  reo_list_size);
898 		return QDF_STATUS_SUCCESS;
899 	}
900 
901 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
902 	if (!debug_info) {
903 		qdf_spin_unlock_bh(&reo_list->list_lock);
904 		mgmt_rx_reo_err("Memory allocation failed");
905 		return QDF_STATUS_E_NOMEM;
906 	}
907 
908 	index = 0;
909 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
910 		debug_info[index].link_id =
911 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
912 		debug_info[index].mgmt_pkt_ctr =
913 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
914 		debug_info[index].global_timestamp =
915 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
916 		debug_info[index].wait_count = cur_entry->wait_count;
917 		debug_info[index].status = cur_entry->status;
918 		debug_info[index].entry = cur_entry;
919 
920 		++index;
921 	}
922 
923 	qdf_spin_unlock_bh(&reo_list->list_lock);
924 
925 	mgmt_rx_reo_debug("Reorder list");
926 	mgmt_rx_reo_debug("##################################################");
927 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
928 			  reo_list_size);
929 	for (index = 0; index < reo_list_size; index++) {
930 		uint8_t link_id;
931 
932 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
933 				  index, debug_info[index].link_id,
934 				  debug_info[index].global_timestamp,
935 				  debug_info[index].mgmt_pkt_ctr,
936 				  debug_info[index].status,
937 				  debug_info[index].entry);
938 
939 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
940 				  debug_info[index].wait_count.total_count);
941 
942 		for (link_id = 0; link_id < num_mlo_links; link_id++)
943 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
944 					  link_id, debug_info[index].wait_count.
945 					  per_link_count[link_id]);
946 	}
947 	mgmt_rx_reo_debug("##################################################");
948 
949 	qdf_mem_free(debug_info);
950 
951 	return QDF_STATUS_SUCCESS;
952 }
953 
954 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
955 /**
956  * mgmt_rx_reo_print_egress_frame_stats() - API to print the stats
957  * related to frames going out of the reorder module
958  * @reo_ctx: Pointer to reorder context
959  *
960  * API to print the stats related to frames going out of the management
961  * Rx reorder module.
962  *
963  * Return: QDF_STATUS
964  */
965 static QDF_STATUS
966 mgmt_rx_reo_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
967 {
968 	struct reo_egress_frame_stats *stats;
969 	uint8_t link_id;
970 	uint8_t reason;
971 	int8_t num_mlo_links;
972 	uint64_t total_delivery_attempts_count = 0;
973 	uint64_t total_delivery_success_count = 0;
974 	uint64_t total_premature_delivery_count = 0;
975 	uint64_t delivery_count_per_link[MGMT_RX_REO_MAX_LINKS] = {0};
976 	uint64_t delivery_count_per_reason[MGMT_RX_REO_RELEASE_REASON_MAX] = {0};
977 	uint64_t total_delivery_count = 0;
978 
979 	if (!reo_ctx)
980 		return QDF_STATUS_E_NULL_VALUE;
981 
982 	stats = &reo_ctx->egress_frame_debug_info.stats;
983 
984 	num_mlo_links = mgmt_rx_reo_get_num_mlo_links(reo_ctx);
985 	qdf_assert_always(num_mlo_links > 0);
986 	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);
987 
988 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
989 		total_delivery_attempts_count +=
990 				stats->delivery_attempts_count[link_id];
991 		total_delivery_success_count +=
992 				stats->delivery_success_count[link_id];
993 		total_premature_delivery_count +=
994 				stats->premature_delivery_count[link_id];
995 	}
996 
997 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
998 		for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX;
999 		     reason++)
1000 			delivery_count_per_link[link_id] +=
1001 				stats->delivery_count[link_id][reason];
1002 		total_delivery_count += delivery_count_per_link[link_id];
1003 	}
1004 	for (reason = 0; reason <= MGMT_RX_REO_RELEASE_REASON_MAX; reason++)
1005 		for (link_id = 0; link_id < num_mlo_links; link_id++)
1006 			delivery_count_per_reason[reason] +=
1007 				stats->delivery_count[link_id][reason];
1008 
1009 	mgmt_rx_reo_err("Egress frame stats:");
1010 	mgmt_rx_reo_err("\t1) Delivery related stats:");
1011 	mgmt_rx_reo_err("\t------------------------------------------");
1012 	mgmt_rx_reo_err("\t|link id   |Attempts |Success |Premature |");
1013 	mgmt_rx_reo_err("\t|          | count   | count  | count    |");
1014 	mgmt_rx_reo_err("\t------------------------------------------");
1015 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
1016 		mgmt_rx_reo_err("\t|%10u|%9llu|%8llu|%10llu|", link_id,
1017 				stats->delivery_attempts_count[link_id],
1018 				stats->delivery_success_count[link_id],
1019 				stats->premature_delivery_count[link_id]);
1020 	mgmt_rx_reo_err("\t------------------------------------------");
1021 	}
1022 	mgmt_rx_reo_err("\t           |%9llu|%8llu|%10llu|\n\n",
1023 			total_delivery_attempts_count,
1024 			total_delivery_success_count,
1025 			total_premature_delivery_count);
1026 
1027 	mgmt_rx_reo_err("\t2) Delivery reason related stats");
1028 	mgmt_rx_reo_err("\tRelease Reason Values:-");
1029 	mgmt_rx_reo_err("\tRELEASE_REASON_ZERO_WAIT_COUNT - 0x%lx",
1030 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
1031 	mgmt_rx_reo_err("\tRELEASE_REASON_AGED_OUT - 0x%lx",
1032 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT);
1033 	mgmt_rx_reo_err("\tRELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
1034 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
1035 	mgmt_rx_reo_err("\tRELEASE_REASON_LIST_MAX_SIZE_EXCEEDED - 0x%lx",
1036 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED);
1037 	mgmt_rx_reo_err("\t--------------------------------------------------");
1038 	mgmt_rx_reo_err("\t|Release Reason/ |       |       |       |       |");
1039 	mgmt_rx_reo_err("\t|link id         |      0|      1|      2|      3|");
1040 	mgmt_rx_reo_err("\t---------------------------------------------------------");
1041 
1042 	for (reason = 0; reason < MGMT_RX_REO_RELEASE_REASON_MAX; reason++) {
1043 		mgmt_rx_reo_err("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu", reason,
1044 				stats->delivery_count[0][reason],
1045 				stats->delivery_count[1][reason],
1046 				stats->delivery_count[2][reason],
1047 				stats->delivery_count[3][reason],
1048 				delivery_count_per_reason[reason]);
1049 		mgmt_rx_reo_err("\t---------------------------------------------------------");
1050 	}
1051 	mgmt_rx_reo_err("\t                |%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
1052 			delivery_count_per_link[0], delivery_count_per_link[1],
1053 			delivery_count_per_link[2], delivery_count_per_link[3],
1054 			total_delivery_count);
1055 
1056 	return QDF_STATUS_SUCCESS;
1057 }
1058 
1059 /**
1060  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1061  * frame exiting the reorder module. Logging is done before attempting the frame
1062  * delivery to upper layers.
1063  * @reo_ctx: management rx reorder context
1064  * @entry: Pointer to reorder list entry
1065  *
1066  * Return: QDF_STATUS of operation
1067  */
1068 static QDF_STATUS
1069 mgmt_rx_reo_log_egress_frame_before_delivery(
1070 					struct mgmt_rx_reo_context *reo_ctx,
1071 					struct mgmt_rx_reo_list_entry *entry)
1072 {
1073 	struct reo_egress_debug_info *egress_frame_debug_info;
1074 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1075 	struct reo_egress_frame_stats *stats;
1076 	uint8_t link_id;
1077 
1078 	if (!reo_ctx || !entry)
1079 		return QDF_STATUS_E_NULL_VALUE;
1080 
1081 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1082 
1083 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1084 			[egress_frame_debug_info->next_index];
1085 
1086 	cur_frame_debug_info->link_id =
1087 				mgmt_rx_reo_get_link_id(entry->rx_params);
1088 	cur_frame_debug_info->mgmt_pkt_ctr =
1089 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
1090 	cur_frame_debug_info->global_timestamp =
1091 				mgmt_rx_reo_get_global_ts(entry->rx_params);
1092 	cur_frame_debug_info->wait_count = entry->wait_count;
1093 	cur_frame_debug_info->insertion_ts = entry->insertion_ts;
1094 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
1095 	cur_frame_debug_info->removal_ts =  entry->removal_ts;
1096 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
1097 	cur_frame_debug_info->release_reason = entry->release_reason;
1098 	cur_frame_debug_info->is_premature_delivery =
1099 						entry->is_premature_delivery;
1100 
1101 	stats = &egress_frame_debug_info->stats;
1102 	link_id = cur_frame_debug_info->link_id;
1103 	stats->delivery_attempts_count[link_id]++;
1104 	if (entry->is_premature_delivery)
1105 		stats->premature_delivery_count[link_id]++;
1106 
1107 	return QDF_STATUS_SUCCESS;
1108 }
1109 
1110 /**
1111  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1112  * frame exiting the reorder module. Logging is done after attempting the frame
1113  * delivery to upper layer.
1114  * @reo_ctx: management rx reorder context
1115  * @entry: Pointer to reorder list entry
1116  *
1117  * Return: QDF_STATUS of operation
1118  */
1119 static QDF_STATUS
1120 mgmt_rx_reo_log_egress_frame_after_delivery(
1121 					struct mgmt_rx_reo_context *reo_ctx,
1122 					struct mgmt_rx_reo_list_entry *entry)
1123 {
1124 	struct reo_egress_debug_info *egress_frame_debug_info;
1125 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
1126 	struct reo_egress_frame_stats *stats;
1127 
1128 	if (!reo_ctx)
1129 		return QDF_STATUS_E_NULL_VALUE;
1130 
1131 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1132 
1133 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
1134 			[egress_frame_debug_info->next_index];
1135 
1136 	cur_frame_debug_info->is_delivered = entry->is_delivered;
1137 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
1138 					cur_frame_debug_info->egress_timestamp;
1139 
1140 	egress_frame_debug_info->next_index++;
1141 	egress_frame_debug_info->next_index %=
1142 				MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1143 	if (egress_frame_debug_info->next_index == 0)
1144 		egress_frame_debug_info->wrap_aroud = true;
1145 
1146 	stats = &egress_frame_debug_info->stats;
1147 	if (entry->is_delivered) {
1148 		uint8_t link_id = cur_frame_debug_info->link_id;
1149 		uint8_t release_reason = cur_frame_debug_info->release_reason;
1150 
1151 		stats->delivery_count[link_id][release_reason]++;
1152 		stats->delivery_success_count[link_id]++;
1153 	}
1154 
1155 	return QDF_STATUS_SUCCESS;
1156 }
1157 
1158 /**
1159  * mgmt_rx_reo_print_egress_frame_info() - Print the debug information about the
1160  * latest frames leaving the reorder module
1161  * @reo_ctx: management rx reorder context
1162  *
1163  * Return: QDF_STATUS of operation
1164  */
1165 static QDF_STATUS
1166 mgmt_rx_reo_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
1167 {
1168 	struct reo_egress_debug_info *egress_frame_debug_info;
1169 	uint16_t start_index;
1170 	uint16_t index;
1171 	uint16_t entry;
1172 	uint16_t num_valid_entries;
1173 
1174 	if (!reo_ctx)
1175 		return QDF_STATUS_E_NULL_VALUE;
1176 
1177 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
1178 
1179 	if (egress_frame_debug_info->wrap_aroud) {
1180 		start_index = egress_frame_debug_info->next_index;
1181 		num_valid_entries = MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1182 	} else {
1183 		start_index = 0;
1184 		num_valid_entries =
1185 			egress_frame_debug_info->next_index - start_index;
1186 	}
1187 
1188 	if (!num_valid_entries)
1189 		return QDF_STATUS_SUCCESS;
1190 
1191 	mgmt_rx_reo_err_no_fl("Egress Frame Info:-");
1192 	mgmt_rx_reo_err_no_fl("Number of valid entries = %u",
1193 			      num_valid_entries);
1194 	mgmt_rx_reo_err_no_fl("--------------------------------------------------------------------------------------------------------------------------------------------------");
1195 	mgmt_rx_reo_err_no_fl("|No.|Link|SeqNo|Global ts |Ingress ts|Insert. ts|Removal ts|Egress ts |E Dur|W Dur  |Flags|Rea.|Wait Count                                       |");
1196 	mgmt_rx_reo_err_no_fl("--------------------------------------------------------------------------------------------------------------------------------------------------");
1197 
1198 	index = start_index;
1199 	for (entry = 0; entry < num_valid_entries; entry++) {
1200 		struct reo_egress_debug_frame_info *info;
1201 		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
1202 		char wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
1203 		char flag_premature_delivery = ' ';
1204 		char flag_error = ' ';
1205 
1206 		info = &reo_ctx->egress_frame_debug_info.frame_list[index];
1207 
1208 		if (!info->is_delivered)
1209 			flag_error = 'E';
1210 
1211 		if (info->is_premature_delivery)
1212 			flag_premature_delivery = 'P';
1213 
1214 		snprintf(flags, sizeof(flags), "%c %c", flag_error,
1215 			 flag_premature_delivery);
1216 		snprintf(wait_count, sizeof(wait_count),
1217 			 "%9llx(%8x, %8x, %8x, %8x)",
1218 			 info->wait_count.total_count,
1219 			 info->wait_count.per_link_count[0],
1220 			 info->wait_count.per_link_count[1],
1221 			 info->wait_count.per_link_count[2],
1222 			 info->wait_count.per_link_count[3]);
1223 
1224 		mgmt_rx_reo_err_no_fl("|%3u|%4u|%5u|%10u|%10llu|%10llu|%10llu|%10llu|%5llu|%7llu|%5s|%4x|%49s|",
1225 				      entry, info->link_id, info->mgmt_pkt_ctr,
1226 				      info->global_timestamp,
1227 				      info->ingress_timestamp,
1228 				      info->insertion_ts, info->removal_ts,
1229 				      info->egress_timestamp,
1230 				      info->egress_duration,
1231 				      info->removal_ts - info->insertion_ts,
1232 				      flags, info->release_reason, wait_count);
1233 		mgmt_rx_reo_err_no_fl("-------------------------------------------------------------------------"
1234 				      "-------------------------------------------------------------------------");
1235 
1236 		index++;
1237 		index %= MGMT_RX_REO_EGRESS_FRAME_DEBUG_ENTRIES_MAX;
1238 	}
1239 
1240 	return QDF_STATUS_SUCCESS;
1241 }
1242 #else
1243 /**
1244  * mgmt_rx_reo_print_egress_frame_stats() - API to print the stats
1245  * related to frames going out of the reorder module
1246  * @reo_ctx: Pointer to reorder context
1247  *
1248  * API to print the stats related to frames going out of the management
1249  * Rx reorder module.
1250  *
1251  * Return: QDF_STATUS
1252  */
1253 static QDF_STATUS
1254 mgmt_rx_reo_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
1255 {
1256 	return QDF_STATUS_SUCCESS;
1257 }
1258 
1259 /**
1260  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
1261  * frame exiting the reorder module. Logging is done before attempting the frame
1262  * delivery to upper layers.
1263  * @reo_ctx: management rx reorder context
1264  * @entry: Pointer to reorder list entry
1265  *
1266  * Return: QDF_STATUS of operation
1267  */
1268 static QDF_STATUS
1269 mgmt_rx_reo_log_egress_frame_before_delivery(
1270 					struct mgmt_rx_reo_context *reo_ctx,
1271 					struct mgmt_rx_reo_list_entry *entry)
1272 {
1273 	return QDF_STATUS_SUCCESS;
1274 }
1275 
1276 /**
1277  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
1278  * frame exiting the reorder module. Logging is done after attempting the frame
1279  * delivery to upper layer.
1280  * @reo_ctx: management rx reorder context
1281  * @is_delivered: Flag to indicate whether the frame is delivered to upper
1282  * layers
1283  *
1284  * Return: QDF_STATUS of operation
1285  */
1286 static QDF_STATUS
1287 mgmt_rx_reo_log_egress_frame_after_delivery(
1288 					struct mgmt_rx_reo_context *reo_ctx,
1289 					bool is_delivered)
1290 {
1291 	return QDF_STATUS_SUCCESS;
1292 }
1293 
1294 /**
1295  * mgmt_rx_reo_print_egress_frame_info() - Print debug information about the
1296  * latest frames leaving the reorder module
1297  * @reo_ctx: management rx reorder context
1298  *
1299  * Return: QDF_STATUS of operation
1300  */
1301 static QDF_STATUS
1302 mgmt_rx_reo_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
1303 {
1304 	return QDF_STATUS_SUCCESS;
1305 }
1306 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
1307 
1308 /**
1309  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
1310  * for releasing the reorder list entry to upper layer.
1311  * reorder list.
1312  * @entry: List entry
1313  *
1314  * This API expects the caller to acquire the spin lock protecting the reorder
1315  * list.
1316  *
1317  * Return: Reason for releasing the frame.
1318  */
1319 static uint8_t
1320 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
1321 {
1322 	uint8_t release_reason = 0;
1323 
1324 	if (!entry)
1325 		return 0;
1326 
1327 	if (MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry))
1328 		release_reason |=
1329 		   MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED;
1330 
1331 	if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
1332 		release_reason |=
1333 			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;
1334 
1335 	if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
1336 		release_reason |=
1337 				MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
1338 
1339 	if (MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
1340 		release_reason |=
1341 		MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
1342 
1343 	return release_reason;
1344 }
1345 
1346 /**
1347  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
1348  * @reo_list: Pointer to reorder list
1349  * @entry: List entry
1350  *
1351  * API to send the frame to the upper layer. This API has to be called only
1352  * for entries which can be released to upper layer. It is the caller's
1353  * responsibility to ensure that entry can be released (by using API
1354  * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
1355  * acquiring the lock which serializes the frame delivery to the upper layers.
1356  *
1357  * Return: QDF_STATUS
1358  */
1359 static QDF_STATUS
1360 mgmt_rx_reo_list_entry_send_up(const struct mgmt_rx_reo_list *reo_list,
1361 			       struct mgmt_rx_reo_list_entry *entry)
1362 {
1363 	uint8_t release_reason;
1364 	uint8_t link_id;
1365 	uint32_t entry_global_ts;
1366 	QDF_STATUS status;
1367 	QDF_STATUS temp;
1368 	struct mgmt_rx_reo_context *reo_context;
1369 
1370 	qdf_assert_always(reo_list);
1371 	qdf_assert_always(entry);
1372 
1373 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
1374 	qdf_assert_always(reo_context);
1375 
1376 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
1377 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
1378 
1379 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
1380 
1381 	qdf_assert_always(release_reason != 0);
1382 
1383 	entry->is_delivered = false;
1384 	entry->is_premature_delivery = false;
1385 	entry->release_reason = release_reason;
1386 
1387 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
1388 		entry->is_premature_delivery = true;
1389 		status = mgmt_rx_reo_handle_potential_premature_delivery(
1390 						reo_context, entry_global_ts);
1391 		if (QDF_IS_STATUS_ERROR(status))
1392 			goto exit;
1393 	}
1394 
1395 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
1396 							      entry);
1397 	if (QDF_IS_STATUS_ERROR(status))
1398 		goto exit;
1399 
1400 	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
1401 						 entry->rx_params);
1402 	/* Above call frees nbuf and rx_params, make it null explicitly */
1403 	entry->nbuf = NULL;
1404 	entry->rx_params = NULL;
1405 
1406 	if (QDF_IS_STATUS_ERROR(status))
1407 		goto exit_log;
1408 
1409 	entry->is_delivered = true;
1410 
1411 	status = QDF_STATUS_SUCCESS;
1412 
1413 exit_log:
1414 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry);
1415 	if (QDF_IS_STATUS_ERROR(temp))
1416 		status = temp;
1417 exit:
1418 	/**
1419 	 * Release the reference taken when the entry is inserted into
1420 	 * the reorder list
1421 	 */
1422 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
1423 
1424 	return status;
1425 }
1426 
1427 /**
1428  * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
1429  * list entry can be send to upper layers.
1430  * @reo_list: Pointer to reorder list
1431  * @entry: List entry
1432  *
1433  * Return: QDF_STATUS
1434  */
1435 static bool
1436 mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
1437 					   struct mgmt_rx_reo_list_entry *entry)
1438 {
1439 	if (!reo_list || !entry)
1440 		return false;
1441 
1442 	return mgmt_rx_reo_list_max_size_exceeded(reo_list) ||
1443 	       !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
1444 	       entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
1445 	       MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME
1446 	       (entry);
1447 }
1448 
1449 /**
1450  * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
1451  * @reo_context: Pointer to management Rx reorder context
1452  *
1453  * This API releases the entries from the reorder list based on the following
1454  * conditions.
1455  *   a) Entries with total wait count equal to 0
1456  *   b) Entries which are timed out or entries with global time stamp <= global
1457  *      time stamp of the latest frame which is timed out. We can only release
1458  *      the entries in the increasing order of the global time stamp.
1459  *      So all the entries with global time stamp <= global time stamp of the
1460  *      latest timed out frame has to be released.
1461  *
1462  * Return: QDF_STATUS
1463  */
1464 static QDF_STATUS
1465 mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
1466 {
1467 	struct mgmt_rx_reo_list *reo_list;
1468 	QDF_STATUS status;
1469 
1470 	if (!reo_context) {
1471 		mgmt_rx_reo_err("reo context is null");
1472 		return QDF_STATUS_E_NULL_VALUE;
1473 	}
1474 
1475 	reo_list = &reo_context->reo_list;
1476 
1477 	qdf_spin_lock(&reo_context->frame_release_lock);
1478 
1479 	while (1) {
1480 		struct mgmt_rx_reo_list_entry *first_entry;
1481 		/* TODO yield if release_count > THRESHOLD */
1482 		uint16_t release_count = 0;
1483 		struct mgmt_rx_reo_global_ts_info *ts_last_released_frame =
1484 					&reo_list->ts_last_released_frame;
1485 		uint32_t entry_global_ts;
1486 
1487 		qdf_spin_lock_bh(&reo_list->list_lock);
1488 
1489 		first_entry = qdf_list_first_entry_or_null(
1490 			&reo_list->list, struct mgmt_rx_reo_list_entry, node);
1491 
1492 		if (!first_entry) {
1493 			status = QDF_STATUS_SUCCESS;
1494 			goto exit_unlock_list_lock;
1495 		}
1496 
1497 		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
1498 								first_entry)) {
1499 			status = QDF_STATUS_SUCCESS;
1500 			goto exit_unlock_list_lock;
1501 		}
1502 
1503 		if (mgmt_rx_reo_list_max_size_exceeded(reo_list))
1504 			first_entry->status |=
1505 				MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;
1506 
1507 		status = qdf_list_remove_node(&reo_list->list,
1508 					      &first_entry->node);
1509 		if (QDF_IS_STATUS_ERROR(status)) {
1510 			status = QDF_STATUS_E_FAILURE;
1511 			goto exit_unlock_list_lock;
1512 		}
1513 		first_entry->removal_ts = qdf_get_log_timestamp();
1514 
1515 		/**
1516 		 * Last released frame global time stamp is invalid means that
1517 		 * current frame is the first frame to be released to the
1518 		 * upper layer from the reorder list. Blindly update the last
1519 		 * released frame global time stamp to the current frame's
1520 		 * global time stamp and set the valid to true.
1521 		 * If the last released frame global time stamp is valid and
1522 		 * current frame's global time stamp is >= last released frame
1523 		 * global time stamp, deliver the current frame to upper layer
1524 		 * and update the last released frame global time stamp.
1525 		 */
1526 		entry_global_ts =
1527 			mgmt_rx_reo_get_global_ts(first_entry->rx_params);
1528 		if (!ts_last_released_frame->valid ||
1529 		    mgmt_rx_reo_compare_global_timestamps_gte(
1530 			entry_global_ts, ts_last_released_frame->global_ts)) {
1531 			ts_last_released_frame->global_ts = entry_global_ts;
1532 			ts_last_released_frame->valid = true;
1533 		} else {
1534 			/**
1535 			 * This should never happen. All the frames older than
1536 			 * the last frame released from the reorder list will be
1537 			 * discarded at the entry to reorder algorithm itself.
1538 			 */
1539 			qdf_assert_always(0);
1540 		}
1541 
1542 		qdf_spin_unlock_bh(&reo_list->list_lock);
1543 
1544 		status = mgmt_rx_reo_list_entry_send_up(reo_list,
1545 							first_entry);
1546 		if (QDF_IS_STATUS_ERROR(status)) {
1547 			status = QDF_STATUS_E_FAILURE;
1548 			qdf_mem_free(first_entry);
1549 			goto exit_unlock_frame_release_lock;
1550 		}
1551 
1552 		qdf_mem_free(first_entry);
1553 		release_count++;
1554 	}
1555 
1556 	status = QDF_STATUS_SUCCESS;
1557 	goto exit_unlock_frame_release_lock;
1558 
1559 exit_unlock_list_lock:
1560 	qdf_spin_unlock_bh(&reo_list->list_lock);
1561 exit_unlock_frame_release_lock:
1562 	qdf_spin_unlock(&reo_context->frame_release_lock);
1563 
1564 	return status;
1565 }
1566 
1567 /**
1568  * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
1569  * @arg: Argument to timer handler
1570  *
1571  * This is the handler for periodic ageout timer used to timeout entries in the
1572  * reorder list.
1573  *
1574  * Return: void
1575  */
1576 static void
1577 mgmt_rx_reo_list_ageout_timer_handler(void *arg)
1578 {
1579 	struct mgmt_rx_reo_list *reo_list = arg;
1580 	struct mgmt_rx_reo_list_entry *cur_entry;
1581 	uint64_t cur_ts;
1582 	QDF_STATUS status;
1583 	struct mgmt_rx_reo_context *reo_context;
1584 	/**
1585 	 * Stores the pointer to the entry in reorder list for the latest aged
1586 	 * out frame. Latest aged out frame is the aged out frame in reorder
1587 	 * list which has the largest global time stamp value.
1588 	 */
1589 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
1590 
1591 	qdf_assert_always(reo_list);
1592 
1593 	qdf_timer_mod(&reo_list->ageout_timer,
1594 		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);
1595 
1596 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
1597 	qdf_assert_always(reo_context);
1598 
1599 	qdf_spin_lock_bh(&reo_list->list_lock);
1600 
1601 	cur_ts = qdf_get_log_timestamp();
1602 
1603 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1604 		if (cur_ts - cur_entry->insertion_ts >=
1605 		    reo_list->list_entry_timeout_us) {
1606 			latest_aged_out_entry = cur_entry;
1607 			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
1608 		}
1609 	}
1610 
1611 	if (latest_aged_out_entry) {
1612 		qdf_list_for_each(&reo_list->list, cur_entry, node) {
1613 			if (cur_entry == latest_aged_out_entry)
1614 				break;
1615 			cur_entry->status |= MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
1616 		}
1617 	}
1618 
1619 	qdf_spin_unlock_bh(&reo_list->list_lock);
1620 
1621 	if (latest_aged_out_entry) {
1622 		status = mgmt_rx_reo_list_release_entries(reo_context);
1623 		if (QDF_IS_STATUS_ERROR(status)) {
1624 			mgmt_rx_reo_err("Failed to release entries, ret = %d",
1625 					status);
1626 			return;
1627 		}
1628 	}
1629 }
1630 
1631 /**
1632  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
1633  * frame received.
1634  * @frame_desc: Pointer to the frame descriptor
1635  * @entry: Pointer to the list entry
1636  *
1637  * This API prepares the reorder list entry corresponding to a management frame
1638  * to be consumed by host. This entry would be inserted at the appropriate
1639  * position in the reorder list.
1640  *
1641  * Return: QDF_STATUS
1642  */
1643 static QDF_STATUS
1644 mgmt_rx_reo_prepare_list_entry(
1645 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
1646 		struct mgmt_rx_reo_list_entry **entry)
1647 {
1648 	struct mgmt_rx_reo_list_entry *list_entry;
1649 	struct wlan_objmgr_pdev *pdev;
1650 	uint8_t link_id;
1651 
1652 	if (!frame_desc) {
1653 		mgmt_rx_reo_err("frame descriptor is null");
1654 		return QDF_STATUS_E_NULL_VALUE;
1655 	}
1656 
1657 	if (!entry) {
1658 		mgmt_rx_reo_err("Pointer to list entry is null");
1659 		return QDF_STATUS_E_NULL_VALUE;
1660 	}
1661 
1662 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
1663 
1664 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_ID);
1665 	if (!pdev) {
1666 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
1667 				link_id);
1668 		return QDF_STATUS_E_NULL_VALUE;
1669 	}
1670 
1671 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
1672 	if (!list_entry) {
1673 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1674 		mgmt_rx_reo_err("List entry allocation failed");
1675 		return QDF_STATUS_E_NOMEM;
1676 	}
1677 
1678 	list_entry->pdev = pdev;
1679 	list_entry->nbuf = frame_desc->nbuf;
1680 	list_entry->rx_params = frame_desc->rx_params;
1681 	list_entry->wait_count = frame_desc->wait_count;
1682 	list_entry->status = 0;
1683 	if (list_entry->wait_count.total_count)
1684 		list_entry->status |=
1685 			MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
1686 
1687 	*entry = list_entry;
1688 
1689 	mgmt_rx_reo_debug("New entry to be inserted is %pK", list_entry);
1690 
1691 	return QDF_STATUS_SUCCESS;
1692 }
1693 
1694 /**
1695  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
1696  * on the wait count of a frame received after that on air.
1697  * @num_mlo_links: Number of MLO links
1698  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
1699  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
1700  *
1701  * This API optimizes the wait count of a frame based on the wait count of
1702  * a frame received after that on air. Old frame refers to the frame received
1703  * first on the air and new frame refers to the frame received after that.
1704  * We use the following fundamental idea. Wait counts for old frames can't be
1705  * more than wait counts for the new frame. Use this to optimize the wait count
1706  * for the old frames. Per link wait count of an old frame is minimum of the
1707  * per link wait count of the old frame and new frame.
1708  *
1709  * Return: QDF_STATUS
1710  */
1711 static QDF_STATUS
1712 mgmt_rx_reo_update_wait_count(
1713 		uint8_t num_mlo_links,
1714 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
1715 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
1716 {
1717 	uint8_t link_id;
1718 
1719 	qdf_assert_always(wait_count_old_frame);
1720 	qdf_assert_always(wait_count_new_frame);
1721 
1722 	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);
1723 
1724 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
1725 		if (wait_count_old_frame->per_link_count[link_id]) {
1726 			uint32_t temp_wait_count;
1727 			uint32_t wait_count_diff;
1728 
1729 			temp_wait_count =
1730 				wait_count_old_frame->per_link_count[link_id];
1731 			wait_count_old_frame->per_link_count[link_id] =
1732 				qdf_min(wait_count_old_frame->
1733 					per_link_count[link_id],
1734 					wait_count_new_frame->
1735 					per_link_count[link_id]);
1736 			wait_count_diff = temp_wait_count -
1737 				wait_count_old_frame->per_link_count[link_id];
1738 
1739 			wait_count_old_frame->total_count -= wait_count_diff;
1740 		}
1741 	}
1742 
1743 	return QDF_STATUS_SUCCESS;
1744 }
1745 
1746 /**
1747  * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
1748  * @reo_list: Pointer to reorder list
1749  * @num_mlo_links: Number of MLO HW links
1750  * @frame_desc: Pointer to frame descriptor
1751  * @is_queued: Whether this frame is queued in the REO list
1752  *
1753  * API to update the reorder list on every management frame reception.
1754  * This API does the following things.
1755  *   a) Update the wait counts for all the frames in the reorder list with
1756  *      global time stamp <= current frame's global time stamp. We use the
1757  *      following principle for updating the wait count in this case.
1758  *      Let A and B be two management frames with global time stamp of A <=
1759  *      global time stamp of B. Let WAi and WBi be the wait count of A and B
1760  *      for link i, then WAi <= WBi. Hence we can optimize WAi as
1761  *      min(WAi, WBi).
1762  *   b) If the current frame is to be consumed by host, insert it in the
1763  *      reorder list such that the list is always sorted in the increasing order
1764  *      of global time stamp. Update the wait count of the current frame based
1765  *      on the frame next to it in the reorder list (if any).
1766  *   c) Update the wait count of the frames in the reorder list with global
1767  *      time stamp > current frame's global time stamp. Let the current frame
1768  *      belong to link "l". Then link "l"'s wait count can be reduced by one for
1769  *      all the frames in the reorder list with global time stamp > current
1770  *      frame's global time stamp.
1771  *
1772  * Return: QDF_STATUS
1773  */
1774 static QDF_STATUS
1775 mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
1776 			uint8_t num_mlo_links,
1777 			struct mgmt_rx_reo_frame_descriptor *frame_desc,
1778 			bool *is_queued)
1779 {
1780 	struct mgmt_rx_reo_list_entry *cur_entry;
1781 	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
1782 	bool least_greater_entry_found = false;
1783 	QDF_STATUS status;
1784 	uint32_t new_frame_global_ts;
1785 	struct mgmt_rx_reo_list_entry *new_entry = NULL;
1786 	uint16_t list_insertion_pos = 0;
1787 
1788 	if (!is_queued)
1789 		return QDF_STATUS_E_NULL_VALUE;
1790 	*is_queued = false;
1791 
1792 	if (!reo_list) {
1793 		mgmt_rx_reo_err("Mgmt Rx reo list is null");
1794 		return QDF_STATUS_E_NULL_VALUE;
1795 	}
1796 
1797 	if (!frame_desc) {
1798 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
1799 		return QDF_STATUS_E_NULL_VALUE;
1800 	}
1801 
1802 	new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
1803 
1804 	/* Prepare the list entry before acquiring lock */
1805 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
1806 		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
1807 		if (QDF_IS_STATUS_ERROR(status)) {
1808 			mgmt_rx_reo_err("Failed to prepare list entry");
1809 			return QDF_STATUS_E_FAILURE;
1810 		}
1811 	}
1812 
1813 	qdf_spin_lock_bh(&reo_list->list_lock);
1814 
1815 	frame_desc->list_size_rx = qdf_list_size(&reo_list->list);
1816 
1817 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1818 		uint32_t cur_entry_global_ts;
1819 
1820 		cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
1821 					cur_entry->rx_params);
1822 
1823 		if (!mgmt_rx_reo_compare_global_timestamps_gte(
1824 		    new_frame_global_ts, cur_entry_global_ts)) {
1825 			least_greater_entry = cur_entry;
1826 			least_greater_entry_found = true;
1827 			break;
1828 		}
1829 
1830 		list_insertion_pos++;
1831 
1832 		status = mgmt_rx_reo_update_wait_count(
1833 					num_mlo_links,
1834 					&cur_entry->wait_count,
1835 					&frame_desc->wait_count);
1836 		if (QDF_IS_STATUS_ERROR(status))
1837 			goto error;
1838 
1839 		if (cur_entry->wait_count.total_count == 0)
1840 			cur_entry->status &=
1841 			      ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
1842 	}
1843 
1844 	frame_desc->is_stale = false;
1845 	if (mgmt_rx_reo_is_stale_frame(&reo_list->ts_last_released_frame,
1846 				       new_frame_global_ts)) {
1847 		frame_desc->is_stale = true;
1848 
1849 		status = mgmt_rx_reo_handle_stale_frame(reo_list, frame_desc);
1850 		if (QDF_IS_STATUS_ERROR(status))
1851 			goto error;
1852 	}
1853 
1854 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
1855 	    !frame_desc->is_stale) {
1856 		if (least_greater_entry_found) {
1857 			status = mgmt_rx_reo_update_wait_count(
1858 					num_mlo_links,
1859 					&new_entry->wait_count,
1860 					&least_greater_entry->wait_count);
1861 
1862 			if (QDF_IS_STATUS_ERROR(status))
1863 				goto error;
1864 
1865 			frame_desc->wait_count = new_entry->wait_count;
1866 
1867 			if (new_entry->wait_count.total_count == 0)
1868 				new_entry->status &=
1869 					~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
1870 		}
1871 
1872 		new_entry->insertion_ts = qdf_get_log_timestamp();
1873 		new_entry->ingress_timestamp = frame_desc->ingress_timestamp;
1874 		frame_desc->list_insertion_pos = list_insertion_pos;
1875 
1876 		if (least_greater_entry_found)
1877 			status = qdf_list_insert_before(
1878 					&reo_list->list, &new_entry->node,
1879 					&least_greater_entry->node);
1880 		else
1881 			status = qdf_list_insert_back(
1882 					&reo_list->list, &new_entry->node);
1883 
1884 		if (QDF_IS_STATUS_ERROR(status))
1885 			goto error;
1886 
1887 		*is_queued = true;
1888 
1889 		if (new_entry->wait_count.total_count == 0)
1890 			frame_desc->zero_wait_count_rx = true;
1891 
1892 		if (frame_desc->zero_wait_count_rx &&
1893 		    qdf_list_first_entry_or_null(&reo_list->list,
1894 						 struct mgmt_rx_reo_list_entry,
1895 						 node) == new_entry)
1896 			frame_desc->immediate_delivery = true;
1897 	}
1898 
1899 	if (least_greater_entry_found) {
1900 		cur_entry = least_greater_entry;
1901 
1902 		qdf_list_for_each_from(&reo_list->list, cur_entry, node) {
1903 			uint8_t frame_link_id;
1904 			struct mgmt_rx_reo_wait_count *wait_count;
1905 
1906 			frame_link_id =
1907 				mgmt_rx_reo_get_link_id(frame_desc->rx_params);
1908 			wait_count = &cur_entry->wait_count;
1909 			if (wait_count->per_link_count[frame_link_id]) {
1910 				wait_count->per_link_count[frame_link_id]--;
1911 				wait_count->total_count--;
1912 				if (wait_count->total_count == 0)
1913 					cur_entry->status &=
1914 						~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
1915 			}
1916 		}
1917 	}
1918 
1919 	status = QDF_STATUS_SUCCESS;
1920 	goto exit;
1921 
1922 error:
1923 	/* Cleanup the entry if it is not queued */
1924 	if (new_entry && !*is_queued) {
1925 		/**
1926 		 * New entry created is not inserted to reorder list, free
1927 		 * the entry and release the reference
1928 		 */
1929 		wlan_objmgr_pdev_release_ref(new_entry->pdev,
1930 					     WLAN_MGMT_RX_REO_ID);
1931 		qdf_mem_free(new_entry);
1932 	}
1933 
1934 exit:
1935 	qdf_spin_unlock_bh(&reo_list->list_lock);
1936 
1937 	if (!*is_queued)
1938 		return status;
1939 
1940 	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
1941 		if (least_greater_entry_found)
1942 			mgmt_rx_reo_debug("Inserting new entry %pK before %pK",
1943 					  new_entry, least_greater_entry);
1944 		else
1945 			mgmt_rx_reo_debug("Inserting new entry %pK at the tail",
1946 					  new_entry);
1947 	}
1948 
1949 	return status;
1950 }
1951 
1952 /**
1953  * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
1954  * @reo_list: Pointer to reorder list
1955  *
1956  * API to initialize the management rx-reorder list.
1957  *
1958  * Return: QDF_STATUS
1959  */
1960 static QDF_STATUS
1961 mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list)
1962 {
1963 	QDF_STATUS status;
1964 
1965 	reo_list->max_list_size = MGMT_RX_REO_LIST_MAX_SIZE;
1966 	reo_list->list_entry_timeout_us = MGMT_RX_REO_LIST_TIMEOUT_US;
1967 
1968 	qdf_list_create(&reo_list->list, reo_list->max_list_size);
1969 	qdf_spinlock_create(&reo_list->list_lock);
1970 
1971 	status = qdf_timer_init(NULL, &reo_list->ageout_timer,
1972 				mgmt_rx_reo_list_ageout_timer_handler, reo_list,
1973 				QDF_TIMER_TYPE_WAKE_APPS);
1974 	if (QDF_IS_STATUS_ERROR(status)) {
1975 		mgmt_rx_reo_err("Failed to initialize reo list ageout timer");
1976 		return status;
1977 	}
1978 
1979 	reo_list->ts_last_released_frame.valid = false;
1980 
1981 	return QDF_STATUS_SUCCESS;
1982 }
1983 
1984 /**
1985  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
1986  * Rx REO parameters.
1987  * @pdev: pdev extracted from the WMI event
1988  * @reo_params: MGMT Rx REO parameters received in the WMI event
1989  *
1990  * Return: QDF_STATUS of operation
1991  */
1992 static QDF_STATUS
1993 wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
1994 				      struct mgmt_rx_reo_params *reo_params)
1995 {
1996 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
1997 	struct mgmt_rx_reo_snapshot_params *host_ss;
1998 
1999 	if (!reo_params) {
2000 		mgmt_rx_reo_err("Mgmt Rx REO params are NULL");
2001 		return QDF_STATUS_E_NULL_VALUE;
2002 	}
2003 
2004 	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
2005 	if (!rx_reo_pdev_ctx) {
2006 		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
2007 		return QDF_STATUS_E_FAILURE;
2008 	}
2009 
2010 	/* FW should send valid REO parameters */
2011 	if (!reo_params->valid) {
2012 		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
2013 		return QDF_STATUS_E_FAILURE;
2014 	}
2015 
2016 	host_ss = &rx_reo_pdev_ctx->host_snapshot;
2017 
2018 	/* There should not be any holes in the packet counter */
2019 	qdf_assert_always(!host_ss->valid ||
2020 			  mgmt_rx_reo_subtract_pkt_ctrs(
2021 				  reo_params->mgmt_pkt_ctr,
2022 				  host_ss->mgmt_pkt_ctr) == 1);
2023 
2024 	host_ss->valid = true;
2025 	host_ss->global_timestamp = reo_params->global_timestamp;
2026 	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
2027 
2028 	return QDF_STATUS_SUCCESS;
2029 }
2030 
2031 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2032 /**
2033  * mgmt_rx_reo_print_ingress_frame_stats() - API to print the stats
2034  * related to frames going into the reorder module
2035  * @reo_ctx: Pointer to reorder context
2036  *
2037  * API to print the stats related to frames going into the management
2038  * Rx reorder module.
2039  *
2040  * Return: QDF_STATUS
2041  */
2042 static QDF_STATUS
2043 mgmt_rx_reo_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
2044 {
2045 	struct reo_ingress_frame_stats *stats;
2046 	uint8_t link_id;
2047 	uint8_t desc_type;
2048 	int8_t num_mlo_links;
2049 	uint64_t ingress_count_per_link[MGMT_RX_REO_MAX_LINKS] = {0};
2050 	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2051 	uint64_t total_ingress_count = 0;
2052 	uint64_t stale_count_per_link[MGMT_RX_REO_MAX_LINKS] = {0};
2053 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2054 	uint64_t total_stale_count = 0;
2055 	uint64_t error_count_per_link[MGMT_RX_REO_MAX_LINKS] = {0};
2056 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
2057 	uint64_t total_error_count = 0;
2058 	uint64_t total_queued_count = 0;
2059 	uint64_t total_zero_wait_count_rx_count = 0;
2060 	uint64_t total_immediate_delivery_count = 0;
2061 
2062 	if (!reo_ctx)
2063 		return QDF_STATUS_E_NULL_VALUE;
2064 
2065 	stats = &reo_ctx->ingress_frame_debug_info.stats;
2066 
2067 	num_mlo_links = mgmt_rx_reo_get_num_mlo_links(reo_ctx);
2068 	qdf_assert_always(num_mlo_links > 0);
2069 	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);
2070 
2071 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2072 		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2073 		     desc_type++) {
2074 			ingress_count_per_link[link_id] +=
2075 				stats->ingress_count[link_id][desc_type];
2076 			stale_count_per_link[link_id] +=
2077 					stats->stale_count[link_id][desc_type];
2078 			error_count_per_link[link_id] +=
2079 					stats->error_count[link_id][desc_type];
2080 		}
2081 
2082 		total_ingress_count += ingress_count_per_link[link_id];
2083 		total_stale_count += stale_count_per_link[link_id];
2084 		total_error_count += error_count_per_link[link_id];
2085 	}
2086 
2087 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
2088 	     desc_type++) {
2089 		for (link_id = 0; link_id < num_mlo_links; link_id++) {
2090 			ingress_count_per_desc_type[desc_type] +=
2091 				stats->ingress_count[link_id][desc_type];
2092 			stale_count_per_desc_type[desc_type] +=
2093 					stats->stale_count[link_id][desc_type];
2094 			error_count_per_desc_type[desc_type] +=
2095 					stats->error_count[link_id][desc_type];
2096 		}
2097 	}
2098 
2099 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2100 		total_queued_count += stats->queued_count[link_id];
2101 		total_zero_wait_count_rx_count +=
2102 				stats->zero_wait_count_rx_count[link_id];
2103 		total_immediate_delivery_count +=
2104 				stats->immediate_delivery_count[link_id];
2105 	}
2106 
2107 	mgmt_rx_reo_err("Ingress Frame Stats:");
2108 	mgmt_rx_reo_err("\t1) Ingress Frame Count:");
2109 	mgmt_rx_reo_err("\tDescriptor Type Values:-");
2110 	mgmt_rx_reo_err("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
2111 	mgmt_rx_reo_err("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
2112 	mgmt_rx_reo_err("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
2113 	mgmt_rx_reo_err("\t------------------------------------");
2114 	mgmt_rx_reo_err("\t|link id/  |       |       |       |");
2115 	mgmt_rx_reo_err("\t|desc type |      0|      1|      2|");
2116 	mgmt_rx_reo_err("\t-------------------------------------------");
2117 
2118 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2119 		mgmt_rx_reo_err("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2120 				stats->ingress_count[link_id][0],
2121 				stats->ingress_count[link_id][1],
2122 				stats->ingress_count[link_id][2],
2123 				ingress_count_per_link[link_id]);
2124 		mgmt_rx_reo_err("\t-------------------------------------------");
2125 	}
2126 	mgmt_rx_reo_err("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2127 			ingress_count_per_desc_type[0],
2128 			ingress_count_per_desc_type[1],
2129 			ingress_count_per_desc_type[2],
2130 			total_ingress_count);
2131 
2132 	mgmt_rx_reo_err("\t2) Stale Frame Count:");
2133 	mgmt_rx_reo_err("\t------------------------------------");
2134 	mgmt_rx_reo_err("\t|link id/  |       |       |       |");
2135 	mgmt_rx_reo_err("\t|desc type |      0|      1|      2|");
2136 	mgmt_rx_reo_err("\t-------------------------------------------");
2137 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2138 		mgmt_rx_reo_err("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2139 				stats->stale_count[link_id][0],
2140 				stats->stale_count[link_id][1],
2141 				stats->stale_count[link_id][2],
2142 				stale_count_per_link[link_id]);
2143 		mgmt_rx_reo_err("\t-------------------------------------------");
2144 	}
2145 	mgmt_rx_reo_err("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2146 			stale_count_per_desc_type[0],
2147 			stale_count_per_desc_type[1],
2148 			stale_count_per_desc_type[2],
2149 			total_stale_count);
2150 
2151 	mgmt_rx_reo_err("\t3) Error Frame Count:");
2152 	mgmt_rx_reo_err("\t------------------------------------");
2153 	mgmt_rx_reo_err("\t|link id/  |       |       |       |");
2154 	mgmt_rx_reo_err("\t|desc type |      0|      1|      2|");
2155 	mgmt_rx_reo_err("\t-------------------------------------------");
2156 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2157 		mgmt_rx_reo_err("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
2158 				stats->error_count[link_id][0],
2159 				stats->error_count[link_id][1],
2160 				stats->error_count[link_id][2],
2161 				error_count_per_link[link_id]);
2162 		mgmt_rx_reo_err("\t-------------------------------------------");
2163 	}
2164 	mgmt_rx_reo_err("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
2165 			error_count_per_desc_type[0],
2166 			error_count_per_desc_type[1],
2167 			error_count_per_desc_type[2],
2168 			total_error_count);
2169 
2170 	mgmt_rx_reo_err("\t4) Host consumed frames related stats:");
2171 	mgmt_rx_reo_err("\t------------------------------------------------");
2172 	mgmt_rx_reo_err("\t|link id   |Queued frame |Zero wait |Zero wait |");
2173 	mgmt_rx_reo_err("\t|          |    count    |  count   | delivery |");
2174 	mgmt_rx_reo_err("\t------------------------------------------------");
2175 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
2176 		mgmt_rx_reo_err("\t|%10u|%13llu|%10llu|%10llu|", link_id,
2177 				stats->queued_count[link_id],
2178 				stats->zero_wait_count_rx_count[link_id],
2179 				stats->immediate_delivery_count[link_id]);
2180 		mgmt_rx_reo_err("\t------------------------------------------------");
2181 	}
2182 	mgmt_rx_reo_err("\t           |%13llu|%10llu|%10llu|\n\n",
2183 			total_queued_count,
2184 			total_zero_wait_count_rx_count,
2185 			total_immediate_delivery_count);
2186 
2187 	return QDF_STATUS_SUCCESS;
2188 }
2189 
2190 /**
2191  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
2192  * the reorder algorithm.
2193  * @reo_ctx: management rx reorder context
2194  * @desc: Pointer to frame descriptor
2195  * @is_queued: Indicates whether this frame is queued to reorder list
2196  * @is_error: Indicates whether any error occurred during processing this frame
2197  *
2198  * Return: QDF_STATUS of operation
2199  */
2200 static QDF_STATUS
2201 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
2202 			      struct mgmt_rx_reo_frame_descriptor *desc,
2203 			      bool is_queued, bool is_error)
2204 {
2205 	struct reo_ingress_debug_info *ingress_frame_debug_info;
2206 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
2207 	struct reo_ingress_frame_stats *stats;
2208 	uint8_t link_id;
2209 
2210 	if (!reo_ctx || !desc)
2211 		return QDF_STATUS_E_NULL_VALUE;
2212 
2213 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
2214 
2215 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
2216 			[ingress_frame_debug_info->next_index];
2217 
2218 	cur_frame_debug_info->link_id =
2219 				mgmt_rx_reo_get_link_id(desc->rx_params);
2220 	cur_frame_debug_info->mgmt_pkt_ctr =
2221 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
2222 	cur_frame_debug_info->global_timestamp =
2223 				mgmt_rx_reo_get_global_ts(desc->rx_params);
2224 	cur_frame_debug_info->type = desc->type;
2225 	cur_frame_debug_info->wait_count = desc->wait_count;
2226 	cur_frame_debug_info->is_queued = is_queued;
2227 	cur_frame_debug_info->is_stale = desc->is_stale;
2228 	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
2229 	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
2230 	cur_frame_debug_info->is_error = is_error;
2231 	cur_frame_debug_info->ts_last_released_frame =
2232 				reo_ctx->reo_list.ts_last_released_frame;
2233 	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
2234 	cur_frame_debug_info->ingress_duration =
2235 			qdf_get_log_timestamp() - desc->ingress_timestamp;
2236 	cur_frame_debug_info->list_size_rx = desc->list_size_rx;
2237 	cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
2238 
2239 	ingress_frame_debug_info->next_index++;
2240 	ingress_frame_debug_info->next_index %=
2241 				MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
2242 	if (ingress_frame_debug_info->next_index == 0)
2243 		ingress_frame_debug_info->wrap_aroud = true;
2244 
2245 	stats = &ingress_frame_debug_info->stats;
2246 	link_id = cur_frame_debug_info->link_id;
2247 	stats->ingress_count[link_id][desc->type]++;
2248 	if (is_queued)
2249 		stats->queued_count[link_id]++;
2250 	if (desc->zero_wait_count_rx)
2251 		stats->zero_wait_count_rx_count[link_id]++;
2252 	if (desc->immediate_delivery)
2253 		stats->immediate_delivery_count[link_id]++;
2254 	if (is_error)
2255 		stats->error_count[link_id][desc->type]++;
2256 
2257 	return QDF_STATUS_SUCCESS;
2258 }
2259 
2260 /**
2261  * mgmt_rx_reo_print_ingress_frame_info() - Print the debug information about
2262  * the latest frames entering the reorder module
2263  * @reo_ctx: management rx reorder context
2264  *
2265  * Return: QDF_STATUS of operation
2266  */
2267 static QDF_STATUS
2268 mgmt_rx_reo_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2269 {
2270 	struct reo_ingress_debug_info *ingress_frame_debug_info;
2271 	uint16_t start_index;
2272 	uint16_t index;
2273 	uint16_t entry;
2274 	uint16_t num_valid_entries;
2275 
2276 	if (!reo_ctx)
2277 		return QDF_STATUS_E_NULL_VALUE;
2278 
2279 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
2280 
2281 	if (ingress_frame_debug_info->wrap_aroud) {
2282 		start_index = ingress_frame_debug_info->next_index;
2283 		num_valid_entries = MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
2284 	} else {
2285 		start_index = 0;
2286 		num_valid_entries =
2287 			ingress_frame_debug_info->next_index - start_index;
2288 	}
2289 
2290 	if (!num_valid_entries)
2291 		return QDF_STATUS_SUCCESS;
2292 
2293 	mgmt_rx_reo_err_no_fl("Ingress Frame Info:-");
2294 	mgmt_rx_reo_err_no_fl("Number of valid entries = %u",
2295 			      num_valid_entries);
2296 	mgmt_rx_reo_err_no_fl("-----------------------------------------------------------------------------------------------------------------------------------------");
2297 	mgmt_rx_reo_err_no_fl("|Index|Type|Link|SeqNo|Global ts |Last ts   |Ingress ts|Flags    |Ingress Dur|Size|Pos|Wait Count                                       |");
2298 	mgmt_rx_reo_err_no_fl("-----------------------------------------------------------------------------------------------------------------------------------------");
2299 
2300 	index = start_index;
2301 	for (entry = 0; entry < num_valid_entries; entry++) {
2302 		struct reo_ingress_debug_frame_info *info;
2303 		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {'\0'};
2304 		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {'\0'};
2305 		char flag_queued = ' ';
2306 		char flag_stale = ' ';
2307 		char flag_error = ' ';
2308 		char flag_zero_wait_count_rx = ' ';
2309 		char flag_immediate_delivery = ' ';
2310 		int64_t ts_last_released_frame = -1;
2311 
2312 		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];
2313 
2314 		if (info->ts_last_released_frame.valid)
2315 			ts_last_released_frame =
2316 					info->ts_last_released_frame.global_ts;
2317 
2318 		if (info->is_queued)
2319 			flag_queued = 'Q';
2320 
2321 		if (info->is_stale)
2322 			flag_stale = 'S';
2323 
2324 		if (info->is_error)
2325 			flag_error = 'E';
2326 
2327 		if (info->zero_wait_count_rx)
2328 			flag_zero_wait_count_rx = 'Z';
2329 
2330 		if (info->immediate_delivery)
2331 			flag_immediate_delivery = 'I';
2332 
2333 		snprintf(flags, sizeof(flags), "%c %c %c %c %c", flag_error,
2334 			 flag_stale, flag_queued, flag_zero_wait_count_rx,
2335 			 flag_immediate_delivery);
2336 		snprintf(wait_count, sizeof(wait_count),
2337 			 "%9llx(%8x, %8x, %8x, %8x)",
2338 			 info->wait_count.total_count,
2339 			 info->wait_count.per_link_count[0],
2340 			 info->wait_count.per_link_count[1],
2341 			 info->wait_count.per_link_count[2],
2342 			 info->wait_count.per_link_count[3]);
2343 
2344 		mgmt_rx_reo_err_no_fl("|%5u|%4u|%4u|%5u|%10u|%10lld|%10llu|%9s|%11llu|%4d|%3d|%49s|",
2345 				      entry, info->type, info->link_id,
2346 				      info->mgmt_pkt_ctr,
2347 				      info->global_timestamp,
2348 				      ts_last_released_frame,
2349 				      info->ingress_timestamp, flags,
2350 				      info->ingress_duration,
2351 				      info->list_size_rx,
2352 				      info->list_insertion_pos, wait_count);
2353 	mgmt_rx_reo_err_no_fl("----------------------------------------------"
2354 			      "----------------------------------------------"
2355 			      "---------------------------------------------");
2356 
2357 		index++;
2358 		index %= MGMT_RX_REO_INGRESS_FRAME_DEBUG_ENTRIES_MAX;
2359 	}
2360 
2361 	return QDF_STATUS_SUCCESS;
2362 }
2363 #else
2364 /**
2365  * mgmt_rx_reo_print_ingress_frame_stats() - API to print the stats
2366  * related to frames going into the reorder module
2367  * @reo_ctx: Pointer to reorder context
2368  *
2369  * API to print the stats related to frames going into the management
2370  * Rx reorder module.
2371  *
2372  * Return: QDF_STATUS
2373  */
2374 static QDF_STATUS
2375 mgmt_rx_reo_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
2376 {
2377 	return QDF_STATUS_SUCCESS;
2378 }
2379 
2380 /**
2381  * mgmt_rx_reo_log_ingress_frame() - Log the information about a frame entering
2382  * the reorder algorithm.
2383  * @reo_ctx: management rx reorder context
2384  * @desc: Pointer to frame descriptor
2385  * @is_queued: Indicates whether this frame is queued to reorder list
2386  * @is_error: Indicates whether any error occurred during processing this frame
2387  *
2388  * Return: QDF_STATUS of operation
2389  */
2390 static QDF_STATUS
2391 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
2392 			      struct mgmt_rx_reo_frame_descriptor *desc,
2393 			      bool is_queued, bool is_error)
2394 {
2395 	return QDF_STATUS_SUCCESS;
2396 }
2397 
2398 /**
2399  * mgmt_rx_reo_print_ingress_frame_info() - Print debug information about
2400  * the latest frames entering the reorder module
2401  * @reo_ctx: management rx reorder context
2402  *
2403  * Return: QDF_STATUS of operation
2404  */
2405 static QDF_STATUS
2406 mgmt_rx_reo_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2407 {
2408 	return QDF_STATUS_SUCCESS;
2409 }
2410 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2411 
QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	QDF_STATUS ret;
	int8_t num_mlo_links;

	if (!is_queued)
		return QDF_STATUS_E_NULL_VALUE;

	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = mgmt_rx_reo_get_context();
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	num_mlo_links = mgmt_rx_reo_get_num_mlo_links(reo_ctx);
	qdf_assert_always(num_mlo_links > 0);
	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);

	/*
	 * Critical Section = Host snapshot update + Calculation of wait
	 * counts + Update reorder list. Following section describes the
	 * motivation for making this a critical section.
	 * Let's take an example of 2 links (Link A & B) and each has received
	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
	 * MLO global time stamp of B1. Host is concurrently executing
	 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs.
	 *
	 * A lock less version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is
	 * as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     Host snapshot update
	 *     Calculation of wait counts
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * We may run into race conditions under the following sequence of
	 * operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1
	 * 2. Host snapshot update for link B in context of frame B1
	 * 3. Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 6. Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This leads to incorrect behaviour as B1 goes to upper layer before
	 * A1.
	 *
	 * To prevent this lets make Host snapshot update + Calculate wait count
	 * a critical section by adding locks. The updated version of the API
	 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *     UNLOCK
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * With this API also We may run into race conditions under the
	 * following sequence of operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1 +
	 *    Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 2. Host snapshot update for link B in context of frame B1 +
	 *    Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This also leads to incorrect behaviour as B1 goes to upper layer
	 * before A1.
	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count + Update reorder list a critical section by adding locks.
	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
	 * is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *         Update reorder list
	 *     UNLOCK
	 *     Release to upper layer
	 * }
	 */
	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);

	/* Update the Host snapshot */
	ret = wlan_mgmt_rx_reo_update_host_snapshot(
						pdev,
						desc->rx_params->reo_params);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Compute wait count for this frame/event */
	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(
						pdev,
						desc->rx_params->reo_params,
						num_mlo_links,
						&desc->wait_count);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Update the REO list */
	ret = mgmt_rx_reo_update_list(&reo_ctx->reo_list, num_mlo_links,
				      desc, is_queued);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Log this frame's journey through the reorder module */
	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
					    *is_queued, false);
	if (QDF_IS_STATUS_ERROR(ret)) {
		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
		return ret;
	}

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	/* Finally, release the entries for which pending frame is received */
	return mgmt_rx_reo_list_release_entries(reo_ctx);

failure:
	/*
	 * Ignore the return value of this function call, return
	 * the actual reason for failure.
	 */
	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	return ret;
}
2575 
2576 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
2577 /**
2578  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
2579  * context.
2580  * @reo_context: Pointer to reo context
2581  *
2582  * Return: QDF_STATUS of operation
2583  */
2584 static inline QDF_STATUS
2585 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
2586 {
2587 	return QDF_STATUS_SUCCESS;
2588 }
2589 
2590 /**
2591  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
2592  * context.
2593  * @reo_context: Pointer to reo context
2594  *
2595  * Return: QDF_STATUS of operation
2596  */
2597 static inline QDF_STATUS
2598 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
2599 {
2600 	return QDF_STATUS_SUCCESS;
2601 }
2602 
/* Stub when WLAN_MGMT_RX_REO_SIM_SUPPORT is disabled: no per-pdev
 * simulation state to create.
 */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
2608 
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	/* No-op stub: simulation support is compiled out */
	return QDF_STATUS_SUCCESS;
}
2614 #else
2615 /**
2616  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
2617  * master management frame list
2618  * @master_frame_list: pointer to master management frame list
2619  * @frame: pointer to management frame parameters
2620  *
2621  * This API removes frames from the master management frame list. This API is
2622  * used in case of FW consumed management frames or management frames which
2623  * are dropped at host due to any error.
2624  *
2625  * Return: QDF_STATUS of operation
2626  */
2627 static QDF_STATUS
2628 mgmt_rx_reo_sim_remove_frame_from_master_list(
2629 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
2630 		const struct mgmt_rx_frame_params *frame)
2631 {
2632 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
2633 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
2634 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
2635 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
2636 	QDF_STATUS status;
2637 
2638 	if (!master_frame_list) {
2639 		mgmt_rx_reo_err("Mgmt master frame list is null");
2640 		return QDF_STATUS_E_NULL_VALUE;
2641 	}
2642 
2643 	if (!frame) {
2644 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
2645 		return QDF_STATUS_E_NULL_VALUE;
2646 	}
2647 
2648 	qdf_spin_lock(&master_frame_list->lock);
2649 
2650 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
2651 			  node) {
2652 		if (pending_entry->params.link_id == frame->link_id &&
2653 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
2654 		    pending_entry->params.global_timestamp ==
2655 		    frame->global_timestamp) {
2656 			matching_pend_entry = pending_entry;
2657 			break;
2658 		}
2659 	}
2660 
2661 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
2662 		if (stale_entry->params.link_id == frame->link_id &&
2663 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
2664 		    stale_entry->params.global_timestamp ==
2665 		    frame->global_timestamp) {
2666 			matching_stale_entry = stale_entry;
2667 			break;
2668 		}
2669 	}
2670 
2671 	/* Found in pending and stale list. Duplicate entries, assert */
2672 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
2673 
2674 	if (!matching_pend_entry && !matching_stale_entry) {
2675 		qdf_spin_unlock(&master_frame_list->lock);
2676 		mgmt_rx_reo_err("No matching frame in pend/stale list");
2677 		return QDF_STATUS_E_FAILURE;
2678 	}
2679 
2680 	if (matching_pend_entry) {
2681 		status = qdf_list_remove_node(&master_frame_list->pending_list,
2682 					      &matching_pend_entry->node);
2683 		if (QDF_IS_STATUS_ERROR(status)) {
2684 			qdf_spin_unlock(&master_frame_list->lock);
2685 			mgmt_rx_reo_err("Failed to remove the matching entry");
2686 			return status;
2687 		}
2688 
2689 		qdf_mem_free(matching_pend_entry);
2690 	}
2691 
2692 	if (matching_stale_entry) {
2693 		status = qdf_list_remove_node(&master_frame_list->stale_list,
2694 					      &matching_stale_entry->node);
2695 		if (QDF_IS_STATUS_ERROR(status)) {
2696 			qdf_spin_unlock(&master_frame_list->lock);
2697 			mgmt_rx_reo_err("Failed to remove the matching entry");
2698 			return status;
2699 		}
2700 
2701 		qdf_mem_free(matching_stale_entry);
2702 	}
2703 
2704 	qdf_spin_unlock(&master_frame_list->lock);
2705 
2706 	return QDF_STATUS_SUCCESS;
2707 }
2708 
2709 /**
2710  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
2711  * pending management frame list
2712  * @master_frame_list: pointer to master management frame list
2713  * @frame: pointer to management frame parameters
2714  *
2715  * This API removes frames from the pending management frame list. This API is
2716  * used in case of FW consumed management frames or management frames which
2717  * are dropped at host due to any error.
2718  *
2719  * Return: QDF_STATUS of operation
2720  */
2721 static QDF_STATUS
2722 mgmt_rx_reo_sim_remove_frame_from_pending_list(
2723 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
2724 		const struct mgmt_rx_frame_params *frame)
2725 {
2726 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
2727 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
2728 	QDF_STATUS status;
2729 
2730 	if (!master_frame_list) {
2731 		mgmt_rx_reo_err("Mgmt master frame list is null");
2732 		return QDF_STATUS_E_NULL_VALUE;
2733 	}
2734 
2735 	if (!frame) {
2736 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
2737 		return QDF_STATUS_E_NULL_VALUE;
2738 	}
2739 
2740 	qdf_spin_lock(&master_frame_list->lock);
2741 
2742 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
2743 		if (cur_entry->params.link_id == frame->link_id &&
2744 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
2745 		    cur_entry->params.global_timestamp ==
2746 		    frame->global_timestamp) {
2747 			matching_entry = cur_entry;
2748 			break;
2749 		}
2750 	}
2751 
2752 	if (!matching_entry) {
2753 		qdf_spin_unlock(&master_frame_list->lock);
2754 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
2755 		return QDF_STATUS_E_FAILURE;
2756 	}
2757 
2758 	status = qdf_list_remove_node(&master_frame_list->pending_list,
2759 				      &matching_entry->node);
2760 	if (QDF_IS_STATUS_ERROR(status)) {
2761 		qdf_spin_unlock(&master_frame_list->lock);
2762 		mgmt_rx_reo_err("Failed to remove the matching entry");
2763 		return status;
2764 	}
2765 
2766 	qdf_mem_free(matching_entry);
2767 
2768 	qdf_spin_unlock(&master_frame_list->lock);
2769 
2770 
2771 	return QDF_STATUS_SUCCESS;
2772 }
2773 
2774 /**
2775  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
2776  * pending management frame list
2777  * @master_frame_list: pointer to master management frame list
2778  * @frame: pointer to management frame parameters
2779  *
2780  * This API inserts frames to the pending management frame list. This API is
2781  * used to insert frames generated by the MAC HW to the pending frame list.
2782  *
2783  * Return: QDF_STATUS of operation
2784  */
2785 static QDF_STATUS
2786 mgmt_rx_reo_sim_add_frame_to_pending_list(
2787 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
2788 		const struct mgmt_rx_frame_params *frame)
2789 {
2790 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
2791 	QDF_STATUS status;
2792 
2793 	if (!master_frame_list) {
2794 		mgmt_rx_reo_err("Mgmt master frame list is null");
2795 		return QDF_STATUS_E_NULL_VALUE;
2796 	}
2797 
2798 	if (!frame) {
2799 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
2800 		return QDF_STATUS_E_NULL_VALUE;
2801 	}
2802 
2803 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
2804 	if (!new_entry) {
2805 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
2806 		return QDF_STATUS_E_NOMEM;
2807 	}
2808 
2809 	new_entry->params = *frame;
2810 
2811 	qdf_spin_lock(&master_frame_list->lock);
2812 
2813 	status = qdf_list_insert_back(&master_frame_list->pending_list,
2814 				      &new_entry->node);
2815 
2816 	qdf_spin_unlock(&master_frame_list->lock);
2817 
2818 	if (QDF_IS_STATUS_ERROR(status)) {
2819 		mgmt_rx_reo_err("Failed to add frame to pending list");
2820 		qdf_mem_free(new_entry);
2821 		return status;
2822 	}
2823 
2824 	return QDF_STATUS_SUCCESS;
2825 }
2826 
2827 QDF_STATUS
2828 mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
2829 				 struct mgmt_rx_event_params *mgmt_rx_params)
2830 {
2831 	struct mgmt_rx_reo_context *reo_context;
2832 	struct mgmt_rx_reo_sim_context *sim_context;
2833 	QDF_STATUS status;
2834 	struct mgmt_rx_reo_params *reo_params;
2835 	int8_t num_mlo_links;
2836 
2837 	if (!mgmt_rx_params) {
2838 		mgmt_rx_reo_err("Mgmt rx params null");
2839 		return QDF_STATUS_E_NULL_VALUE;
2840 	}
2841 
2842 	reo_params = mgmt_rx_params->reo_params;
2843 
2844 	reo_context = mgmt_rx_reo_get_context();
2845 	if (!reo_context) {
2846 		mgmt_rx_reo_err("Mgmt reo context is null");
2847 		return QDF_STATUS_E_NULL_VALUE;
2848 	}
2849 
2850 	sim_context = &reo_context->sim_context;
2851 
2852 	num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
2853 
2854 	if (num_mlo_links < 0 || num_mlo_links > MGMT_RX_REO_MAX_LINKS) {
2855 		mgmt_rx_reo_err("Invalid number of MLO links %d",
2856 				num_mlo_links);
2857 		return QDF_STATUS_E_INVAL;
2858 	}
2859 
2860 	qdf_spin_lock(&sim_context->master_frame_list.lock);
2861 
2862 	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
2863 		qdf_spin_unlock(&sim_context->master_frame_list.lock);
2864 		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
2865 		qdf_assert_always(0);
2866 	} else {
2867 		struct mgmt_rx_frame_params *cur_entry_params;
2868 		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
2869 		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
2870 
2871 		/**
2872 		 * Make sure the frames delivered to upper layer are in the
2873 		 * increasing order of global time stamp. For that the frame
2874 		 * which is being delivered should be present at the head of the
2875 		 * pending frame list. There could be multiple frames with the
2876 		 * same global time stamp in the pending frame list. Search
2877 		 * among all the frames at the head of the list which has the
2878 		 * same global time stamp as the frame which is being delivered.
2879 		 * To find matching frame, check whether packet counter,
2880 		 * global time stamp and link id are same.
2881 		 */
2882 		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
2883 				  cur_entry, node) {
2884 			cur_entry_params = &cur_entry->params;
2885 
2886 			if (cur_entry_params->global_timestamp !=
2887 			    reo_params->global_timestamp)
2888 				break;
2889 
2890 			if (cur_entry_params->link_id == reo_params->link_id &&
2891 			    cur_entry_params->mgmt_pkt_ctr ==
2892 			    reo_params->mgmt_pkt_ctr) {
2893 				matching_entry = cur_entry;
2894 				break;
2895 			}
2896 		}
2897 
2898 		if (!matching_entry) {
2899 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
2900 			mgmt_rx_reo_err("reo sim failure: mismatch");
2901 			qdf_assert_always(0);
2902 		}
2903 
2904 		status = qdf_list_remove_node(
2905 				&sim_context->master_frame_list.pending_list,
2906 				&matching_entry->node);
2907 		qdf_mem_free(matching_entry);
2908 
2909 		if (QDF_IS_STATUS_ERROR(status)) {
2910 			qdf_spin_unlock(&sim_context->master_frame_list.lock);
2911 			mgmt_rx_reo_err("Failed to remove matching entry");
2912 			return status;
2913 		}
2914 	}
2915 
2916 	qdf_spin_unlock(&sim_context->master_frame_list.lock);
2917 
2918 	mgmt_rx_reo_debug("Successfully processed mgmt frame");
2919 	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
2920 			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
2921 			  reo_params->global_timestamp);
2922 
2923 	return QDF_STATUS_SUCCESS;
2924 }
2925 
2926 /**
2927  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
2928  * @percentage_true: probability (in percentage) of true
2929  *
2930  * API to generate true with probability @percentage_true % and false with
2931  * probability (100 - @percentage_true) %.
2932  *
2933  * Return: true with probability @percentage_true % and false with probability
2934  * (100 - @percentage_true) %
2935  */
2936 static bool
2937 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
2938 {
2939 	uint32_t rand;
2940 
2941 	if (percentage_true > 100) {
2942 		mgmt_rx_reo_err("Invalid probability value for true, %u",
2943 				percentage_true);
2944 		return -EINVAL;
2945 	}
2946 
2947 	get_random_bytes(&rand, sizeof(rand));
2948 
2949 	return ((rand % 100) < percentage_true);
2950 }
2951 
2952 /**
2953  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
2954  * value in the range [0, max)
2955  * @max: upper limit for the output
2956  *
2957  * API to generate random unsigned integer value in the range [0, max).
2958  *
2959  * Return: unsigned integer value in the range [0, max)
2960  */
2961 static uint32_t
2962 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
2963 {
2964 	uint32_t rand;
2965 
2966 	get_random_bytes(&rand, sizeof(rand));
2967 
2968 	return (rand % max);
2969 }
2970 
2971 /**
2972  * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
2973  * @sleeptime_us: Sleep time in micro seconds
2974  *
2975  * This API uses msleep() internally. So the granularity is limited to
2976  * milliseconds.
2977  *
2978  * Return: none
2979  */
static void
mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
{
	/* Integer division truncates: requests below USEC_PER_MSEC become
	 * msleep(0), i.e. effectively no sleep.
	 */
	msleep(sleeptime_us / USEC_PER_MSEC);
}
2985 
2986 /**
2987  * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
2988  * layer
2989  * @arg: Argument
2990  *
2991  * This API handles the management frame at the host layer. This is applicable
2992  * for simulation alone.
2993  *
2994  * Return: none
2995  */
static void
mgmt_rx_reo_sim_frame_handler_host(void *arg)
{
	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
	uint32_t fw_to_host_delay_us;
	bool is_error_frame = false;
	int8_t link_id = -1;	/* -1 until read from the frame; used in logs */
	struct mgmt_rx_event_params *rx_params;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct wlan_objmgr_pdev *pdev;

	if (!frame_fw) {
		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
				link_id);
		goto error_print;
	}

	link_id = frame_fw->params.link_id;

	sim_context = frame_fw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_fw_frame;
	}

	/* Simulate FW-to-host delivery latency with a bounded random delay */
	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
			      mgmt_rx_reo_sim_get_random_unsigned_int(
			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);

	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);

	/* Frames not consumed by FW are randomly tagged as host error frames
	 * with MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES probability.
	 */
	if (!frame_fw->is_consumed_by_fw) {
		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);

		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. Error frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		if (is_error_frame) {
			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_fw->params);

			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
						link_id);
				qdf_assert_always(0);
			}
		}
	}

	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
			  link_id, frame_fw->params.global_timestamp,
			  frame_fw->params.mgmt_pkt_ctr,
			  frame_fw->is_consumed_by_fw, is_error_frame);

	rx_params = alloc_mgmt_rx_event_params();
	if (!rx_params) {
		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
				link_id);
		goto error_free_fw_frame;
	}

	rx_params->reo_params->link_id = frame_fw->params.link_id;
	rx_params->reo_params->global_timestamp =
					frame_fw->params.global_timestamp;
	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
	rx_params->reo_params->valid = true;

	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);
	if (!pdev) {
		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
		goto error_free_mgmt_rx_event_params;
	}

	/* Route the frame to the matching reo handler. rx_params is freed
	 * here for the drop/FW-consumed paths; in the last branch ownership
	 * appears to pass to tgt_mgmt_rx_reo_frame_handler() —
	 * NOTE(review): confirm that handler frees rx_params on all paths.
	 */
	if (is_error_frame) {
		status = tgt_mgmt_rx_reo_host_drop_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else if (frame_fw->is_consumed_by_fw) {
		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else {
		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
	}

	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to execute reo algorithm");
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_fw);

	return;

error_free_mgmt_rx_event_params:
	free_mgmt_rx_event_params(rx_params);
error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_print:
	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
			link_id);
}
3107 
3108 /**
3109  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
3110  * frame reordering
3111  * @link_id: link id
3112  * @id: snapshot id
3113  * @value: snapshot value
3114  *
3115  * This API writes the snapshots used for management frame reordering. MAC HW
3116  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
3117  * snapshots.
3118  *
3119  * Return: QDF_STATUS
3120  */
static QDF_STATUS
mgmt_rx_reo_sim_write_snapshot(uint8_t link_id,
			       enum mgmt_rx_reo_shared_snapshot_id id,
			       struct mgmt_rx_reo_snapshot value)
{
	struct wlan_objmgr_pdev *pdev;
	struct mgmt_rx_reo_snapshot *snapshot_address;
	QDF_STATUS status;

	pdev = wlan_get_pdev_from_mlo_link_id(link_id, WLAN_MGMT_RX_REO_SIM_ID);

	if (!pdev) {
		mgmt_rx_reo_err("pdev is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
						      &snapshot_address);

	/* The pdev reference is dropped before the write below; the error
	 * log only prints the pointer value, it never dereferences pdev.
	 */
	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
				id, pdev);
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): the two words are written without any ordering
	 * barrier; assumed acceptable for simulation — confirm readers
	 * tolerate a torn low/high update.
	 */
	snapshot_address->mgmt_rx_reo_snapshot_low =
						value.mgmt_rx_reo_snapshot_low;
	snapshot_address->mgmt_rx_reo_snapshot_high =
						value.mgmt_rx_reo_snapshot_high;

	return QDF_STATUS_SUCCESS;
}
3155 
/*
 * Bit layout of the two 32-bit shared snapshot words (see
 * mgmt_rx_reo_sim_get_snapshot_value()): the low word carries a valid bit,
 * the full 16-bit management packet counter and the 15 LSBs of the global
 * time stamp; the high word carries the remaining 17 bits of the time stamp
 * plus a redundant copy of the packet counter's 15 LSBs.
 */
#define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
#define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
#define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
#define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
#define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
#define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)

#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
#define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
#define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
3167 
3168 /**
3169  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
3170  * management frame
3171  * @global_timestamp: global time stamp
3172  * @mgmt_pkt_ctr: management packet counter
3173  *
3174  * This API gets the snapshot value for a frame with time stamp
3175  * @global_timestamp and sequence number @mgmt_pkt_ctr.
3176  *
3177  * Return: snapshot value (struct mgmt_rx_reo_snapshot)
3178  */
3179 static struct mgmt_rx_reo_snapshot
3180 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
3181 				   uint16_t mgmt_pkt_ctr)
3182 {
3183 	struct mgmt_rx_reo_snapshot snapshot = {0};
3184 
3185 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
3186 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
3187 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
3188 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
3189 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
3190 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
3191 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
3192 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
3193 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
3194 		     global_timestamp);
3195 
3196 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
3197 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
3198 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
3199 		     global_timestamp >> 15);
3200 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
3201 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
3202 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
3203 		     mgmt_pkt_ctr);
3204 
3205 	return snapshot;
3206 }
3207 
3208 /**
3209  * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
3210  * @arg: Argument
3211  *
3212  * This API handles the management frame at the fw layer. This is applicable
3213  * for simulation alone.
3214  *
3215  * Return: none
3216  */
static void
mgmt_rx_reo_sim_frame_handler_fw(void *arg)
{
	struct mgmt_rx_frame_mac_hw *frame_hw =
					(struct mgmt_rx_frame_mac_hw *)arg;
	uint32_t mac_hw_to_fw_delay_us;
	bool is_consumed_by_fw;
	struct  mgmt_rx_frame_fw *frame_fw;
	int8_t link_id = -1;	/* -1 until read from the frame; used in logs */
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
	struct mgmt_rx_reo_snapshot snapshot_value;
	bool ret;

	if (!frame_hw) {
		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
				link_id);
		qdf_assert_always(0);
	}

	link_id = frame_hw->params.link_id;

	sim_context = frame_hw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_mac_hw_frame;
	}

	/* Simulate MAC HW-to-FW delivery latency with a bounded random delay */
	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);

	/* Randomly decide whether the FW consumes this frame */
	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);

	if (is_consumed_by_fw) {
		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. FW consumed frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_hw->params);

		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
					link_id);
			qdf_assert_always(0);
		}
	}

	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
			  link_id, frame_hw->params.global_timestamp,
			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);

	/* Build the FW-level frame object handed to the host handler */
	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
	if (!frame_fw) {
		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
				link_id);
		goto error_free_mac_hw_frame;
	}

	frame_fw->params = frame_hw->params;
	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
	frame_fw->sim_context = frame_hw->sim_context;

	/* Consumed frames update the FW-consumed snapshot, forwarded frames
	 * the FW-forwarded one.
	 */
	snapshot_id = is_consumed_by_fw ?
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED;

	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
					frame_hw->params.global_timestamp,
					frame_hw->params.mgmt_pkt_ctr);

	status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
						snapshot_value);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
				link_id, snapshot_id);
		goto error_free_fw_frame;
	}

	/* Hand the frame over to the per-link host handler work queue */
	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
		goto error_free_fw_frame;
	}

	ret = qdf_queue_work(
			NULL, sim_context->host_mgmt_frame_handler[link_id],
			&frame_fw->frame_handler_host);
	if (!ret) {
		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
				link_id);
		goto error_free_fw_frame;
	}

	/* frame_fw now belongs to the queued work; only the HW frame is
	 * released here.
	 */
	qdf_mem_free(frame_hw);

	return;

error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_free_mac_hw_frame:
	qdf_mem_free(frame_hw);

	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
			link_id);
}
3333 
3334 /**
3335  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
3336  * the air
3337  * @mac_hw: pointer to structure representing MAC HW
3338  * @num_mlo_links: number of MLO HW links
3339  * @frame: pointer to management frame parameters
3340  *
3341  * This API simulates the management frame reception from air.
3342  *
3343  * Return: QDF_STATUS
3344  */
3345 static QDF_STATUS
3346 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
3347 				 uint8_t num_mlo_links,
3348 				 struct mgmt_rx_frame_params *frame)
3349 {
3350 	uint8_t link_id;
3351 
3352 	if (!mac_hw) {
3353 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
3354 		return QDF_STATUS_E_NULL_VALUE;
3355 	}
3356 
3357 	if (num_mlo_links == 0 || num_mlo_links > MGMT_RX_REO_MAX_LINKS) {
3358 		mgmt_rx_reo_err("Invalid number of MLO links %u",
3359 				num_mlo_links);
3360 		return QDF_STATUS_E_INVAL;
3361 	}
3362 
3363 	if (!frame) {
3364 		mgmt_rx_reo_err("pointer to frame parameters is null");
3365 		return QDF_STATUS_E_NULL_VALUE;
3366 	}
3367 
3368 	link_id = mgmt_rx_reo_sim_get_random_unsigned_int(num_mlo_links);
3369 
3370 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
3371 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
3372 	frame->link_id = link_id;
3373 
3374 	return QDF_STATUS_SUCCESS;
3375 }
3376 
3377 /**
3378  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
3379  * HW in case of any Rx error.
3380  * @mac_hw: pointer to structure representing MAC HW
3381  * @frame: pointer to management frame parameters
3382  *
3383  * Return: QDF_STATUS
3384  */
3385 static QDF_STATUS
3386 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
3387 				      struct mgmt_rx_frame_params *frame)
3388 {
3389 	if (!mac_hw) {
3390 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
3391 		return QDF_STATUS_E_NULL_VALUE;
3392 	}
3393 
3394 	if (!frame) {
3395 		mgmt_rx_reo_err("pointer to frame parameters is null");
3396 		return QDF_STATUS_E_NULL_VALUE;
3397 	}
3398 
3399 	if (frame->link_id >= MGMT_RX_REO_MAX_LINKS) {
3400 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
3401 		return QDF_STATUS_E_INVAL;
3402 	}
3403 
3404 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
3405 
3406 	return QDF_STATUS_SUCCESS;
3407 }
3408 
3409 /**
3410  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
3411  * @data: pointer to data input
3412  *
3413  * kthread handler to simulate MAC HW.
3414  *
3415  * Return: 0 for success, else failure
3416  */
static int
mgmt_rx_reo_sim_mac_hw_thread(void *data)
{
	struct mgmt_rx_reo_sim_context *sim_context = data;
	struct mgmt_rx_reo_sim_mac_hw *mac_hw;

	if (!sim_context) {
		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
		return -EINVAL;
	}

	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;

	/* Each iteration: receive one frame "from the air", book it in the
	 * master pending list, publish the MAC HW snapshot, then hand the
	 * frame to the per-link FW handler work queue. Every failure after
	 * reception is rolled back in reverse order before continuing.
	 */
	while (!qdf_thread_should_stop()) {
		uint32_t inter_frame_delay_us;
		struct mgmt_rx_frame_params frame;
		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
		int8_t link_id = -1;
		QDF_STATUS status;
		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
		struct mgmt_rx_reo_snapshot snapshot_value;
		int8_t num_mlo_links;
		bool ret;

		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
		if (num_mlo_links < 0 ||
		    num_mlo_links > MGMT_RX_REO_MAX_LINKS) {
			mgmt_rx_reo_err("Invalid number of MLO links %d",
					num_mlo_links);
			qdf_assert_always(0);
		}

		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
							  &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Receive from the air failed");
			/**
			 * Frame reception failed and we are not sure about the
			 * link id. Without link id there is no way to restore
			 * the mac hw state. Hence assert unconditionally.
			 */
			qdf_assert_always(0);
		}
		link_id = frame.link_id;

		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
				  link_id, frame.global_timestamp,
				  frame.mgmt_pkt_ctr);

		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
		if (!frame_mac_hw) {
			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			continue;
		}

		frame_mac_hw->params = frame;
		frame_mac_hw->sim_context = sim_context;

		/* Book-keep the frame in the master pending list so later
		 * stages can find and validate it.
		 */
		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
				&sim_context->master_frame_list, &frame);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
					link_id);

			/* Cleanup */
			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
						frame.global_timestamp,
						frame.mgmt_pkt_ctr);

		status = mgmt_rx_reo_sim_write_snapshot(link_id, snapshot_id,
							snapshot_value);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
					link_id, snapshot_id);

			/* Cleanup: undo both earlier steps in reverse order */
			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
				&sim_context->master_frame_list, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			status = mgmt_rx_reo_sim_undo_receive_from_air(
								mac_hw, &frame);
			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));

			qdf_mem_free(frame_mac_hw);

			continue;
		}

		/* Hand over to the per-link FW handler; frame_mac_hw is
		 * owned by the queued work from here on.
		 */
		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
					 mgmt_rx_reo_sim_frame_handler_fw,
					 frame_mac_hw);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("HW-%d : Failed to create work",
					link_id);
			qdf_assert_always(0);
		}

		ret = qdf_queue_work(
			NULL, sim_context->fw_mgmt_frame_handler[link_id],
			&frame_mac_hw->frame_handler_fw);
		if (!ret) {
			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
					link_id);
			qdf_assert_always(0);
		}

		/* Random inter-frame gap before generating the next frame */
		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);

		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
	}

	return 0;
}
3550 
3551 /**
3552  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
3553  * management frame list
3554  * @pending_frame_list: Pointer to master frame list
3555  *
3556  * This API initializes the master management frame list
3557  *
3558  * Return: QDF_STATUS
3559  */
3560 static QDF_STATUS
3561 mgmt_rx_reo_sim_init_master_frame_list(
3562 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
3563 {
3564 	qdf_spinlock_create(&master_frame_list->lock);
3565 
3566 	qdf_list_create(&master_frame_list->pending_list,
3567 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
3568 	qdf_list_create(&master_frame_list->stale_list,
3569 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
3570 
3571 	return QDF_STATUS_SUCCESS;
3572 }
3573 
3574 /**
3575  * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
3576  * management frame list
3577  * @master_frame_list: Pointer to master frame list
3578  *
3579  * This API de initializes the master management frame list
3580  *
3581  * Return: QDF_STATUS
3582  */
3583 static QDF_STATUS
3584 mgmt_rx_reo_sim_deinit_master_frame_list(
3585 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
3586 {
3587 	qdf_spin_lock(&master_frame_list->lock);
3588 	qdf_list_destroy(&master_frame_list->stale_list);
3589 	qdf_list_destroy(&master_frame_list->pending_list);
3590 	qdf_spin_unlock(&master_frame_list->lock);
3591 
3592 	qdf_spinlock_destroy(&master_frame_list->lock);
3593 
3594 	return QDF_STATUS_SUCCESS;
3595 }
3596 
3597 /**
3598  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
3599  * to pdev map
3600  * @link_id_to_pdev_map: pointer to link id to pdev map
3601  * @pdev: pointer to pdev object
3602  *
3603  * This API incrementally builds the MLO HW link id to pdev map. This API is
3604  * used only for simulation.
3605  *
3606  * Return: QDF_STATUS
3607  */
3608 static QDF_STATUS
3609 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
3610 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
3611 		struct wlan_objmgr_pdev *pdev)
3612 {
3613 	if (!link_id_to_pdev_map) {
3614 		mgmt_rx_reo_err("Link id to pdev map is null");
3615 		return QDF_STATUS_E_NULL_VALUE;
3616 	}
3617 
3618 	if (!pdev) {
3619 		mgmt_rx_reo_err("pdev is null");
3620 		return QDF_STATUS_E_NULL_VALUE;
3621 	}
3622 
3623 	qdf_spin_lock(&link_id_to_pdev_map->lock);
3624 
3625 	link_id_to_pdev_map->map[link_id_to_pdev_map->num_mlo_links] = pdev;
3626 	link_id_to_pdev_map->num_mlo_links++;
3627 
3628 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
3629 
3630 	return QDF_STATUS_SUCCESS;
3631 }
3632 
3633 /**
3634  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
3635  * id to pdev map
3636  * @link_id_to_pdev_map: pointer to link id to pdev map
3637  * @pdev: pointer to pdev object
3638  *
3639  * This API incrementally destroys the MLO HW link id to pdev map. This API is
3640  * used only for simulation.
3641  *
3642  * Return: QDF_STATUS
3643  */
3644 static QDF_STATUS
3645 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
3646 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
3647 		struct wlan_objmgr_pdev *pdev)
3648 {
3649 	uint8_t link_id;
3650 
3651 	if (!link_id_to_pdev_map) {
3652 		mgmt_rx_reo_err("Link id to pdev map is null");
3653 		return QDF_STATUS_E_NULL_VALUE;
3654 	}
3655 
3656 	if (!pdev) {
3657 		mgmt_rx_reo_err("pdev is null");
3658 		return QDF_STATUS_E_NULL_VALUE;
3659 	}
3660 
3661 	qdf_spin_lock(&link_id_to_pdev_map->lock);
3662 
3663 	for (link_id = 0; link_id < link_id_to_pdev_map->num_mlo_links;
3664 	     link_id++) {
3665 		if (link_id_to_pdev_map->map[link_id] == pdev) {
3666 			link_id_to_pdev_map->map[link_id] = NULL;
3667 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
3668 
3669 			return QDF_STATUS_SUCCESS;
3670 		}
3671 	}
3672 
3673 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
3674 
3675 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
3676 
3677 	return QDF_STATUS_E_FAILURE;
3678 }
3679 
3680 QDF_STATUS
3681 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
3682 {
3683 	struct mgmt_rx_reo_sim_context *sim_context;
3684 	QDF_STATUS status;
3685 
3686 	sim_context = mgmt_rx_reo_sim_get_context();
3687 	if (!sim_context) {
3688 		mgmt_rx_reo_err("Mgmt simulation context is null");
3689 		return QDF_STATUS_E_NULL_VALUE;
3690 	}
3691 
3692 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
3693 				&sim_context->link_id_to_pdev_map, pdev);
3694 
3695 	if (QDF_IS_STATUS_ERROR(status)) {
3696 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
3697 		return status;
3698 	}
3699 
3700 	return QDF_STATUS_SUCCESS;
3701 }
3702 
3703 QDF_STATUS
3704 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
3705 {
3706 	struct mgmt_rx_reo_sim_context *sim_context;
3707 	QDF_STATUS status;
3708 
3709 	sim_context = mgmt_rx_reo_sim_get_context();
3710 	if (!sim_context) {
3711 		mgmt_rx_reo_err("Mgmt simulation context is null");
3712 		return QDF_STATUS_E_NULL_VALUE;
3713 	}
3714 
3715 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
3716 				&sim_context->link_id_to_pdev_map, pdev);
3717 
3718 	if (QDF_IS_STATUS_ERROR(status)) {
3719 		mgmt_rx_reo_err("Failed to remove pdev from the map");
3720 		return status;
3721 	}
3722 
3723 	return QDF_STATUS_SUCCESS;
3724 }
3725 
3726 QDF_STATUS
3727 mgmt_rx_reo_sim_start(void)
3728 {
3729 	struct mgmt_rx_reo_context *reo_context;
3730 	struct mgmt_rx_reo_sim_context *sim_context;
3731 	qdf_thread_t *mac_hw_thread;
3732 	uint8_t link_id;
3733 	uint8_t id;
3734 	int8_t num_mlo_links;
3735 	QDF_STATUS status;
3736 
3737 	reo_context = mgmt_rx_reo_get_context();
3738 	if (!reo_context) {
3739 		mgmt_rx_reo_err("reo context is null");
3740 		return QDF_STATUS_E_NULL_VALUE;
3741 	}
3742 
3743 	reo_context->simulation_in_progress = true;
3744 
3745 	sim_context = &reo_context->sim_context;
3746 
3747 	num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
3748 	if (num_mlo_links <= 0) {
3749 		mgmt_rx_reo_err("Invalid number of MLO links %d",
3750 				num_mlo_links);
3751 		return QDF_STATUS_E_INVAL;
3752 	}
3753 
3754 	for (link_id = 0; link_id < num_mlo_links; link_id++) {
3755 		struct workqueue_struct *wq;
3756 
3757 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
3758 					     link_id);
3759 		if (!wq) {
3760 			mgmt_rx_reo_err("Host workqueue creation failed");
3761 			status = QDF_STATUS_E_FAILURE;
3762 			goto error_destroy_fw_and_host_work_queues_till_last_link;
3763 		}
3764 		sim_context->host_mgmt_frame_handler[link_id] = wq;
3765 
3766 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
3767 					     link_id);
3768 		if (!wq) {
3769 			mgmt_rx_reo_err("FW workqueue creation failed");
3770 			status = QDF_STATUS_E_FAILURE;
3771 			goto error_destroy_host_work_queue_of_last_link;
3772 		}
3773 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
3774 	}
3775 
3776 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
3777 					  sim_context, "MAC_HW_thread");
3778 	if (!mac_hw_thread) {
3779 		mgmt_rx_reo_err("MAC HW thread creation failed");
3780 		status = QDF_STATUS_E_FAILURE;
3781 		goto error_destroy_fw_and_host_work_queues_of_last_link;
3782 	}
3783 
3784 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
3785 
3786 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
3787 
3788 	return QDF_STATUS_SUCCESS;
3789 
3790 error_destroy_fw_and_host_work_queues_of_last_link:
3791 	drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
3792 	destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
3793 
3794 error_destroy_host_work_queue_of_last_link:
3795 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
3796 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
3797 
3798 error_destroy_fw_and_host_work_queues_till_last_link:
3799 	for (id = 0; id < link_id; id++) {
3800 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
3801 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
3802 
3803 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
3804 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
3805 	}
3806 
3807 	return status;
3808 }
3809 
QDF_STATUS
mgmt_rx_reo_sim_stop(void)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;
	uint8_t link_id;
	QDF_STATUS status;
	int8_t num_mlo_links;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
	if (num_mlo_links <= 0) {
		mgmt_rx_reo_err("Invalid number of MLO links %d",
				num_mlo_links);
		return QDF_STATUS_E_INVAL;
	}

	/* Wait for the MAC HW simulation thread to run to completion so
	 * no new frames are generated while tearing down.
	 */
	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to stop the thread");
		return status;
	}

	sim_context->mac_hw_sim.mac_hw_thread = NULL;

	/* FW queues are drained before host queues because FW work items
	 * feed the host queues.
	 */
	for (link_id = 0; link_id < num_mlo_links; link_id++) {
		/* Wait for all the pending frames to be processed by FW */
		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);

		/* Wait for all the pending frames to be processed by host */
		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
		destroy_workqueue(
				sim_context->host_mgmt_frame_handler[link_id]);
	}

	/* Dump ingress/egress statistics and per-frame debug info gathered
	 * during the simulation run.
	 */
	status = mgmt_rx_reo_print_ingress_frame_stats(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame stats");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_print_egress_frame_stats(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame stats");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_print_ingress_frame_info(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame info");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_print_egress_frame_info(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame info");
		return QDF_STATUS_E_FAILURE;
	}

	/* Verdict: the simulation passed only if every frame was consumed,
	 * i.e. both the pending and stale master lists drained empty.
	 */
	master_frame_list = &sim_context->master_frame_list;
	if (!qdf_list_empty(&master_frame_list->pending_list) ||
	    !qdf_list_empty(&master_frame_list->stale_list)) {
		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");

		status = mgmt_rx_reo_list_display(
				&reo_context->reo_list, num_mlo_links);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to print reorder list");
			return status;
		}

		/* Deliberate hard assert: a leftover frame is a reorder bug */
		qdf_assert_always(0);
	} else {
		mgmt_rx_reo_err("reo sim passed");
	}

	reo_context->simulation_in_progress = false;

	return QDF_STATUS_SUCCESS;
}
3899 
3900 /**
3901  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
3902  * context.
3903  * @reo_context: Pointer to reo context
3904  *
3905  * Return: QDF_STATUS of operation
3906  */
3907 static QDF_STATUS
3908 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
3909 {
3910 	QDF_STATUS status;
3911 	struct mgmt_rx_reo_sim_context *sim_context;
3912 
3913 	if (!reo_context) {
3914 		mgmt_rx_reo_err("reo context is null");
3915 		return QDF_STATUS_E_NULL_VALUE;
3916 	}
3917 
3918 	sim_context = &reo_context->sim_context;
3919 
3920 	qdf_mem_zero(sim_context, sizeof(*sim_context));
3921 
3922 	status = mgmt_rx_reo_sim_init_master_frame_list(
3923 					&sim_context->master_frame_list);
3924 	if (QDF_IS_STATUS_ERROR(status)) {
3925 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
3926 		return status;
3927 	}
3928 
3929 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
3930 
3931 	return QDF_STATUS_SUCCESS;
3932 }
3933 
3934 /**
3935  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
3936  * context.
3937  * @reo_context: Pointer to reo context
3938  *
3939  * Return: QDF_STATUS of operation
3940  */
3941 static QDF_STATUS
3942 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
3943 {
3944 	QDF_STATUS status;
3945 	struct mgmt_rx_reo_sim_context *sim_context;
3946 
3947 	if (!reo_context) {
3948 		mgmt_rx_reo_err("reo context is null");
3949 		return QDF_STATUS_E_NULL_VALUE;
3950 	}
3951 
3952 	sim_context = &reo_context->sim_context;
3953 
3954 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
3955 
3956 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
3957 					&sim_context->master_frame_list);
3958 	if (QDF_IS_STATUS_ERROR(status)) {
3959 		mgmt_rx_reo_err("Failed to destroy master frame list");
3960 		return status;
3961 	}
3962 
3963 	return QDF_STATUS_SUCCESS;
3964 }
3965 
QDF_STATUS
mgmt_rx_reo_sim_get_snapshot_address(
			struct wlan_objmgr_pdev *pdev,
			enum mgmt_rx_reo_shared_snapshot_id id,
			struct mgmt_rx_reo_snapshot **address)
{
	int8_t link_id;
	struct mgmt_rx_reo_sim_context *sim_context;

	sim_context = mgmt_rx_reo_sim_get_context();
	if (!sim_context) {
		mgmt_rx_reo_err("Mgmt reo simulation context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!pdev) {
		mgmt_rx_reo_err("pdev is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
		return QDF_STATUS_E_INVAL;
	}

	if (!address) {
		mgmt_rx_reo_err("Pointer to snapshot address is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* The MLO link id of the pdev indexes the per-link snapshot table */
	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
	if (link_id < 0 || link_id >= MGMT_RX_REO_MAX_LINKS) {
		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
				pdev);
		return QDF_STATUS_E_INVAL;
	}

	/* Hand back the address of the simulated (link, id) snapshot slot */
	*address = &sim_context->snapshot[link_id][id];

	return QDF_STATUS_SUCCESS;
}
4007 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
4008 
4009 /**
4010  * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
4011  * @reo_list: Pointer to reorder list
4012  *
4013  * API to flush all the entries of the reorder list. This API would acquire
4014  * the lock protecting the list.
4015  *
4016  * Return: QDF_STATUS
4017  */
4018 static QDF_STATUS
4019 mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
4020 {
4021 	struct mgmt_rx_reo_list_entry *cur_entry;
4022 	struct mgmt_rx_reo_list_entry *temp;
4023 
4024 	if (!reo_list) {
4025 		mgmt_rx_reo_err("reorder list is null");
4026 		return QDF_STATUS_E_NULL_VALUE;
4027 	}
4028 
4029 	qdf_spin_lock_bh(&reo_list->list_lock);
4030 
4031 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
4032 		free_mgmt_rx_event_params(cur_entry->rx_params);
4033 
4034 		/**
4035 		 * Release the reference taken when the entry is inserted into
4036 		 * the reorder list.
4037 		 */
4038 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
4039 					     WLAN_MGMT_RX_REO_ID);
4040 
4041 		qdf_mem_free(cur_entry);
4042 	}
4043 
4044 	qdf_spin_unlock_bh(&reo_list->list_lock);
4045 
4046 	return QDF_STATUS_SUCCESS;
4047 }
4048 
4049 /**
4050  * mgmt_rx_reo_list_deinit() - De initialize the management rx-reorder list
4051  * @reo_list: Pointer to reorder list
4052  *
4053  * API to de initialize the management rx-reorder list.
4054  *
4055  * Return: QDF_STATUS
4056  */
4057 static QDF_STATUS
4058 mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list)
4059 {
4060 	QDF_STATUS status;
4061 
4062 	qdf_timer_free(&reo_list->ageout_timer);
4063 
4064 	status = mgmt_rx_reo_flush_reorder_list(reo_list);
4065 	if (QDF_IS_STATUS_ERROR(status)) {
4066 		mgmt_rx_reo_err("Failed to flush the reorder list");
4067 		return QDF_STATUS_E_FAILURE;
4068 	}
4069 	qdf_spinlock_destroy(&reo_list->list_lock);
4070 	qdf_list_destroy(&reo_list->list);
4071 
4072 	return QDF_STATUS_SUCCESS;
4073 }
4074 
QDF_STATUS
mgmt_rx_reo_deinit_context(void)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Stop the ageout timer and wait for any in-flight callback before
	 * tearing down the structures it operates on.
	 */
	qdf_timer_sync_cancel(&reo_context->reo_list.ageout_timer);

	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);

	/* Reverse of mgmt_rx_reo_init_context(): sim context first, then
	 * the reorder list.
	 */
	status = mgmt_rx_reo_sim_deinit(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to de initialize reo sim context");
		return QDF_STATUS_E_FAILURE;
	}

	status = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
4105 
QDF_STATUS
mgmt_rx_reo_init_context(void)
{
	QDF_STATUS status;
	QDF_STATUS temp;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	qdf_mem_zero(reo_context, sizeof(*reo_context));

	status = mgmt_rx_reo_list_init(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list");
		return status;
	}

	status = mgmt_rx_reo_sim_init(reo_context);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize reo simulation context");
		goto error_reo_list_deinit;
	}

	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);

	/* Arm the ageout timer only after everything else is ready */
	qdf_timer_mod(&reo_context->reo_list.ageout_timer,
		      MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS);

	return QDF_STATUS_SUCCESS;

error_reo_list_deinit:
	/* Unwind the reorder list; report its failure only if the unwind
	 * itself fails, otherwise propagate the original error.
	 */
	temp = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(temp)) {
		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
		return temp;
	}

	return status;
}
4148 
4149 /**
4150  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
4151  * params object
4152  * @snapshot_params: Pointer to snapshot params object
4153  *
4154  * Return: void
4155  */
4156 static void
4157 wlan_mgmt_rx_reo_initialize_snapshot_params(
4158 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
4159 {
4160 	snapshot_params->valid = false;
4161 	snapshot_params->mgmt_pkt_ctr = 0;
4162 	snapshot_params->global_timestamp = 0;
4163 }
4164 
4165 QDF_STATUS
4166 mgmt_rx_reo_pdev_obj_create_notification(
4167 	struct wlan_objmgr_pdev *pdev,
4168 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
4169 {
4170 	QDF_STATUS status;
4171 	QDF_STATUS temp_status;
4172 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
4173 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
4174 
4175 	if (!pdev) {
4176 		mgmt_rx_reo_err("pdev is null");
4177 		status = QDF_STATUS_E_NULL_VALUE;
4178 		goto failure;
4179 	}
4180 
4181 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
4182 		status = QDF_STATUS_SUCCESS;
4183 		goto failure;
4184 	}
4185 
4186 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
4187 	if (QDF_IS_STATUS_ERROR(status)) {
4188 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
4189 		goto failure;
4190 	}
4191 
4192 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
4193 	if (!mgmt_rx_reo_pdev_ctx) {
4194 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
4195 		status = QDF_STATUS_E_NOMEM;
4196 		goto failure;
4197 	}
4198 
4199 	snapshot_id = 0;
4200 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
4201 		struct mgmt_rx_reo_snapshot **snapshot_address;
4202 
4203 		snapshot_address = &mgmt_rx_reo_pdev_ctx->
4204 				host_target_shared_snapshot[snapshot_id];
4205 		temp_status = wlan_mgmt_rx_reo_get_snapshot_address(
4206 				pdev, snapshot_id, snapshot_address);
4207 		if (QDF_IS_STATUS_ERROR(temp_status)) {
4208 			mgmt_rx_reo_err("Get snapshot address failed, id = %u",
4209 					snapshot_id);
4210 			status = temp_status;
4211 			goto failure;
4212 		}
4213 
4214 		wlan_mgmt_rx_reo_initialize_snapshot_params(
4215 				&mgmt_rx_reo_pdev_ctx->
4216 				last_valid_shared_snapshot[snapshot_id]);
4217 		snapshot_id++;
4218 	}
4219 
4220 	/* Initialize Host snapshot params */
4221 	wlan_mgmt_rx_reo_initialize_snapshot_params(&mgmt_rx_reo_pdev_ctx->
4222 						    host_snapshot);
4223 
4224 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
4225 
4226 	return QDF_STATUS_SUCCESS;
4227 
4228 failure:
4229 	if (mgmt_rx_reo_pdev_ctx)
4230 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
4231 
4232 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
4233 
4234 	return status;
4235 }
4236 
4237 QDF_STATUS
4238 mgmt_rx_reo_pdev_obj_destroy_notification(
4239 	struct wlan_objmgr_pdev *pdev,
4240 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
4241 {
4242 	QDF_STATUS status;
4243 
4244 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
4245 		return QDF_STATUS_SUCCESS;
4246 
4247 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
4248 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
4249 
4250 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
4251 	if (QDF_IS_STATUS_ERROR(status)) {
4252 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
4253 		return status;
4254 	}
4255 
4256 	return QDF_STATUS_SUCCESS;
4257 }
4258 
4259 bool
4260 mgmt_rx_reo_is_simulation_in_progress(void)
4261 {
4262 	struct mgmt_rx_reo_context *reo_context;
4263 
4264 	reo_context = mgmt_rx_reo_get_context();
4265 	if (!reo_context) {
4266 		mgmt_rx_reo_err("reo context is null");
4267 		return false;
4268 	}
4269 
4270 	return reo_context->simulation_in_progress;
4271 }
4272