/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_mgmt_txrx_rx_reo.c
 * This file contains mgmt rx re-ordering related function definitions
 */

#include "wlan_mgmt_txrx_rx_reo_i.h"
#include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
#include <qdf_util.h>

static struct mgmt_rx_reo_context g_rx_reo_ctx;

#define mgmt_rx_reo_get_context()        (&g_rx_reo_ctx)

#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
/**
 * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
 * @ctr1: Management packet counter1
 * @ctr2: Management packet counter2
 *
 * We can't directly use the comparison operator here because the counters can
 * overflow. But these counters have a property that the difference between
 * them can never be greater than half the range of the data type.
 * We can make use of this condition to detect which one is actually greater.
 *
 * Return: true if @ctr1 is greater than or equal to @ctr2, else false
 */
static inline bool
mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
{
	uint16_t delta = ctr1 - ctr2;

	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
}
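
/*
 * Worked example (illustrative values only, assuming 16-bit wrap-around
 * arithmetic):
 *   mgmt_rx_reo_compare_pkt_ctrs_gte(0x0005, 0xFFFE) computes
 *   delta = 0x0007, which is <= half range, so it returns true
 *   (ctr1 is the later counter; it has simply wrapped around).
 *   mgmt_rx_reo_compare_pkt_ctrs_gte(0xFFFE, 0x0005) computes
 *   delta = 0xFFF9, which is > half range, so it returns false.
 */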

/**
 * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
 * @ctr1: Management packet counter1
 * @ctr2: Management packet counter2
 *
 * We can't directly use the subtract operator here because the counters can
 * overflow. But these counters have a property that the difference between
 * them can never be greater than half the range of the data type.
 * We can make use of this condition to detect which one is actually greater
 * and return the difference accordingly.
 *
 * Return: Difference between @ctr1 and @ctr2
 */
static inline int
mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
{
	uint16_t delta = ctr1 - ctr2;

	/**
	 * If delta is greater than half the range (i.e., ctr1 is actually
	 * smaller than ctr2), then the result should be a negative number.
	 * Subtracting the entire range gives the correct value.
	 */
	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;

	return delta;
}
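
/*
 * Worked example (illustrative values only): the subtraction stays
 * correct across a counter wrap-around.
 *   mgmt_rx_reo_subtract_pkt_ctrs(0x0002, 0xFFFF) computes delta = 0x0003,
 *   which is within half range, so it returns 3 (ctr1 is 3 ahead of ctr2).
 *   mgmt_rx_reo_subtract_pkt_ctrs(0xFFFF, 0x0002) computes delta = 0xFFFD,
 *   which is beyond half range, so it returns 0xFFFD - 0x10000 = -3.
 */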

#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * We can't directly use the comparison operator here because the timestamps
 * can overflow. But these timestamps have a property that the difference
 * between them can never be greater than half the range of the data type.
 * We can make use of this condition to detect which one is actually greater.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	uint32_t delta = ts1 - ts2;

	return delta <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
}
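
/*
 * Worked example (illustrative values only): the same wrap-around trick
 * as the packet counter comparison, extended to 32 bits.
 *   mgmt_rx_reo_compare_global_timestamps_gte(0x00000010, 0xFFFFFFF0)
 *   computes delta = 0x00000020, which is <= half range, so ts1 is
 *   treated as the later (greater or equal) timestamp despite the
 *   wrap-around.
 */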

/**
 * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
 * frames an incoming frame should wait for before it gets delivered.
 * @in_frame_pdev: pdev on which this frame is received
 * @in_frame_params: Pointer to MGMT REO parameters of this frame
 * @num_mlo_links: Number of MLO links
 * @wait_count: Pointer to wait count data structure to fill the calculated
 * wait count
 *
 * Each frame carries a MGMT pkt number which is local to that link, and a
 * timestamp which is global across all the links. MAC HW and FW also capture
 * the same details of the last frame that they have seen. Host also maintains
 * the details of the last frame it has seen. In total, there are 4 snapshots.
 * 1. MAC HW snapshot - latest frame seen at MAC HW
 * 2. FW forwarded snapshot - latest frame forwarded to the Host
 * 3. FW consumed snapshot - latest frame consumed by the FW
 * 4. Host snapshot - latest frame seen by the Host
 * By using all these snapshots, this function tries to compute the wait count
 * for a given incoming frame on all links.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
wlan_mgmt_rx_reo_algo_calculate_wait_count(
	struct wlan_objmgr_pdev *in_frame_pdev,
	struct mgmt_rx_reo_params *in_frame_params,
	uint8_t num_mlo_links,
	struct mgmt_rx_reo_wait_count *wait_count)
{
	QDF_STATUS status;
	uint8_t link, in_frame_link;
	int frames_pending, delta_fwd_host;
	uint8_t snapshot_id;
	struct wlan_objmgr_pdev *pdev;
	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
	struct mgmt_rx_reo_snapshot *address;
	struct mgmt_rx_reo_snapshot_params snapshot_params
		[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
					    *fw_consumed_ss, *host_ss;

	if (!in_frame_params) {
		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!wait_count) {
		mgmt_rx_reo_err("wait count pointer to be filled is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!in_frame_pdev) {
		mgmt_rx_reo_err("pdev is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_assert(num_mlo_links <= MAX_MLO_LINKS);

	/* Get the MLO link ID of incoming frame */
	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);

	/* Iterate over all the MLO links */
	for (link = 0; link < num_mlo_links; link++) {
		/* No need to wait for any frames on the same link */
		if (link == in_frame_link) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		pdev = wlan_get_pdev_from_mlo_link_id(link);

		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
		if (!rx_reo_pdev_ctx) {
			mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK",
					pdev);
			return QDF_STATUS_E_FAILURE;
		}

		host_ss = &rx_reo_pdev_ctx->host_snapshot;

		/**
		 * Ideally, the incoming frame has to wait for only those frames
		 * (on other links) which meet all the below criteria.
		 * 1. Frame's timestamp is less than incoming frame's
		 * 2. Frame is supposed to be consumed by the Host
		 * 3. Frame is not yet seen by the Host.
		 * We may not be able to compute the exact optimal wait count
		 * because HW/FW provides a limited assist.
		 * This algorithm tries to get the best estimate of wait count
		 * by not waiting for those frames where we have conclusive
		 * evidence that waiting is not required.
		 */

		/**
		 * If this link has already seen a frame whose timestamp is
		 * greater than or equal to incoming frame's timestamp,
		 * then no need to wait for any frames on this link.
		 * If the total wait count becomes zero, then the policy on
		 * whether to deliver such a frame to upper layers is handled
		 * separately.
		 */
		if (mgmt_rx_reo_compare_global_timestamps_gte(
				host_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
			frames_pending = 0;
			goto update_pending_frames;
		}

		snapshot_id = 0;
		/* Read all the shared snapshots */
		while (snapshot_id <
			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
			address = rx_reo_pdev_ctx->
				   host_target_shared_snapshot[snapshot_id];

			status = tgt_mgmt_rx_reo_read_snapshot(
						pdev, address, snapshot_id,
						&snapshot_params[snapshot_id]);

			/* Read operation shouldn't fail */
			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("snapshot (%d) read failed on link (%d)",
						snapshot_id, link);
				return status;
			}

			/* If snapshot is valid, save it in the pdev context */
			if (snapshot_params[snapshot_id].valid) {
				rx_reo_pdev_ctx->
				   last_valid_shared_snapshot[snapshot_id] =
				   snapshot_params[snapshot_id];
			}
			snapshot_id++;
		}

		mac_hw_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
		fw_forwarded_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWADED];
		fw_consumed_ss = &snapshot_params
				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];

		/**
		 * If the MAC HW snapshot is invalid, we need to assume the
		 * worst and wait for UINT_MAX frames, but this should not be a
		 * concern because if subsequent frames read a valid snapshot,
		 * the REO algorithm will take care of updating the wait count
		 * of this frame as well.
		 * There may be more optimal ways to handle invalid snapshot
		 * reads, e.g., making use of a previously read valid snapshot,
		 * but they come with complex logic. Keeping this simple for
		 * now.
		 */
		if (!mac_hw_ss->valid) {
			wait_count->per_link_count[link] = UINT_MAX;
			wait_count->total_count = UINT_MAX;
			continue;
		}

		/**
		 * For starters, we only have to wait for the frames that are
		 * seen by MAC HW but not yet seen by Host. The frames which
		 * reach MAC HW later are guaranteed to have a timestamp
		 * greater than incoming frame's timestamp.
		 */
		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
					mac_hw_ss->mgmt_pkt_ctr,
					host_ss->mgmt_pkt_ctr);
		qdf_assert_always(frames_pending >= 0);

		if (mgmt_rx_reo_compare_global_timestamps_gte(
					mac_hw_ss->global_timestamp,
					in_frame_params->global_timestamp)) {
			/**
			 * Last frame seen at MAC HW has timestamp greater than
			 * or equal to incoming frame's timestamp. So no need to
			 * wait for that last frame, but we can't conclusively
			 * say anything about timestamp of frames before the
			 * last frame, so try to wait for all of those frames.
			 */
			frames_pending--;
			qdf_assert_always(frames_pending >= 0);

			if (fw_consumed_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_consumed_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				/**
				 * Last frame consumed by the FW has timestamp
				 * greater than or equal to incoming frame's.
				 * That means all the frames from
				 * fw_consumed_ss->mgmt_pkt_ctr to
				 * mac_hw->mgmt_pkt_ctr will have timestamp
				 * greater than or equal to incoming frame's and
				 * hence, no need to wait for those frames.
				 * We just need to wait for frames from
				 * host_ss->mgmt_pkt_ctr to
				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				frames_pending =
				  mgmt_rx_reo_subtract_pkt_ctrs(
				      fw_consumed_ss->mgmt_pkt_ctr,
				      host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(frames_pending >= 0);

				/**
				 * If the last frame forwarded to the Host has
				 * timestamp less than incoming frame's, then
				 * all the frames starting from
				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
				 * the FW and hence, no need to wait for those
				 * frames. We just need to wait for frames
				 * from host_ss->mgmt_pkt_ctr to
				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
				 * better estimate over the above estimate,
				 * so update frames_pending.
				 */
				if (fw_forwarded_ss->valid &&
				    !mgmt_rx_reo_compare_global_timestamps_gte(
					fw_forwarded_ss->global_timestamp,
					in_frame_params->global_timestamp)) {
					frames_pending =
					  mgmt_rx_reo_subtract_pkt_ctrs(
					      fw_forwarded_ss->mgmt_pkt_ctr,
					      host_ss->mgmt_pkt_ctr);

					/**
					 * frames_pending can be negative in
					 * cases where there are no frames
					 * getting forwarded to the Host. No
					 * need to wait for any frames in that
					 * case.
					 */
					if (frames_pending < 0)
						frames_pending = 0;
				}
			}

			/**
			 * If the last frame forwarded to the Host has timestamp
			 * greater than or equal to incoming frame's, then all
			 * the frames from fw_forwarded->mgmt_pkt_ctr to
			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
			 * or equal to incoming frame's and hence, no need to
			 * wait for those frames. We may have to just wait for
			 * frames from host_ss->mgmt_pkt_ctr to
			 * fw_forwarded_ss->mgmt_pkt_ctr-1.
			 */
			if (fw_forwarded_ss->valid &&
			    mgmt_rx_reo_compare_global_timestamps_gte(
				fw_forwarded_ss->global_timestamp,
				in_frame_params->global_timestamp)) {
				delta_fwd_host =
				  mgmt_rx_reo_subtract_pkt_ctrs(
				    fw_forwarded_ss->mgmt_pkt_ctr,
				    host_ss->mgmt_pkt_ctr) - 1;

				qdf_assert_always(delta_fwd_host >= 0);

				/**
				 * This will be a better estimate over the one
				 * we computed using mac_hw_ss but this may or
				 * may not be a better estimate over the
				 * one we computed using fw_consumed_ss.
				 * When timestamps of both fw_consumed_ss and
				 * fw_forwarded_ss are greater than incoming
				 * frame's but timestamp of fw_consumed_ss is
				 * smaller than fw_forwarded_ss, then
				 * frames_pending will be smaller than
				 * delta_fwd_host; the reverse will be true in
				 * other cases. Instead of checking for all
				 * those cases, just waiting for the minimum
				 * among these two should be sufficient.
				 */
				frames_pending = qdf_min(frames_pending,
							 delta_fwd_host);
				qdf_assert_always(frames_pending >= 0);
			}
		}

update_pending_frames:
		wait_count->per_link_count[link] = frames_pending;
		wait_count->total_count += frames_pending;
	}

	return QDF_STATUS_SUCCESS;
}
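
/*
 * Worked example for the wait count computation above (illustrative
 * numbers only). Suppose that on some other link host_ss->mgmt_pkt_ctr
 * is 10 and mac_hw_ss->mgmt_pkt_ctr is 15, i.e., 5 frames are seen by
 * MAC HW but not yet by the Host. If both mac_hw_ss and fw_consumed_ss
 * carry timestamps greater than or equal to the incoming frame's and
 * fw_consumed_ss->mgmt_pkt_ctr is 13, the estimate is refined to
 * 13 - 10 - 1 = 2: only frames 11 and 12 can still have an older
 * timestamp and be destined for the Host, so the incoming frame waits
 * for at most those two frames on that link.
 */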

/**
 * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the
 * necessary information about a reo list entry for debug purposes.
 * @link_id: link id
 * @mgmt_pkt_ctr: management packet counter
 * @global_timestamp: global time stamp
 * @wait_count: wait count values
 * @status: status of the entry in the list
 * @entry: pointer to reo list entry
 */
struct mgmt_rx_reo_list_entry_debug_info {
	uint8_t link_id;
	uint16_t mgmt_pkt_ctr;
	uint32_t global_timestamp;
	struct mgmt_rx_reo_wait_count wait_count;
	uint32_t status;
	struct mgmt_rx_reo_list_entry *entry;
};

/**
 * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
 * @reo_list: Pointer to reorder list
 * @num_mlo_links: Number of MLO HW links
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list,
			 uint8_t num_mlo_links)
{
	uint32_t reo_list_size;
	uint32_t index;
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry_debug_info *debug_info;

	if (!reo_list) {
		mgmt_rx_reo_err("Pointer to reo list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (num_mlo_links == 0 || num_mlo_links > MGMT_RX_REO_MAX_LINKS) {
		mgmt_rx_reo_err("Invalid number of links %u", num_mlo_links);
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	reo_list_size = qdf_list_size(&reo_list->list);

	if (reo_list_size == 0) {
		qdf_spin_unlock_bh(&reo_list->list_lock);
		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
				  reo_list_size);
		return QDF_STATUS_SUCCESS;
	}

	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
	if (!debug_info) {
		qdf_spin_unlock_bh(&reo_list->list_lock);
		mgmt_rx_reo_err("Memory allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	index = 0;
	qdf_list_for_each(&reo_list->list, cur_entry, node) {
		debug_info[index].link_id =
				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
		debug_info[index].mgmt_pkt_ctr =
			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
		debug_info[index].global_timestamp =
				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
		debug_info[index].wait_count = cur_entry->wait_count;
		debug_info[index].status = cur_entry->status;
		debug_info[index].entry = cur_entry;

		++index;
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	mgmt_rx_reo_debug("Reorder list");
	mgmt_rx_reo_debug("##################################################");
	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
			  reo_list_size);
	for (index = 0; index < reo_list_size; index++) {
		uint8_t link_id;

		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
				  index, debug_info[index].link_id,
				  debug_info[index].global_timestamp,
				  debug_info[index].mgmt_pkt_ctr,
				  debug_info[index].status,
				  debug_info[index].entry);

		mgmt_rx_reo_debug("Total wait count = 0x%llx",
				  debug_info[index].wait_count.total_count);

		for (link_id = 0; link_id < num_mlo_links; link_id++)
			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
					  link_id, debug_info[index].wait_count.
					  per_link_count[link_id]);
	}
	mgmt_rx_reo_debug("##################################################");

	qdf_mem_free(debug_info);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
 * for releasing a reorder list entry to the upper layer.
 * @entry: List entry
 * @ts_latest_aged_out_frame: Global time stamp of latest aged out frame
 *
 * Return: Reason for releasing the frame.
 */
static uint8_t
mgmt_rx_reo_list_entry_get_release_reason(
		struct mgmt_rx_reo_list_entry *entry,
		struct mgmt_rx_reo_global_ts_info *ts_latest_aged_out_frame)
{
	uint8_t release_reason = 0;

	if (!entry || !ts_latest_aged_out_frame)
		return 0;

	if (!MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
		release_reason |=
			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;

	if (MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
		release_reason |=
				MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;

	if (ts_latest_aged_out_frame->valid &&
	    MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(
				ts_latest_aged_out_frame, entry))
		release_reason |=
		MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;

	return release_reason;
}

/**
 * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
 * @reo_list: Pointer to reorder list
 * @entry: List entry
 *
 * API to send the frame to the upper layer. This API has to be called only
 * for entries which can be released to the upper layer. It is the caller's
 * responsibility to ensure that the entry can be released (by using API
 * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
 * acquiring the lock which protects the reorder list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_list_entry *entry)
{
	uint8_t release_reason;
	QDF_STATUS status;
	uint8_t link_id;
	struct wlan_objmgr_pdev *pdev;

	qdf_assert_always(reo_list);
	qdf_assert_always(entry);

	release_reason = mgmt_rx_reo_list_entry_get_release_reason(
				entry, &reo_list->ts_latest_aged_out_frame);

	qdf_assert_always(release_reason != 0);

	status = qdf_list_remove_node(&reo_list->list, &entry->node);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to remove entry %pK from list", entry);
		qdf_assert_always(0);
	}

	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);

	free_mgmt_rx_event_params(entry->rx_params);

	pdev = wlan_get_pdev_from_mlo_link_id(link_id);
	if (!pdev) {
		mgmt_rx_reo_err("Unable to get pdev corresponding to entry %pK",
				entry);
		return QDF_STATUS_E_FAILURE;
	}

	/**
	 * Release the reference taken when the entry is inserted into
	 * the reorder list
	 */
	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
 * list entry can be sent to the upper layers.
 * @reo_list: Pointer to reorder list
 * @entry: List entry
 *
 * Return: true if the entry is ready to be sent up, else false
 */
static bool
mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
					   struct mgmt_rx_reo_list_entry *entry)
{
	if (!reo_list || !entry)
		return false;

	return !MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
	       entry) || MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
	       (reo_list->ts_latest_aged_out_frame.valid &&
		MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(
				&reo_list->ts_latest_aged_out_frame, entry));
}

/**
 * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
 * @reo_list: Pointer to reorder list
 *
 * This API releases the entries from the reorder list based on the following
 * conditions.
 *   a) Entries with total wait count equal to 0
 *   b) Entries which are timed out or entries with global time stamp <= global
 *      time stamp of the latest frame which is timed out. We can only release
 *      the entries in the increasing order of the global time stamp.
 *      So all the entries with global time stamp <= global time stamp of the
 *      latest timed out frame have to be released.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_list *reo_list)
{
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry *temp;
	/* TODO yield if release_count > THRESHOLD */
	uint16_t release_count = 0;
	QDF_STATUS status;

	if (!reo_list) {
		mgmt_rx_reo_err("reo list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
		if (mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
							       cur_entry)) {
			mgmt_rx_reo_debug("Freeing up entry %pK", cur_entry);
			status = mgmt_rx_reo_list_entry_send_up(reo_list,
								cur_entry);
			if (QDF_IS_STATUS_ERROR(status))
				goto error;

			release_count++;
		} else {
			break;
		}

		qdf_mem_free(cur_entry);
	}

	status = QDF_STATUS_SUCCESS;
error:
	qdf_spin_unlock_bh(&reo_list->list_lock);
	return status;
}

/**
 * mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
 * @arg: Argument to timer handler
 *
 * This is the handler for the periodic ageout timer used to time out entries
 * in the reorder list.
 *
 * Return: void
 */
static void
mgmt_rx_reo_list_ageout_timer_handler(void *arg)
{
	struct mgmt_rx_reo_list *reo_list = arg;
	struct mgmt_rx_reo_list_entry *cur_entry;
	uint64_t cur_ts;
	QDF_STATUS status;

	if (!reo_list)
		return;

	qdf_spin_lock_bh(&reo_list->list_lock);

	cur_ts = qdf_get_log_timestamp();

	qdf_list_for_each(&reo_list->list, cur_entry, node) {
		if (cur_ts - cur_entry->insertion_ts >=
		    MGMT_RX_REO_LIST_TIMEOUT) {
			uint32_t cur_entry_global_ts;
			struct mgmt_rx_reo_global_ts_info *ts_ageout;

			ts_ageout = &reo_list->ts_latest_aged_out_frame;
			cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
					      cur_entry->rx_params);

			if (!ts_ageout->valid ||
			    mgmt_rx_reo_compare_global_timestamps_gte(
			    cur_entry_global_ts, ts_ageout->global_ts)) {
				ts_ageout->global_ts = cur_entry_global_ts;
				ts_ageout->valid = true;
			}

			cur_entry->status |= MGMT_RX_REO_STATUS_AGED_OUT;
		}
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	status = mgmt_rx_reo_list_release_entries(reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to release list entries, status = %d",
				status);
		return;
	}
}

/**
 * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
 * frame received.
 * @frame_desc: Pointer to the frame descriptor
 * @entry: Pointer to the list entry
 *
 * This API prepares the reorder list entry corresponding to a management frame
 * to be consumed by host. This entry would be inserted at the appropriate
 * position in the reorder list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_prepare_list_entry(
		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
		struct mgmt_rx_reo_list_entry **entry)
{
	struct mgmt_rx_reo_list_entry *list_entry;
	struct wlan_objmgr_pdev *pdev;
	QDF_STATUS status;
	uint8_t link_id;

	if (!frame_desc) {
		mgmt_rx_reo_err("frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!entry) {
		mgmt_rx_reo_err("Pointer to list entry is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);

	pdev = wlan_get_pdev_from_mlo_link_id(link_id);
	if (!pdev) {
		mgmt_rx_reo_err("pdev corresponding to link %u is null",
				link_id);
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Take the reference when the entry is created for insertion */
	status = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_MGMT_RX_REO_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to get ref to pdev");
		return QDF_STATUS_E_FAILURE;
	}

	list_entry = qdf_mem_malloc(sizeof(*list_entry));
	if (!list_entry) {
		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
		mgmt_rx_reo_err("List entry allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	list_entry->nbuf = frame_desc->nbuf;
	list_entry->rx_params = frame_desc->rx_params;
	list_entry->wait_count = frame_desc->wait_count;
	list_entry->status = 0;
	if (list_entry->wait_count.total_count)
		list_entry->status |=
			MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;

	*entry = list_entry;

	mgmt_rx_reo_debug("New entry to be inserted is %pK", list_entry);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
 * on the wait count of a frame received after that on air.
 * @num_mlo_links: Number of MLO links
 * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
 * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
 *
 * This API optimizes the wait count of a frame based on the wait count of
 * a frame received after that on air. Old frame refers to the frame received
 * first on the air and new frame refers to the frame received after that.
 * We use the following fundamental idea. Wait counts for old frames can't be
 * more than wait counts for the new frame. Use this to optimize the wait count
 * for the old frames. Per link wait count of an old frame is minimum of the
 * per link wait count of the old frame and new frame.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_wait_count(
		uint8_t num_mlo_links,
		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
{
	uint8_t link_id;

	qdf_assert_always(wait_count_old_frame);
	qdf_assert_always(wait_count_new_frame);

	qdf_assert_always(num_mlo_links >= 1);
	qdf_assert_always(num_mlo_links <= MGMT_RX_REO_MAX_LINKS);

	for (link_id = 0; link_id < num_mlo_links; link_id++) {
		if (wait_count_old_frame->per_link_count[link_id]) {
			uint32_t temp_wait_count;
			uint32_t wait_count_diff;

			temp_wait_count =
				wait_count_old_frame->per_link_count[link_id];
			wait_count_old_frame->per_link_count[link_id] =
				qdf_min(wait_count_old_frame->
					per_link_count[link_id],
					wait_count_new_frame->
					per_link_count[link_id]);
			wait_count_diff = temp_wait_count -
				wait_count_old_frame->per_link_count[link_id];

			wait_count_old_frame->total_count -= wait_count_diff;
		}
	}

	return QDF_STATUS_SUCCESS;
}
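
/*
 * Worked example (illustrative values only): if the old frame's per-link
 * wait counts are {2, 3} with total_count = 5, and a newer frame arrives
 * with per-link wait counts {1, 5}, link 0 is clamped to min(2, 1) = 1
 * while link 1 stays at min(3, 5) = 3, and total_count drops by the
 * difference to 4.
 */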

/**
 * mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
 * @reo_list: Pointer to reorder list
 * @num_mlo_links: Number of MLO HW links
 * @frame_desc: Pointer to frame descriptor
 * @is_queued: Whether this frame is queued in the REO list
 *
 * API to update the reorder list on every management frame reception.
 * This API does the following things.
 *   a) Update the wait counts for all the frames in the reorder list with
 *      global time stamp <= current frame's global time stamp. We use the
 *      following principle for updating the wait count in this case.
 *      Let A and B be two management frames with global time stamp of A <=
 *      global time stamp of B. Let WAi and WBi be the wait count of A and B
 *      for link i, then WAi <= WBi. Hence we can optimize WAi as
 *      min(WAi, WBi).
 *   b) If the current frame is to be consumed by host, insert it in the
 *      reorder list such that the list is always sorted in the increasing order
 *      of global time stamp. Update the wait count of the current frame based
 *      on the frame next to it in the reorder list (if any).
 *   c) Update the wait count of the frames in the reorder list with global
 *      time stamp > current frame's global time stamp. Let the current frame
 *      belong to link "l". Then link "l"'s wait count can be reduced by one for
 *      all the frames in the reorder list with global time stamp > current
 *      frame's global time stamp.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
			uint8_t num_mlo_links,
			struct mgmt_rx_reo_frame_descriptor *frame_desc,
			bool *is_queued)
{
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
	bool least_greater_entry_found = false;
	QDF_STATUS status;
	uint32_t new_frame_global_ts;
	struct mgmt_rx_reo_list_entry *new_entry = NULL;

	if (!is_queued)
		return QDF_STATUS_E_NULL_VALUE;
	*is_queued = false;

	if (!reo_list) {
		mgmt_rx_reo_err("Mgmt Rx reo list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);

	/* Prepare the list entry before acquiring lock */
	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to prepare list entry");
			return QDF_STATUS_E_FAILURE;
		}
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	qdf_list_for_each(&reo_list->list, cur_entry, node) {
		uint32_t cur_entry_global_ts;

		cur_entry_global_ts = mgmt_rx_reo_get_global_ts(
					cur_entry->rx_params);

		if (!mgmt_rx_reo_compare_global_timestamps_gte(
		    new_frame_global_ts, cur_entry_global_ts)) {
			least_greater_entry = cur_entry;
			least_greater_entry_found = true;
			break;
		}

		status = mgmt_rx_reo_update_wait_count(
					num_mlo_links,
					&cur_entry->wait_count,
					&frame_desc->wait_count);
		if (QDF_IS_STATUS_ERROR(status))
			goto error;

		if (cur_entry->wait_count.total_count == 0)
			cur_entry->status &=
			      ~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
	}

	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
		if (least_greater_entry_found) {
			status = mgmt_rx_reo_update_wait_count(
					num_mlo_links,
					&new_entry->wait_count,
					&least_greater_entry->wait_count);

			if (QDF_IS_STATUS_ERROR(status))
				goto error;

			if (new_entry->wait_count.total_count == 0)
				new_entry->status &=
					~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
		}

		new_entry->insertion_ts = qdf_get_log_timestamp();

		/**
		 * If no entry with a greater time stamp exists, the new
		 * entry belongs at the tail of the list. Inserting before
		 * least_greater_entry would dereference an invalid pointer
		 * in that case.
		 */
		if (least_greater_entry_found)
			status = qdf_list_insert_before(
					&reo_list->list, &new_entry->node,
					&least_greater_entry->node);
		else
			status = qdf_list_insert_back(&reo_list->list,
						      &new_entry->node);
		if (QDF_IS_STATUS_ERROR(status))
			goto error;

		*is_queued = true;
	}

	/**
	 * Step c) applies only to entries with a global time stamp greater
	 * than the current frame's, i.e., entries from least_greater_entry
	 * onwards (if such an entry exists).
	 */
	if (least_greater_entry_found) {
		cur_entry = least_greater_entry;
		qdf_list_for_each_from(&reo_list->list, cur_entry, node) {
			uint8_t frame_link_id;

			frame_link_id = mgmt_rx_reo_get_link_id(
						frame_desc->rx_params);
			if (cur_entry->wait_count.per_link_count[frame_link_id]) {
				cur_entry->wait_count.per_link_count[frame_link_id]--;
				cur_entry->wait_count.total_count--;
				if (cur_entry->wait_count.total_count == 0)
					cur_entry->status &=
						~MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
			}
		}
	}

	status = QDF_STATUS_SUCCESS;
	goto exit;

error:
	/* Cleanup the entry if it is created but not queued */
	if (!*is_queued && new_entry) {
		struct wlan_objmgr_pdev *pdev;
		uint8_t link_id;

		link_id = mgmt_rx_reo_get_link_id(new_entry->rx_params);

		pdev = wlan_get_pdev_from_mlo_link_id(link_id);
		/**
		 * New entry created is not inserted to reorder list, free
		 * the entry and release the reference
		 */
		if (pdev)
			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
		else
			mgmt_rx_reo_err("Unable to get pdev corresponding to entry %pK",
					new_entry);
		qdf_mem_free(new_entry);
	}

exit:
	qdf_spin_unlock_bh(&reo_list->list_lock);

	if (!*is_queued)
		return status;

	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
		if (least_greater_entry_found)
			mgmt_rx_reo_debug("Inserting new entry %pK before %pK",
					  new_entry, least_greater_entry);
		else
			mgmt_rx_reo_debug("Inserting new entry %pK at the tail",
					  new_entry);
	}

	return status;
}
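
/*
 * Worked example for the list update above (illustrative only). Assume
 * the list holds entries A (ts = 100) and B (ts = 200) and a frame C
 * with ts = 150 arrives on link "l" and is to be consumed by the Host.
 * A's wait counts are clamped using C's wait counts (step a), C is
 * inserted between A and B with its wait counts clamped using B's
 * (step b), and B's pending count for link "l" is decremented by one
 * because C has now arrived (step c).
 */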

/**
 * mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
 * @reo_list: Pointer to reorder list
 *
 * API to initialize the management rx-reorder list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_init(struct mgmt_rx_reo_list *reo_list)
{
	QDF_STATUS status;

	reo_list->max_list_size = MGMT_RX_REO_MAX_LIST_SIZE;
	qdf_list_create(&reo_list->list, reo_list->max_list_size);
	qdf_spinlock_create(&reo_list->list_lock);

	status = qdf_timer_init(NULL, &reo_list->ageout_timer,
				mgmt_rx_reo_list_ageout_timer_handler, reo_list,
				QDF_TIMER_TYPE_WAKE_APPS);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize reo list ageout timer");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
 * Rx REO parameters.
 * @pdev: pdev extracted from the WMI event
 * @reo_params: MGMT Rx REO parameters received in the WMI event
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
				      struct mgmt_rx_reo_params *reo_params)
{
	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
	struct mgmt_rx_reo_snapshot_params *host_ss;

	if (!reo_params) {
		mgmt_rx_reo_err("Mgmt Rx REO params are NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
	if (!rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
		return QDF_STATUS_E_FAILURE;
	}

	/* FW should send valid REO parameters */
	if (!reo_params->valid) {
		mgmt_rx_reo_err("Mgmt Rx REO params are invalid");
		return QDF_STATUS_E_FAILURE;
	}

	host_ss = &rx_reo_pdev_ctx->host_snapshot;
	host_ss->valid = true;
	host_ss->global_timestamp = reo_params->global_timestamp;
	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	QDF_STATUS status;

	if (!is_queued)
		return QDF_STATUS_E_NULL_VALUE;

	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = mgmt_rx_reo_get_context();
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	/* Update the Host snapshot */
	status = wlan_mgmt_rx_reo_update_host_snapshot(
						pdev,
						desc->rx_params->reo_params);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Unable to update Host snapshot");
		return QDF_STATUS_E_FAILURE;
	}

	/* Compute wait count for this frame/event */
	status = wlan_mgmt_rx_reo_algo_calculate_wait_count(
						pdev,
						desc->rx_params->reo_params,
						reo_ctx->num_mlo_links,
						&desc->wait_count);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Wait count calculation failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* Update the REO list */
	status = mgmt_rx_reo_update_list(&reo_ctx->reo_list,
					 reo_ctx->num_mlo_links,
					 desc, is_queued);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("REO list update failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* Finally, release the entries for which the pending frame is
	 * received
	 */
	return mgmt_rx_reo_list_release_entries(&reo_ctx->reo_list);
}

QDF_STATUS
mgmt_rx_reo_init_context(void)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	qdf_mem_zero(reo_context, sizeof(*reo_context));

	status = mgmt_rx_reo_list_init(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to initialize mgmt Rx reo list");
		return status;
	}

	reo_context->ts_last_delivered_frame.valid = false;

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
 * @reo_list: Pointer to reorder list
 *
 * API to flush all the entries of the reorder list. This API would acquire
 * the lock protecting the list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_flush_reorder_list(struct mgmt_rx_reo_list *reo_list)
{
	struct mgmt_rx_reo_list_entry *cur_entry;
	struct mgmt_rx_reo_list_entry *temp;

	if (!reo_list) {
		mgmt_rx_reo_err("reorder list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	qdf_spin_lock_bh(&reo_list->list_lock);

	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
		uint8_t link_id;
		struct wlan_objmgr_pdev *pdev = NULL;

		/* Get the link id before the rx params are freed */
		link_id = mgmt_rx_reo_get_link_id(cur_entry->rx_params);

		free_mgmt_rx_event_params(cur_entry->rx_params);

		pdev = wlan_get_pdev_from_mlo_link_id(link_id);
		if (!pdev) {
			qdf_spin_unlock_bh(&reo_list->list_lock);
			mgmt_rx_reo_err("pdev for link_id %u is null", link_id);
			return QDF_STATUS_E_NULL_VALUE;
		}

		/**
		 * Release the reference taken when the entry is inserted into
		 * the reorder list
		 */
		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);

		qdf_mem_free(cur_entry);
	}

	qdf_spin_unlock_bh(&reo_list->list_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * mgmt_rx_reo_list_deinit() - De-initialize the management rx-reorder list
 * @reo_list: Pointer to reorder list
 *
 * API to de-initialize the management rx-reorder list.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_list_deinit(struct mgmt_rx_reo_list *reo_list)
{
	QDF_STATUS status;

	qdf_timer_free(&reo_list->ageout_timer);

	status = mgmt_rx_reo_flush_reorder_list(reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to flush the reorder list");
		return QDF_STATUS_E_FAILURE;
	}
	qdf_spinlock_destroy(&reo_list->list_lock);
	qdf_list_destroy(&reo_list->list);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
mgmt_rx_reo_deinit_context(void)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_context *reo_context;

	reo_context = mgmt_rx_reo_get_context();
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = mgmt_rx_reo_list_deinit(&reo_context->reo_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to de-initialize mgmt Rx reo list");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
1269