xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision bee501dcb7f7900c28a50e7c5a100eeb14466d1e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef FEATURE_AST
54 #ifdef BYPASS_OL_OPS
55 /*
56  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
57  * @soc: DP soc structure pointer
58  * @peer: dp peer structure
59  * @dest_macaddr: MAC address of ast node
60  * @flags: wds or hmwds
61  * @type: type from enum cdp_txrx_ast_entry_type
62  *
63  * This API is used by WDS source port learning function to
64  * add a new AST entry in the fw.
65  *
66  * Return: 0 on success, error code otherwise.
67  */
68 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
69 				    struct dp_peer *peer,
70 				    const uint8_t *dest_macaddr,
71 				    uint32_t flags,
72 				    uint8_t type)
73 {
74 	QDF_STATUS status;
75 
76 	status = target_if_add_wds_entry(soc->ctrl_psoc,
77 					 peer->vdev->vdev_id,
78 					 peer->mac_addr.raw,
79 					 dest_macaddr,
80 					 WMI_HOST_WDS_FLAG_STATIC,
81 					 type);
82 
83 	return qdf_status_to_os_return(status);
84 }
85 
86 /*
87  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
88  * @soc: DP soc structure pointer
89  * @peer: dp peer structure
90  * @dest_macaddr: MAC address of ast node
91  * @flags: wds or hmwds
92  *
93  * This API is used to update the peer mac address for the ast
94  * in the fw.
95  *
96  * Return: 0 on success, error code otherwise.
97  */
98 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
99 				       struct dp_peer *peer,
100 				       uint8_t *dest_macaddr,
101 				       uint32_t flags)
102 {
103 	QDF_STATUS status;
104 
105 	status = target_if_update_wds_entry(soc->ctrl_psoc,
106 					    peer->vdev->vdev_id,
107 					    dest_macaddr,
108 					    peer->mac_addr.raw,
109 					    WMI_HOST_WDS_FLAG_STATIC);
110 
111 	return qdf_status_to_os_return(status);
112 }
113 
114 /*
115  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
116  * @soc: DP soc structure pointer
117  * @vdev_id: vdev_id
118  * @wds_macaddr: MAC address of ast node
119  * @type: type from enum cdp_txrx_ast_entry_type
120  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
121  *
122  * This API is used to delete an AST entry from fw
123  *
124  * Return: None
125  */
126 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
127 				     uint8_t vdev_id,
128 				     uint8_t *wds_macaddr,
129 				     uint8_t type,
130 				     uint8_t delete_in_fw)
131 {
132 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
133 				wds_macaddr, type, delete_in_fw);
134 }
135 #else
136 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
137 				    struct dp_peer *peer,
138 				    const uint8_t *dest_macaddr,
139 				    uint32_t flags,
140 				    uint8_t type)
141 {
142 	int status;
143 
144 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
145 					soc->ctrl_psoc,
146 					peer->vdev->vdev_id,
147 					peer->mac_addr.raw,
148 					peer->peer_id,
149 					dest_macaddr,
150 					peer->mac_addr.raw,
151 					flags,
152 					type);
153 
154 	return status;
155 }
156 
157 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
158 				       struct dp_peer *peer,
159 				       uint8_t *dest_macaddr,
160 				       uint32_t flags)
161 {
162 	int status;
163 
164 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
165 				soc->ctrl_psoc,
166 				peer->vdev->vdev_id,
167 				dest_macaddr,
168 				peer->mac_addr.raw,
169 				flags);
170 
171 	return status;
172 }
173 
174 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
175 				     uint8_t vdev_id,
176 				     uint8_t *wds_macaddr,
177 				     uint8_t type,
178 				     uint8_t delete_in_fw)
179 {
180 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
181 						vdev_id,
182 						wds_macaddr,
183 						type,
184 						delete_in_fw);
185 }
186 #endif
187 #endif
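
#ifdef DP_WDS_WRAPPER_EXAMPLE
/* Illustrative sketch only (guarded out of the build, and assuming
 * FEATURE_AST): a minimal add/update/delete round trip through the
 * wrappers above. The station MAC and the zero flags value are
 * hypothetical; real callers pass values chosen by WDS source port
 * learning.
 */
static void dp_wds_wrapper_example(struct dp_soc *soc, struct dp_peer *peer)
{
	uint8_t sta_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22,
					      0x33, 0x44, 0x55};

	/* Create the AST entry in FW for a station behind this peer. */
	if (dp_add_wds_entry_wrapper(soc, peer, sta_mac, 0,
				     CDP_TXRX_AST_TYPE_WDS))
		return;

	/* Re-point the entry at a new peer when the station roams. */
	dp_update_wds_entry_wrapper(soc, peer, sta_mac, 0);

	/* Remove the entry and ask FW to delete it as well. */
	dp_del_wds_entry_wrapper(soc, peer->vdev->vdev_id, sta_mac,
				 CDP_TXRX_AST_TYPE_WDS, 1);
}
#endif /* DP_WDS_WRAPPER_EXAMPLE */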
188 
189 #ifdef FEATURE_WDS
190 static inline bool
191 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
192 				    struct dp_ast_entry *ast_entry)
193 {
194 	/* If peer map v2 is enabled, the ast entry is not freed here;
195 	 * it is supposed to be freed in the unmap event (after we
196 	 * receive the delete confirmation from the target).
197 	 *
198 	 * If peer_id is invalid, we did not get the peer map event for
199 	 * the peer; only in that case is the ast entry freed from here.
200 	 */
201 
202 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
203 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
204 		return true;
205 
206 	return false;
207 }
208 #else
209 static inline bool
210 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
211 				    struct dp_ast_entry *ast_entry)
212 {
213 	return false;
214 }
215 
216 void dp_soc_wds_attach(struct dp_soc *soc)
217 {
218 }
219 
220 void dp_soc_wds_detach(struct dp_soc *soc)
221 {
222 }
223 #endif
224 
225 #ifdef QCA_SUPPORT_WDS_EXTENDED
226 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
227 {
228 	struct dp_vdev *vdev = peer->vdev;
229 	struct dp_txrx_peer *txrx_peer;
230 
231 	if (!vdev->wds_ext_enabled)
232 		return false;
233 
234 	txrx_peer = dp_get_txrx_peer(peer);
235 	if (!txrx_peer)
236 		return false;
237 
238 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
239 				&txrx_peer->wds_ext.init))
240 		return true;
241 
242 	return false;
243 }
244 #else
245 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
246 {
247 	return false;
248 }
249 #endif
250 
251 #ifdef REO_QDESC_HISTORY
252 static inline void
253 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
254 			    enum reo_qdesc_event_type type)
255 {
256 	struct reo_qdesc_event *evt;
257 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
258 	uint32_t idx;
259 
260 	reo_qdesc_history_idx++;
261 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
262 
263 	evt = &reo_qdesc_history[idx];
264 
265 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
266 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
267 	evt->ts = qdf_get_log_timestamp();
268 	evt->type = type;
269 }
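
/* A worked example of the indexing above (an annotation, not driver
 * code): REO_QDESC_HISTORY_SIZE is a power of two, so masking the
 * monotonically increasing reo_qdesc_history_idx with (SIZE - 1) is a
 * cheap modulo that turns the flat array into a circular buffer.
 * With SIZE = 512:
 *
 *	idx = 513 & (512 - 1);	// idx == 1: the event logged when the
 *				// counter reads 513 overwrites slot 1
 */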
270 
271 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
272 static inline void
273 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
274 				 enum reo_qdesc_event_type type)
275 {
276 	struct reo_qdesc_event *evt;
277 	uint32_t idx;
278 
279 	reo_qdesc_history_idx++;
280 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
281 
282 	evt = &reo_qdesc_history[idx];
283 
284 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
285 	evt->qdesc_addr = desc->hw_qdesc_paddr;
286 	evt->ts = qdf_get_log_timestamp();
287 	evt->type = type;
288 }
289 
290 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
291 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
292 
293 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
294 	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
295 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
296 
297 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
298 	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)
299 
300 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
301 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
302 
303 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
304 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
305 
306 #else
307 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
308 
309 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
310 
311 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
312 
313 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
314 
315 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
316 #endif
317 
318 static inline void
319 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
320 					uint8_t valid)
321 {
322 	params->u.upd_queue_params.update_svld = 1;
323 	params->u.upd_queue_params.svld = valid;
324 	dp_peer_debug("Setting SSN valid bit to %d",
325 		      valid);
326 }
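
#ifdef DP_REO_SSN_EXAMPLE
/* Illustrative sketch only (guarded out of the build): how a caller
 * might use dp_set_ssn_valid_flag() while building a REO queue update
 * command. Only the two svld fields filled in by the helper are taken
 * from the code above; zeroing the rest of the params is an assumption
 * made here for the sketch.
 */
static void dp_reo_ssn_example(void)
{
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));
	/* Mark the stored SSN invalid so HW re-learns the sequence. */
	dp_set_ssn_valid_flag(&params, 0);
}
#endif /* DP_REO_SSN_EXAMPLE */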
327 
328 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
329 {
330 	uint32_t max_ast_index;
331 
332 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
333 	/* allocate ast_table for ast entry to ast_index map */
334 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
335 	soc->ast_table = qdf_mem_malloc(max_ast_index *
336 					sizeof(struct dp_ast_entry *));
337 	if (!soc->ast_table) {
338 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
339 		return QDF_STATUS_E_NOMEM;
340 	}
341 	return QDF_STATUS_SUCCESS; /* success */
342 }
343 
344 /*
345  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
346  * @soc: soc handle
347  *
348  * return: QDF_STATUS
349  */
350 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
351 {
352 	uint32_t max_peers, peer_map_size;
353 
354 	max_peers = soc->max_peer_id;
355 	/* allocate the peer ID -> peer object map */
356 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
357 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
358 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
359 	if (!soc->peer_id_to_obj_map) {
360 		dp_peer_err("%pK: peer map memory allocation failed", soc);
361 		return QDF_STATUS_E_NOMEM;
362 	}
363 
364 	/*
365 	 * The peer_id_to_obj_map doesn't really need to be initialized,
366 	 * since elements are only used after they have been individually
367 	 * initialized.
368 	 * However, it is convenient for debugging to have all elements
369 	 * that are not in use set to 0.
370 	 */
371 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
372 
373 	qdf_spinlock_create(&soc->peer_map_lock);
374 	return QDF_STATUS_SUCCESS; /* success */
375 }
376 
377 #define DP_AST_HASH_LOAD_MULT  2
378 #define DP_AST_HASH_LOAD_SHIFT 0
379 
380 static inline uint32_t
381 dp_peer_find_hash_index(struct dp_soc *soc,
382 			union dp_align_mac_addr *mac_addr)
383 {
384 	uint32_t index;
385 
386 	index =
387 		mac_addr->align2.bytes_ab ^
388 		mac_addr->align2.bytes_cd ^
389 		mac_addr->align2.bytes_ef;
390 
391 	index ^= index >> soc->peer_hash.idx_bits;
392 	index &= soc->peer_hash.mask;
393 	return index;
394 }
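
/* A worked example of the XOR fold above (an annotation, not driver
 * code), assuming little-endian packing of the address into the three
 * 16-bit words and a 64-bin table (idx_bits = 6, mask = 0x3f). For
 * MAC 00:11:22:33:44:55:
 *
 *	index  = 0x1100 ^ 0x3322 ^ 0x5544;	// index == 0x7766
 *	index ^= index >> 6;			// index == 0x76bb
 *	index &= 0x3f;				// index == 0x3b (bin 59)
 */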
395 
396 /*
397  * dp_peer_find_hash_find() - returns legacy or mlo link peer from
398  *			      peer_hash_table matching vdev_id and mac_address
399  * @soc: soc handle
400  * @peer_mac_addr: peer mac address
401  * @mac_addr_is_aligned: is mac addr aligned
402  * @vdev_id: vdev_id
403  * @mod_id: id of module requesting reference
404  *
405  * return: peer on success
406  *         NULL on failure
407  */
408 struct dp_peer *dp_peer_find_hash_find(
409 				struct dp_soc *soc, uint8_t *peer_mac_addr,
410 				int mac_addr_is_aligned, uint8_t vdev_id,
411 				enum dp_mod_id mod_id)
412 {
413 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
414 	uint32_t index;
415 	struct dp_peer *peer;
416 
417 	if (!soc->peer_hash.bins)
418 		return NULL;
419 
420 	if (mac_addr_is_aligned) {
421 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
422 	} else {
423 		qdf_mem_copy(
424 			&local_mac_addr_aligned.raw[0],
425 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
426 		mac_addr = &local_mac_addr_aligned;
427 	}
428 	index = dp_peer_find_hash_index(soc, mac_addr);
429 	qdf_spin_lock_bh(&soc->peer_hash_lock);
430 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
431 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
432 		    ((peer->vdev->vdev_id == vdev_id) ||
433 		     (vdev_id == DP_VDEV_ALL))) {
434 			/* take peer reference before returning */
435 			if (dp_peer_get_ref(soc, peer, mod_id) !=
436 						QDF_STATUS_SUCCESS)
437 				peer = NULL;
438 
439 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
440 			return peer;
441 		}
442 	}
443 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
444 	return NULL; /* failure */
445 }
446 
447 qdf_export_symbol(dp_peer_find_hash_find);
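
#ifdef DP_PEER_LOOKUP_EXAMPLE
/* Illustrative sketch only (guarded out of the build): a successful
 * dp_peer_find_hash_find() takes a reference under the given mod_id,
 * so every hit must be balanced with dp_peer_unref_delete().
 */
static void dp_peer_lookup_example(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_peer *peer;

	/* Search across all vdevs; the MAC buffer need not be aligned. */
	peer = dp_peer_find_hash_find(soc, mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (!peer)
		return;

	/* ... use the peer under the DP_MOD_ID_CDP reference ... */

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#endif /* DP_PEER_LOOKUP_EXAMPLE */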
448 
449 #ifdef WLAN_FEATURE_11BE_MLO
450 /*
451  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
452  * @soc: soc handle
453  *
454  * return: none
455  */
456 static void dp_peer_find_hash_detach(struct dp_soc *soc)
457 {
458 	if (soc->peer_hash.bins) {
459 		qdf_mem_free(soc->peer_hash.bins);
460 		soc->peer_hash.bins = NULL;
461 		qdf_spinlock_destroy(&soc->peer_hash_lock);
462 	}
463 
464 	if (soc->arch_ops.mlo_peer_find_hash_detach)
465 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
466 }
467 
468 /*
469  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
470  * @soc: soc handle
471  *
472  * return: QDF_STATUS
473  */
474 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
475 {
476 	int i, hash_elems, log2;
477 
478 	/* allocate the peer MAC address -> peer object hash table */
479 	hash_elems = soc->max_peers;
480 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
481 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
482 	log2 = dp_log2_ceil(hash_elems);
483 	hash_elems = 1 << log2;
484 
485 	soc->peer_hash.mask = hash_elems - 1;
486 	soc->peer_hash.idx_bits = log2;
487 	/* allocate an array of TAILQ peer object lists */
488 	soc->peer_hash.bins = qdf_mem_malloc(
489 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
490 	if (!soc->peer_hash.bins)
491 		return QDF_STATUS_E_NOMEM;
492 
493 	for (i = 0; i < hash_elems; i++)
494 		TAILQ_INIT(&soc->peer_hash.bins[i]);
495 
496 	qdf_spinlock_create(&soc->peer_hash_lock);
497 
498 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
499 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
500 			QDF_STATUS_SUCCESS)) {
501 		dp_peer_find_hash_detach(soc);
502 		return QDF_STATUS_E_NOMEM;
503 	}
504 	return QDF_STATUS_SUCCESS;
505 }
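
/* A worked sizing example for the table above (an annotation, not
 * driver code), assuming max_peers = 1056 and load constants
 * DP_PEER_HASH_LOAD_MULT = 2, DP_PEER_HASH_LOAD_SHIFT = 0 (values
 * assumed here to mirror the AST load constants defined earlier):
 *
 *	hash_elems = (1056 * 2) >> 0;		// 2112
 *	log2       = dp_log2_ceil(2112);	// 12
 *	hash_elems = 1 << 12;			// 4096 bins, mask 0xfff
 */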
506 
507 /*
508  * dp_peer_find_hash_add() - add peer to peer_hash_table
509  * @soc: soc handle
510  * @peer: peer handle
512  *
513  * return: none
514  */
515 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
516 {
517 	unsigned index;
518 
519 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
520 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
521 		qdf_spin_lock_bh(&soc->peer_hash_lock);
522 
523 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
524 							DP_MOD_ID_CONFIG))) {
525 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
526 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
527 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
528 			return;
529 		}
530 
531 		/*
532 		 * It is important to add the new peer at the tail of
533 		 * peer list with the bin index. Together with having
534 		 * the hash_find function search from head to tail,
535 		 * this ensures that if two entries with the same MAC address
536 		 * are stored, the one added first will be found first.
537 		 */
538 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
539 				  hash_list_elem);
540 
541 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
542 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
543 		if (soc->arch_ops.mlo_peer_find_hash_add)
544 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
545 	} else {
546 		dp_err("unknown peer type %d", peer->peer_type);
547 	}
548 }
549 
550 /*
551  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
552  * @soc: soc handle
553  * @peer: peer handle
554  *
555  * return: none
556  */
557 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
558 {
559 	unsigned index;
560 	struct dp_peer *tmppeer = NULL;
561 	int found = 0;
562 
563 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
564 
565 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
566 		/* Check that the bin list is not empty before delete */
567 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
568 
569 		qdf_spin_lock_bh(&soc->peer_hash_lock);
570 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
571 			      hash_list_elem) {
572 			if (tmppeer == peer) {
573 				found = 1;
574 				break;
575 			}
576 		}
577 		QDF_ASSERT(found);
578 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
579 			     hash_list_elem);
580 
581 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
582 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
583 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
584 		if (soc->arch_ops.mlo_peer_find_hash_remove)
585 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
586 	} else {
587 		dp_err("unknown peer type %d", peer->peer_type);
588 	}
589 }
590 #else
591 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
592 {
593 	int i, hash_elems, log2;
594 
595 	/* allocate the peer MAC address -> peer object hash table */
596 	hash_elems = soc->max_peers;
597 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
598 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
599 	log2 = dp_log2_ceil(hash_elems);
600 	hash_elems = 1 << log2;
601 
602 	soc->peer_hash.mask = hash_elems - 1;
603 	soc->peer_hash.idx_bits = log2;
604 	/* allocate an array of TAILQ peer object lists */
605 	soc->peer_hash.bins = qdf_mem_malloc(
606 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
607 	if (!soc->peer_hash.bins)
608 		return QDF_STATUS_E_NOMEM;
609 
610 	for (i = 0; i < hash_elems; i++)
611 		TAILQ_INIT(&soc->peer_hash.bins[i]);
612 
613 	qdf_spinlock_create(&soc->peer_hash_lock);
614 	return QDF_STATUS_SUCCESS;
615 }
616 
617 static void dp_peer_find_hash_detach(struct dp_soc *soc)
618 {
619 	if (soc->peer_hash.bins) {
620 		qdf_mem_free(soc->peer_hash.bins);
621 		soc->peer_hash.bins = NULL;
622 		qdf_spinlock_destroy(&soc->peer_hash_lock);
623 	}
624 }
625 
626 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
627 {
628 	unsigned index;
629 
630 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
631 	qdf_spin_lock_bh(&soc->peer_hash_lock);
632 
633 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
634 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
635 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
636 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
637 		return;
638 	}
639 
640 	/*
641 	 * It is important to add the new peer at the tail of the peer list
642 	 * with the bin index.  Together with having the hash_find function
643 	 * search from head to tail, this ensures that if two entries with
644 	 * the same MAC address are stored, the one added first will be
645 	 * found first.
646 	 */
647 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
648 
649 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
650 }
651 
652 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
653 {
654 	unsigned index;
655 	struct dp_peer *tmppeer = NULL;
656 	int found = 0;
657 
658 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
659 	/* Check that the bin list is not empty before delete */
660 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
661 
662 	qdf_spin_lock_bh(&soc->peer_hash_lock);
663 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
664 		if (tmppeer == peer) {
665 			found = 1;
666 			break;
667 		}
668 	}
669 	QDF_ASSERT(found);
670 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
671 
672 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
673 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
674 }
675 
676 
677 #endif/* WLAN_FEATURE_11BE_MLO */
678 
679 /*
680  * dp_peer_vdev_list_add() - add peer into vdev's peer list
681  * @soc: soc handle
682  * @vdev: vdev handle
683  * @peer: peer handle
684  *
685  * return: none
686  */
687 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
688 			   struct dp_peer *peer)
689 {
690 	/* only link peer will be added to vdev peer list */
691 	if (IS_MLO_DP_MLD_PEER(peer))
692 		return;
693 
694 	qdf_spin_lock_bh(&vdev->peer_list_lock);
695 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
696 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
697 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
698 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
699 		return;
700 	}
701 
702 	/* add this peer into the vdev's list */
703 	if (wlan_op_mode_sta == vdev->opmode)
704 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
705 	else
706 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
707 
708 	vdev->num_peers++;
709 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
710 }
711 
712 /*
713  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
714  * @soc: SoC handle
715  * @vdev: VDEV handle
716  * @peer: peer handle
717  *
718  * Return: none
719  */
720 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
721 			      struct dp_peer *peer)
722 {
723 	uint8_t found = 0;
724 	struct dp_peer *tmppeer = NULL;
725 
726 	/* only link peer will be added to vdev peer list */
727 	if (IS_MLO_DP_MLD_PEER(peer))
728 		return;
729 
730 	qdf_spin_lock_bh(&vdev->peer_list_lock);
731 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
732 		if (tmppeer == peer) {
733 			found = 1;
734 			break;
735 		}
736 	}
737 
738 	if (found) {
739 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
740 			     peer_list_elem);
741 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
742 		vdev->num_peers--;
743 	} else {
744 		/* Ignore the remove operation as the peer was not found */
745 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK",
746 			      soc, peer, vdev, &peer->vdev->peer_list);
747 	}
748 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
749 }
750 
751 /*
752  * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
753  * @soc: SoC handle
754  * @peer: peer handle
755  * @txrx_peer: txrx peer handle
756  *
757  * Return: None
758  */
759 void dp_txrx_peer_attach_add(struct dp_soc *soc,
760 			     struct dp_peer *peer,
761 			     struct dp_txrx_peer *txrx_peer)
762 {
763 	qdf_spin_lock_bh(&soc->peer_map_lock);
764 
765 	peer->txrx_peer = txrx_peer;
766 	txrx_peer->bss_peer = peer->bss_peer;
767 
768 	if (peer->peer_id == HTT_INVALID_PEER) {
769 		qdf_spin_unlock_bh(&soc->peer_map_lock);
770 		return;
771 	}
772 
773 	txrx_peer->peer_id = peer->peer_id;
774 
775 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
776 
777 	qdf_spin_unlock_bh(&soc->peer_map_lock);
778 }
779 
780 /*
781  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
782  * @soc: SoC handle
783  * @peer: peer handle
784  * @peer_id: peer_id
785  *
786  * Return: None
787  */
788 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
789 				struct dp_peer *peer,
790 				uint16_t peer_id)
791 {
792 	QDF_ASSERT(peer_id <= soc->max_peer_id);
793 
794 	qdf_spin_lock_bh(&soc->peer_map_lock);
795 
796 	peer->peer_id = peer_id;
797 
798 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
799 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
800 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
801 		qdf_spin_unlock_bh(&soc->peer_map_lock);
802 		return;
803 	}
804 
805 	if (!soc->peer_id_to_obj_map[peer_id]) {
806 		soc->peer_id_to_obj_map[peer_id] = peer;
807 		if (peer->txrx_peer)
808 			peer->txrx_peer->peer_id = peer_id;
809 	} else {
810 		/* Peer map event came for peer_id which
811 		 * is already mapped; this is not expected
812 		 */
813 		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
814 		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
815 		       soc->peer_id_to_obj_map[peer_id]);
816 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
817 		qdf_assert_always(0);
818 	}
819 	qdf_spin_unlock_bh(&soc->peer_map_lock);
820 }
821 
822 /*
823  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
824  * @soc: SoC handle
825  * @peer_id: peer_id
826  *
827  * Return: None
828  */
829 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
830 				   uint16_t peer_id)
831 {
832 	struct dp_peer *peer = NULL;
833 	QDF_ASSERT(peer_id <= soc->max_peer_id);
834 
835 	qdf_spin_lock_bh(&soc->peer_map_lock);
836 	peer = soc->peer_id_to_obj_map[peer_id];
837 	peer->peer_id = HTT_INVALID_PEER;
838 	if (peer->txrx_peer)
839 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
840 	soc->peer_id_to_obj_map[peer_id] = NULL;
841 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
842 	qdf_spin_unlock_bh(&soc->peer_map_lock);
843 }
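
#ifdef DP_PEER_ID_MAP_EXAMPLE
/* Illustrative sketch only (guarded out of the build): the HTT peer
 * map/unmap handlers are expected to pair the two helpers above, so
 * the DP_MOD_ID_CONFIG reference taken at map time is dropped at
 * unmap time. The peer_id value is hypothetical.
 */
static void dp_peer_id_map_example(struct dp_soc *soc, struct dp_peer *peer)
{
	uint16_t peer_id = 1;

	/* Peer map: publish the peer in the peer_id table. */
	dp_peer_find_id_to_obj_add(soc, peer, peer_id);

	/* Peer unmap: withdraw it and drop the map-time reference. */
	dp_peer_find_id_to_obj_remove(soc, peer_id);
}
#endif /* DP_PEER_ID_MAP_EXAMPLE */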
844 
845 #ifdef FEATURE_MEC
846 /**
847  * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
848  * @soc: SoC handle
849  *
850  * Return: QDF_STATUS
851  */
852 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
853 {
854 	int log2, hash_elems, i;
855 
856 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
857 	hash_elems = 1 << log2;
858 
859 	soc->mec_hash.mask = hash_elems - 1;
860 	soc->mec_hash.idx_bits = log2;
861 
862 	dp_peer_info("%pK: max mec index: %d",
863 		     soc, DP_PEER_MAX_MEC_IDX);
864 
865 	/* allocate an array of TAILQ mec object lists */
866 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
867 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
868 							      dp_mec_entry)));
869 
870 	if (!soc->mec_hash.bins)
871 		return QDF_STATUS_E_NOMEM;
872 
873 	for (i = 0; i < hash_elems; i++)
874 		TAILQ_INIT(&soc->mec_hash.bins[i]);
875 
876 	return QDF_STATUS_SUCCESS;
877 }
878 
879 /**
880  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
881  * @soc: SoC handle
882  *
883  * Return: MEC hash
884  */
885 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
886 					      union dp_align_mac_addr *mac_addr)
887 {
888 	uint32_t index;
889 
890 	index =
891 		mac_addr->align2.bytes_ab ^
892 		mac_addr->align2.bytes_cd ^
893 		mac_addr->align2.bytes_ef;
894 	index ^= index >> soc->mec_hash.idx_bits;
895 	index &= soc->mec_hash.mask;
896 	return index;
897 }
898 
899 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
900 						     uint8_t pdev_id,
901 						     uint8_t *mec_mac_addr)
902 {
903 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
904 	uint32_t index;
905 	struct dp_mec_entry *mecentry;
906 
907 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
908 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
909 	mac_addr = &local_mac_addr_aligned;
910 
911 	index = dp_peer_mec_hash_index(soc, mac_addr);
912 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
913 		if ((pdev_id == mecentry->pdev_id) &&
914 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
915 			return mecentry;
916 	}
917 
918 	return NULL;
919 }
920 
921 /**
922  * dp_peer_mec_hash_add() - Add MEC entry into hash table
923  * @soc: SoC handle
924  *
925  * This function adds the MEC entry into SoC MEC hash table
926  *
927  * Return: None
928  */
929 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
930 					struct dp_mec_entry *mecentry)
931 {
932 	uint32_t index;
933 
934 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
935 	qdf_spin_lock_bh(&soc->mec_lock);
936 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
937 	qdf_spin_unlock_bh(&soc->mec_lock);
938 }
939 
940 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
941 				 struct dp_vdev *vdev,
942 				 uint8_t *mac_addr)
943 {
944 	struct dp_mec_entry *mecentry = NULL;
945 	struct dp_pdev *pdev = NULL;
946 
947 	if (!vdev) {
948 		dp_peer_err("%pK: Peers vdev is NULL", soc);
949 		return QDF_STATUS_E_INVAL;
950 	}
951 
952 	pdev = vdev->pdev;
953 
954 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
955 					 DP_PEER_MAX_MEC_ENTRY)) {
956 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
957 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
958 		return QDF_STATUS_E_NOMEM;
959 	}
960 
961 	qdf_spin_lock_bh(&soc->mec_lock);
962 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
963 						   mac_addr);
964 	if (qdf_likely(mecentry)) {
965 		mecentry->is_active = TRUE;
966 		qdf_spin_unlock_bh(&soc->mec_lock);
967 		return QDF_STATUS_E_ALREADY;
968 	}
969 
970 	qdf_spin_unlock_bh(&soc->mec_lock);
971 
972 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
973 		      QDF_MAC_ADDR_FMT,
974 		      soc, pdev->pdev_id, vdev->vdev_id,
975 		      QDF_MAC_ADDR_REF(mac_addr));
976 
977 	mecentry = (struct dp_mec_entry *)
978 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
979 
980 	if (qdf_unlikely(!mecentry)) {
981 		dp_peer_err("%pK: fail to allocate mecentry", soc);
982 		return QDF_STATUS_E_NOMEM;
983 	}
984 
985 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
986 			 (struct qdf_mac_addr *)mac_addr);
987 	mecentry->pdev_id = pdev->pdev_id;
988 	mecentry->vdev_id = vdev->vdev_id;
989 	mecentry->is_active = TRUE;
990 	dp_peer_mec_hash_add(soc, mecentry);
991 
992 	qdf_atomic_inc(&soc->mec_cnt);
993 	DP_STATS_INC(soc, mec.added, 1);
994 
995 	return QDF_STATUS_SUCCESS;
996 }
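
#ifdef DP_MEC_ADD_EXAMPLE
/* Illustrative sketch only (guarded out of the build): how an rx-path
 * caller might record a multicast-echo source. The status handling
 * mirrors the return values of dp_peer_mec_add_entry() above.
 */
static void dp_mec_add_example(struct dp_soc *soc, struct dp_vdev *vdev,
			       uint8_t *src_mac)
{
	QDF_STATUS status;

	status = dp_peer_mec_add_entry(soc, vdev, src_mac);
	if (status == QDF_STATUS_E_ALREADY)
		return;		/* entry existed and was re-marked active */

	if (QDF_IS_STATUS_ERROR(status))
		dp_peer_err("%pK: MEC add failed", soc);
}
#endif /* DP_MEC_ADD_EXAMPLE */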
997 
998 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
999 			      void *ptr)
1000 {
1001 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
1002 
1003 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
1004 
1005 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
1006 		     hash_list_elem);
1007 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
1008 }
1009 
1010 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
1011 {
1012 	struct dp_mec_entry *mecentry, *mecentry_next;
1013 
1014 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
1015 
1016 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
1017 			   mecentry_next) {
1018 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
1019 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
1020 		qdf_mem_free(mecentry);
1021 		qdf_atomic_dec(&soc->mec_cnt);
1022 		DP_STATS_INC(soc, mec.deleted, 1);
1023 	}
1024 }
1025 
1026 /**
1027  * dp_peer_mec_hash_detach() - Free MEC Hash table
1028  * @soc: SoC handle
1029  *
1030  * Return: None
1031  */
1032 void dp_peer_mec_hash_detach(struct dp_soc *soc)
1033 {
1034 	dp_peer_mec_flush_entries(soc);
1035 	qdf_mem_free(soc->mec_hash.bins);
1036 	soc->mec_hash.bins = NULL;
1037 }
1038 
1039 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
1040 {
1041 	qdf_spinlock_destroy(&soc->mec_lock);
1042 }
1043 
1044 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
1045 {
1046 	qdf_spinlock_create(&soc->mec_lock);
1047 }
1048 #else
1049 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
1050 {
1051 	return QDF_STATUS_SUCCESS;
1052 }
1053 
1054 void dp_peer_mec_hash_detach(struct dp_soc *soc)
1055 {
1056 }
1057 #endif
1058 
1059 #ifdef FEATURE_AST
1060 #ifdef WLAN_FEATURE_11BE_MLO
1061 /*
1062  * dp_peer_exist_on_pdev() - check if a peer with the mac address exists on pdev
1063  *
1064  * @soc: Datapath SOC handle
1065  * @peer_mac_addr: peer mac address
1066  * @mac_addr_is_aligned: is mac address aligned
1067  * @pdev: Datapath PDEV handle
1068  *
1069  * Return: true if peer found else return false
1070  */
1071 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1072 				  uint8_t *peer_mac_addr,
1073 				  int mac_addr_is_aligned,
1074 				  struct dp_pdev *pdev)
1075 {
1076 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1077 	unsigned int index;
1078 	struct dp_peer *peer;
1079 	bool found = false;
1080 
1081 	if (mac_addr_is_aligned) {
1082 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1083 	} else {
1084 		qdf_mem_copy(
1085 			&local_mac_addr_aligned.raw[0],
1086 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1087 		mac_addr = &local_mac_addr_aligned;
1088 	}
1089 	index = dp_peer_find_hash_index(soc, mac_addr);
1090 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1091 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1092 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1093 		    (peer->vdev->pdev == pdev)) {
1094 			found = true;
1095 			break;
1096 		}
1097 	}
1098 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1099 
1100 	if (found)
1101 		return found;
1102 
1103 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1104 					  mac_addr_is_aligned, DP_VDEV_ALL,
1105 					  DP_MOD_ID_CDP);
1106 	if (peer) {
1107 		if (peer->vdev->pdev == pdev)
1108 			found = true;
1109 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1110 	}
1111 
1112 	return found;
1113 }
1114 #else
1115 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1116 				  uint8_t *peer_mac_addr,
1117 				  int mac_addr_is_aligned,
1118 				  struct dp_pdev *pdev)
1119 {
1120 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1121 	unsigned int index;
1122 	struct dp_peer *peer;
1123 	bool found = false;
1124 
1125 	if (mac_addr_is_aligned) {
1126 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1127 	} else {
1128 		qdf_mem_copy(
1129 			&local_mac_addr_aligned.raw[0],
1130 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1131 		mac_addr = &local_mac_addr_aligned;
1132 	}
1133 	index = dp_peer_find_hash_index(soc, mac_addr);
1134 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1135 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1136 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1137 		    (peer->vdev->pdev == pdev)) {
1138 			found = true;
1139 			break;
1140 		}
1141 	}
1142 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1143 	return found;
1144 }
1145 #endif /* WLAN_FEATURE_11BE_MLO */
1146 
1147 /*
1148  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
1149  * @soc: SoC handle
1150  *
1151  * Return: QDF_STATUS
1152  */
1153 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1154 {
1155 	int i, hash_elems, log2;
1156 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1157 
1158 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1159 		DP_AST_HASH_LOAD_SHIFT);
1160 
1161 	log2 = dp_log2_ceil(hash_elems);
1162 	hash_elems = 1 << log2;
1163 
1164 	soc->ast_hash.mask = hash_elems - 1;
1165 	soc->ast_hash.idx_bits = log2;
1166 
1167 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1168 		     soc, hash_elems, max_ast_idx);
1169 
1170 	/* allocate an array of TAILQ peer object lists */
1171 	soc->ast_hash.bins = qdf_mem_malloc(
1172 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1173 				dp_ast_entry)));
1174 
1175 	if (!soc->ast_hash.bins)
1176 		return QDF_STATUS_E_NOMEM;
1177 
1178 	for (i = 0; i < hash_elems; i++)
1179 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1180 
1181 	return QDF_STATUS_SUCCESS;
1182 }
1183 
1184 /*
1185  * dp_peer_ast_cleanup() - cleanup the references
1186  * @soc: SoC handle
1187  * @ast: ast entry
1188  *
1189  * Return: None
1190  */
1191 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1192 				       struct dp_ast_entry *ast)
1193 {
1194 	txrx_ast_free_cb cb = ast->callback;
1195 	void *cookie = ast->cookie;
1196 
1197 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1198 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1199 
1200 	/* Call the callbacks to free up the cookie */
1201 	if (cb) {
1202 		ast->callback = NULL;
1203 		ast->cookie = NULL;
1204 		cb(soc->ctrl_psoc,
1205 		   dp_soc_to_cdp_soc(soc),
1206 		   cookie,
1207 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1208 	}
1209 }
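
#ifdef DP_AST_FREE_CB_EXAMPLE
/* Illustrative sketch only (guarded out of the build): the shape of a
 * txrx_ast_free_cb that dp_peer_ast_cleanup() above invokes. The
 * cookie is whatever the registrant stored in ast->cookie; freeing it
 * here is one plausible policy, not mandated by the driver.
 */
static void dp_ast_free_cb_example(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				   struct cdp_soc *cdp_soc, void *cookie,
				   enum cdp_ast_free_status status)
{
	if (status == CDP_TXRX_AST_DELETE_IN_PROGRESS)
		qdf_mem_free(cookie);
}
#endif /* DP_AST_FREE_CB_EXAMPLE */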
1210 
1211 /*
1212  * dp_peer_ast_hash_detach() - Free AST Hash table
1213  * @soc: SoC handle
1214  *
1215  * Return: None
1216  */
1217 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1218 {
1219 	unsigned int index;
1220 	struct dp_ast_entry *ast, *ast_next;
1221 
1222 	if (!soc->ast_hash.mask)
1223 		return;
1224 
1225 	if (!soc->ast_hash.bins)
1226 		return;
1227 
1228 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1229 
1230 	qdf_spin_lock_bh(&soc->ast_lock);
1231 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1232 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1233 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1234 					   hash_list_elem, ast_next) {
1235 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1236 					     hash_list_elem);
1237 				dp_peer_ast_cleanup(soc, ast);
1238 				soc->num_ast_entries--;
1239 				qdf_mem_free(ast);
1240 			}
1241 		}
1242 	}
1243 	qdf_spin_unlock_bh(&soc->ast_lock);
1244 
1245 	qdf_mem_free(soc->ast_hash.bins);
1246 	soc->ast_hash.bins = NULL;
1247 }
1248 
1249 /*
1250  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1251  * @soc: SoC handle
1252  *
1253  * Return: AST hash
1254  */
1255 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1256 	union dp_align_mac_addr *mac_addr)
1257 {
1258 	uint32_t index;
1259 
1260 	index =
1261 		mac_addr->align2.bytes_ab ^
1262 		mac_addr->align2.bytes_cd ^
1263 		mac_addr->align2.bytes_ef;
1264 	index ^= index >> soc->ast_hash.idx_bits;
1265 	index &= soc->ast_hash.mask;
1266 	return index;
1267 }
1268 
1269 /*
1270  * dp_peer_ast_hash_add() - Add AST entry into hash table
1271  * @soc: SoC handle
1272  *
1273  * This function adds the AST entry into SoC AST hash table
1274  * It assumes caller has taken the ast lock to protect the access to this table
1275  *
1276  * Return: None
1277  */
1278 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1279 		struct dp_ast_entry *ase)
1280 {
1281 	uint32_t index;
1282 
1283 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1284 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1285 }
1286 
1287 /*
1288  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
1289  * @soc: SoC handle
1290  *
1291  * This function removes the AST entry from soc AST hash table
1292  * It assumes caller has taken the ast lock to protect the access to this table
1293  *
1294  * Return: None
1295  */
1296 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1297 			     struct dp_ast_entry *ase)
1298 {
1299 	unsigned index;
1300 	struct dp_ast_entry *tmpase;
1301 	int found = 0;
1302 
1303 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1304 		return;
1305 
1306 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1307 	/* Check if tail is not empty before delete*/
1308 	/* Check that the bin list is not empty before delete */
1309 
1310 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1311 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1312 
1313 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1314 		if (tmpase == ase) {
1315 			found = 1;
1316 			break;
1317 		}
1318 	}
1319 
1320 	QDF_ASSERT(found);
1321 
1322 	if (found)
1323 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1324 }
1325 
1326 /*
1327  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
1328  * @soc: SoC handle
1329  *
1330  * It assumes caller has taken the ast lock to protect the access to
1331  * AST hash table
1332  *
1333  * Return: AST entry
1334  */
1335 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1336 						     uint8_t *ast_mac_addr,
1337 						     uint8_t vdev_id)
1338 {
1339 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1340 	uint32_t index;
1341 	struct dp_ast_entry *ase;
1342 
1343 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1344 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1345 	mac_addr = &local_mac_addr_aligned;
1346 
1347 	index = dp_peer_ast_hash_index(soc, mac_addr);
1348 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1349 		if ((vdev_id == ase->vdev_id) &&
1350 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1351 			return ase;
1352 		}
1353 	}
1354 
1355 	return NULL;
1356 }
1357 
1358 /*
1359  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
1360  * @soc: SoC handle
1361  *
1362  * It assumes caller has taken the ast lock to protect the access to
1363  * AST hash table
1364  *
1365  * Return: AST entry
1366  */
1367 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1368 						     uint8_t *ast_mac_addr,
1369 						     uint8_t pdev_id)
1370 {
1371 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1372 	uint32_t index;
1373 	struct dp_ast_entry *ase;
1374 
1375 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1376 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1377 	mac_addr = &local_mac_addr_aligned;
1378 
1379 	index = dp_peer_ast_hash_index(soc, mac_addr);
1380 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1381 		if ((pdev_id == ase->pdev_id) &&
1382 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1383 			return ase;
1384 		}
1385 	}
1386 
1387 	return NULL;
1388 }
1389 
1390 /*
1391  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
1392  * @soc: SoC handle
1393  *
1394  * It assumes caller has taken the ast lock to protect the access to
1395  * AST hash table
1396  *
1397  * Return: AST entry
1398  */
1399 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1400 					       uint8_t *ast_mac_addr)
1401 {
1402 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1403 	unsigned index;
1404 	struct dp_ast_entry *ase;
1405 
1406 	if (!soc->ast_hash.bins)
1407 		return NULL;
1408 
1409 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1410 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1411 	mac_addr = &local_mac_addr_aligned;
1412 
1413 	index = dp_peer_ast_hash_index(soc, mac_addr);
1414 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1415 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1416 			return ase;
1417 		}
1418 	}
1419 
1420 	return NULL;
1421 }
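
#ifdef DP_AST_LOOKUP_EXAMPLE
/* Illustrative sketch only (guarded out of the build): the AST find
 * helpers above do not take soc->ast_lock themselves, so the caller
 * must hold it across the lookup and any use of the returned entry.
 */
static void dp_ast_lookup_example(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_ast_entry *ase;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find_soc(soc, mac);
	if (ase)
		dp_peer_debug("AST hit, peer_id %u", ase->peer_id);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif /* DP_AST_LOOKUP_EXAMPLE */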
1422 
1423 /*
1424  * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
1425  * @soc: SoC handle
1426  * @peer_id: peer id from firmware
1427  * @mac_addr: MAC address of ast node
1428  * @hw_peer_id: HW AST Index returned by target in peer map event
1429  * @vdev_id: vdev id of the VAP to which the peer belongs
1430  * @ast_hash: ast hash value in HW
1431  * @is_wds: flag to indicate peer map event for WDS ast entry
1432  *
1433  * Return: QDF_STATUS code
1434  */
1435 static inline
1436 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1437 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1438 				    uint8_t vdev_id, uint16_t ast_hash,
1439 				    uint8_t is_wds)
1440 {
1441 	struct dp_vdev *vdev;
1442 	struct dp_ast_entry *ast_entry;
1443 	enum cdp_txrx_ast_entry_type type;
1444 	struct dp_peer *peer;
1445 	struct dp_peer *old_peer;
1446 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1447 
1448 	if (is_wds)
1449 		type = CDP_TXRX_AST_TYPE_WDS;
1450 	else
1451 		type = CDP_TXRX_AST_TYPE_STATIC;
1452 
1453 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1454 	if (!peer) {
1455 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1456 			     soc, peer_id,
1457 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1458 		return QDF_STATUS_E_INVAL;
1459 	}
1460 
1461 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1462 		type = CDP_TXRX_AST_TYPE_MLD;
1463 
1464 	vdev = peer->vdev;
1465 	if (!vdev) {
1466 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1467 		status = QDF_STATUS_E_INVAL;
1468 		goto fail;
1469 	}
1470 
1471 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1472 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1473 		    type != CDP_TXRX_AST_TYPE_MLD &&
1474 		    type != CDP_TXRX_AST_TYPE_SELF) {
1475 			status = QDF_STATUS_E_BUSY;
1476 			goto fail;
1477 		}
1478 	}
1479 
1480 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1481 		      soc, vdev->vdev_id, type,
1482 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1483 		      QDF_MAC_ADDR_REF(mac_addr));
1484 
1485 	/*
1486 	 * In an MLO scenario, the same mac address may be present as
1487 	 * both a link mac address and an MLD mac address.
1488 	 * A duplicate AST map needs to be handled for the non-MLD type.
1489 	 */
1490 	qdf_spin_lock_bh(&soc->ast_lock);
1491 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1492 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1493 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1494 			      hw_peer_id, vdev_id,
1495 			      QDF_MAC_ADDR_REF(mac_addr));
1496 
1497 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1498 						   DP_MOD_ID_AST);
1499 		if (!old_peer) {
1500 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1501 				     soc, ast_entry->peer_id,
1502 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1503 			qdf_spin_unlock_bh(&soc->ast_lock);
1504 			status = QDF_STATUS_E_INVAL;
1505 			goto fail;
1506 		}
1507 
1508 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1509 		dp_peer_free_ast_entry(soc, ast_entry);
1510 		if (old_peer)
1511 			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1512 	}
1513 
1514 	ast_entry = (struct dp_ast_entry *)
1515 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1516 	if (!ast_entry) {
1517 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1518 		qdf_spin_unlock_bh(&soc->ast_lock);
1519 		QDF_ASSERT(0);
1520 		status = QDF_STATUS_E_NOMEM;
1521 		goto fail;
1522 	}
1523 
1524 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1525 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1526 	ast_entry->is_mapped = false;
1527 	ast_entry->delete_in_progress = false;
1528 	ast_entry->next_hop = 0;
1529 	ast_entry->vdev_id = vdev->vdev_id;
1530 	ast_entry->type = type;
1531 
1532 	switch (type) {
1533 	case CDP_TXRX_AST_TYPE_STATIC:
1534 		if (peer->vdev->opmode == wlan_op_mode_sta)
1535 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1536 		break;
1537 	case CDP_TXRX_AST_TYPE_WDS:
1538 		ast_entry->next_hop = 1;
1539 		break;
1540 	case CDP_TXRX_AST_TYPE_MLD:
1541 		break;
1542 	default:
1543 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1544 	}
1545 
1546 	ast_entry->is_active = TRUE;
1547 	DP_STATS_INC(soc, ast.added, 1);
1548 	soc->num_ast_entries++;
1549 	dp_peer_ast_hash_add(soc, ast_entry);
1550 
1551 	ast_entry->ast_idx = hw_peer_id;
1552 	ast_entry->ast_hash_value = ast_hash;
1553 	ast_entry->peer_id = peer_id;
1554 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1555 			  ase_list_elem);
1556 
1557 	qdf_spin_unlock_bh(&soc->ast_lock);
1558 fail:
1559 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1560 
1561 	return status;
1562 }
1563 
1564 /*
1565  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1566  * @soc: SoC handle
1567  * @peer: peer to which ast node belongs
1568  * @mac_addr: MAC address of ast node
1569  * @hw_peer_id: HW AST Index returned by target in peer map event
1570  * @vdev_id: vdev id of the VAP to which the peer belongs
1571  * @ast_hash: ast hash value in HW
1572  * @is_wds: flag to indicate peer map event for WDS ast entry
1573  *
1574  * Return: QDF_STATUS code
1575  */
1576 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1577 					 struct dp_peer *peer,
1578 					 uint8_t *mac_addr,
1579 					 uint16_t hw_peer_id,
1580 					 uint8_t vdev_id,
1581 					 uint16_t ast_hash,
1582 					 uint8_t is_wds)
1583 {
1584 	struct dp_ast_entry *ast_entry = NULL;
1585 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1586 	void *cookie = NULL;
1587 	txrx_ast_free_cb cb = NULL;
1588 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1589 
1590 	if (soc->ast_offload_support)
1591 		return QDF_STATUS_SUCCESS;
1592 
1593 	if (!peer) {
1594 		return QDF_STATUS_E_INVAL;
1595 	}
1596 
1597 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1598 		    soc, peer, hw_peer_id, vdev_id,
1599 		    QDF_MAC_ADDR_REF(mac_addr));
1600 
1601 	qdf_spin_lock_bh(&soc->ast_lock);
1602 
1603 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1604 
1605 	if (is_wds) {
1606 		/*
1607 		 * In certain cases, such as an auth attack on a
1608 		 * repeater, the number of ast_entries falling in the
1609 		 * same hash bucket can exceed the max_skid length
1610 		 * supported by HW in the root AP. In these cases the
1611 		 * FW returns the hw_peer_id (ast_index) as 0xffff,
1612 		 * indicating that HW could not add the entry to its
1613 		 * table. The host has to delete the entry from its own
1614 		 * table in these cases.
1615 		 */
1616 		if (hw_peer_id == HTT_INVALID_PEER) {
1617 			DP_STATS_INC(soc, ast.map_err, 1);
1618 			if (ast_entry) {
1619 				if (ast_entry->is_mapped) {
1620 					soc->ast_table[ast_entry->ast_idx] =
1621 						NULL;
1622 				}
1623 
1624 				cb = ast_entry->callback;
1625 				cookie = ast_entry->cookie;
1626 				peer_type = ast_entry->type;
1627 
1628 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1629 				dp_peer_free_ast_entry(soc, ast_entry);
1630 
1631 				qdf_spin_unlock_bh(&soc->ast_lock);
1632 
1633 				if (cb) {
1634 					cb(soc->ctrl_psoc,
1635 					   dp_soc_to_cdp_soc(soc),
1636 					   cookie,
1637 					   CDP_TXRX_AST_DELETED);
1638 				}
1639 			} else {
1640 				qdf_spin_unlock_bh(&soc->ast_lock);
1641 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1642 					      peer, peer->peer_id,
1643 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1644 					      QDF_MAC_ADDR_REF(mac_addr),
1645 					      vdev_id, is_wds);
1646 			}
1647 			err = QDF_STATUS_E_INVAL;
1648 
1649 			dp_hmwds_ast_add_notify(peer, mac_addr,
1650 						peer_type, err, true);
1651 
1652 			return err;
1653 		}
1654 	}
1655 
1656 	if (ast_entry) {
1657 		ast_entry->ast_idx = hw_peer_id;
1658 		soc->ast_table[hw_peer_id] = ast_entry;
1659 		ast_entry->is_active = TRUE;
1660 		peer_type = ast_entry->type;
1661 		ast_entry->ast_hash_value = ast_hash;
1662 		ast_entry->is_mapped = TRUE;
1663 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1664 
1665 		ast_entry->peer_id = peer->peer_id;
1666 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1667 				  ase_list_elem);
1668 	}
1669 
1670 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1671 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1672 			soc->cdp_soc.ol_ops->peer_map_event(
1673 			soc->ctrl_psoc, peer->peer_id,
1674 			hw_peer_id, vdev_id,
1675 			mac_addr, peer_type, ast_hash);
1676 		}
1677 	} else {
1678 		dp_peer_err("%pK: AST entry not found", soc);
1679 		err = QDF_STATUS_E_NOENT;
1680 	}
1681 
1682 	qdf_spin_unlock_bh(&soc->ast_lock);
1683 
1684 	dp_hmwds_ast_add_notify(peer, mac_addr,
1685 				peer_type, err, true);
1686 
1687 	return err;
1688 }
1689 
1690 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1691 			   struct cdp_soc *dp_soc,
1692 			   void *cookie,
1693 			   enum cdp_ast_free_status status)
1694 {
1695 	struct dp_ast_free_cb_params *param =
1696 		(struct dp_ast_free_cb_params *)cookie;
1697 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1698 	struct dp_peer *peer = NULL;
1699 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1700 
1701 	if (status != CDP_TXRX_AST_DELETED) {
1702 		qdf_mem_free(cookie);
1703 		return;
1704 	}
1705 
1706 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1707 				      0, param->vdev_id, DP_MOD_ID_AST);
1708 	if (peer) {
1709 		err = dp_peer_add_ast(soc, peer,
1710 				      &param->mac_addr.raw[0],
1711 				      param->type,
1712 				      param->flags);
1713 
1714 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1715 					param->type, err, false);
1716 
1717 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1718 	}
1719 	qdf_mem_free(cookie);
1720 }
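
/* Note on the flow above (an annotation, not driver code):
 * dp_peer_free_hmwds_cb() is installed by dp_peer_add_ast() below when
 * an HMWDS add collides with an existing AST entry. Once the target
 * confirms the delete (status == CDP_TXRX_AST_DELETED), the callback
 * re-adds the entry using the parameters saved in the cookie, then
 * frees the cookie in every case.
 */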
1721 
1722 /*
1723  * dp_peer_add_ast() - Allocate and add AST entry into peer list
1724  * @soc: SoC handle
1725  * @peer: peer to which ast node belongs
1726  * @mac_addr: MAC address of ast node
1727  * @is_self: Is this base AST entry with peer mac address
1728  *
1729  * This API is used by WDS source port learning function to
1730  * add a new AST entry into peer AST list
1731  *
1732  * Return: QDF_STATUS code
1733  */
1734 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1735 			   struct dp_peer *peer,
1736 			   uint8_t *mac_addr,
1737 			   enum cdp_txrx_ast_entry_type type,
1738 			   uint32_t flags)
1739 {
1740 	struct dp_ast_entry *ast_entry = NULL;
1741 	struct dp_vdev *vdev = NULL;
1742 	struct dp_pdev *pdev = NULL;
1743 	txrx_ast_free_cb cb = NULL;
1744 	void *cookie = NULL;
1745 	struct dp_peer *vap_bss_peer = NULL;
1746 	bool is_peer_found = false;
1747 	int status = 0;
1748 
1749 	if (soc->ast_offload_support)
1750 		return QDF_STATUS_E_INVAL;
1751 
1752 	vdev = peer->vdev;
1753 	if (!vdev) {
1754 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1755 		QDF_ASSERT(0);
1756 		return QDF_STATUS_E_INVAL;
1757 	}
1758 
1759 	pdev = vdev->pdev;
1760 
1761 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1762 
1763 	qdf_spin_lock_bh(&soc->ast_lock);
1764 
1765 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1766 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1767 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1768 			qdf_spin_unlock_bh(&soc->ast_lock);
1769 			return QDF_STATUS_E_BUSY;
1770 		}
1771 	}
1772 
1773 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1774 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1775 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1776 		      QDF_MAC_ADDR_REF(mac_addr));
1777 
1778 	/* fw supports only 2 times the max_peers ast entries */
1779 	if (soc->num_ast_entries >=
1780 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1781 		qdf_spin_unlock_bh(&soc->ast_lock);
1782 		dp_peer_err("%pK: Max ast entries reached", soc);
1783 		return QDF_STATUS_E_RESOURCES;
1784 	}
1785 
1786 	/* If the AST entry already exists, just return from here.
1787 	 * An ast entry with the same mac address can exist on different
1788 	 * radios; if ast_override support is enabled, use the search by
1789 	 * pdev in this case.
1790 	 */
1791 	if (soc->ast_override_support) {
1792 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1793 							    pdev->pdev_id);
1794 		if (ast_entry) {
1795 			qdf_spin_unlock_bh(&soc->ast_lock);
1796 			return QDF_STATUS_E_ALREADY;
1797 		}
1798 
1799 		if (is_peer_found) {
1800 			/* During WDS to static roaming, the peer is added
1801 			 * to the list before the static AST entry is
1802 			 * created. So allow an AST entry of STATIC type
1803 			 * even if the peer is present.
1804 			 */
1805 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1806 				qdf_spin_unlock_bh(&soc->ast_lock);
1807 				return QDF_STATUS_E_ALREADY;
1808 			}
1809 		}
1810 	} else {
1811 		/* WDS_HM_SEC entries can be added for the same mac
1812 		 * address; do not check for an existing entry.
1813 		 */
1814 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1815 			goto add_ast_entry;
1816 
1817 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1818 
1819 		if (ast_entry) {
1820 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1821 			    !ast_entry->delete_in_progress) {
1822 				qdf_spin_unlock_bh(&soc->ast_lock);
1823 				return QDF_STATUS_E_ALREADY;
1824 			}
1825 
1826 			/* An add for an HMWDS entry cannot be ignored if
1827 			 * an AST entry with the same mac address exists.
1828 			 *
1829 			 * If an ast entry exists with the requested mac
1830 			 * address, send a delete command and register a
1831 			 * callback which can take care of adding the HMWDS
1832 			 * ast entry on delete confirmation from the target.
1833 			 */
1834 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1835 				struct dp_ast_free_cb_params *param = NULL;
1836 
1837 				if (ast_entry->type ==
1838 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1839 					goto add_ast_entry;
1840 
1841 				/* save existing callback */
1842 				if (ast_entry->callback) {
1843 					cb = ast_entry->callback;
1844 					cookie = ast_entry->cookie;
1845 				}
1846 
1847 				param = qdf_mem_malloc(sizeof(*param));
1848 				if (!param) {
1849 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1850 						  QDF_TRACE_LEVEL_ERROR,
1851 						  "Allocation failed");
1852 					qdf_spin_unlock_bh(&soc->ast_lock);
1853 					return QDF_STATUS_E_NOMEM;
1854 				}
1855 
1856 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1857 					     QDF_MAC_ADDR_SIZE);
1858 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1859 					     &peer->mac_addr.raw[0],
1860 					     QDF_MAC_ADDR_SIZE);
1861 				param->type = type;
1862 				param->flags = flags;
1863 				param->vdev_id = vdev->vdev_id;
1864 				ast_entry->callback = dp_peer_free_hmwds_cb;
1865 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1866 				ast_entry->type = type;
1867 				ast_entry->cookie = (void *)param;
1868 				if (!ast_entry->delete_in_progress)
1869 					dp_peer_del_ast(soc, ast_entry);
1870 
1871 				qdf_spin_unlock_bh(&soc->ast_lock);
1872 
1873 				/* Call the saved callback*/
1874 				if (cb) {
1875 					cb(soc->ctrl_psoc,
1876 					   dp_soc_to_cdp_soc(soc),
1877 					   cookie,
1878 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1879 				}
1880 				return QDF_STATUS_E_AGAIN;
1881 			}
1882 
1883 			qdf_spin_unlock_bh(&soc->ast_lock);
1884 			return QDF_STATUS_E_ALREADY;
1885 		}
1886 	}
1887 
1888 add_ast_entry:
1889 	ast_entry = (struct dp_ast_entry *)
1890 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1891 
1892 	if (!ast_entry) {
1893 		qdf_spin_unlock_bh(&soc->ast_lock);
1894 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1895 		QDF_ASSERT(0);
1896 		return QDF_STATUS_E_NOMEM;
1897 	}
1898 
1899 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1900 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1901 	ast_entry->is_mapped = false;
1902 	ast_entry->delete_in_progress = false;
1903 	ast_entry->peer_id = HTT_INVALID_PEER;
1904 	ast_entry->next_hop = 0;
1905 	ast_entry->vdev_id = vdev->vdev_id;
1906 
1907 	switch (type) {
1908 	case CDP_TXRX_AST_TYPE_STATIC:
1909 		peer->self_ast_entry = ast_entry;
1910 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1911 		if (peer->vdev->opmode == wlan_op_mode_sta)
1912 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1913 		break;
1914 	case CDP_TXRX_AST_TYPE_SELF:
1915 		peer->self_ast_entry = ast_entry;
1916 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1917 		break;
1918 	case CDP_TXRX_AST_TYPE_WDS:
1919 		ast_entry->next_hop = 1;
1920 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1921 		break;
1922 	case CDP_TXRX_AST_TYPE_WDS_HM:
1923 		ast_entry->next_hop = 1;
1924 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1925 		break;
1926 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1927 		ast_entry->next_hop = 1;
1928 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1929 		ast_entry->peer_id = peer->peer_id;
1930 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1931 				  ase_list_elem);
1932 		break;
1933 	case CDP_TXRX_AST_TYPE_DA:
1934 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1935 							  DP_MOD_ID_AST);
1936 		if (!vap_bss_peer) {
1937 			qdf_spin_unlock_bh(&soc->ast_lock);
1938 			qdf_mem_free(ast_entry);
1939 			return QDF_STATUS_E_FAILURE;
1940 		}
1941 		peer = vap_bss_peer;
1942 		ast_entry->next_hop = 1;
1943 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1944 		break;
1945 	default:
1946 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1947 	}
1948 
1949 	ast_entry->is_active = TRUE;
1950 	DP_STATS_INC(soc, ast.added, 1);
1951 	soc->num_ast_entries++;
1952 	dp_peer_ast_hash_add(soc, ast_entry);
1953 
1954 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1955 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1956 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1957 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1958 		status = dp_add_wds_entry_wrapper(soc,
1959 						  peer,
1960 						  mac_addr,
1961 						  flags,
1962 						  ast_entry->type);
1963 
1964 	if (vap_bss_peer)
1965 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1966 
1967 	qdf_spin_unlock_bh(&soc->ast_lock);
1968 	return qdf_status_from_os_return(status);
1969 }
1970 
1971 qdf_export_symbol(dp_peer_add_ast);
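
/*
 * Illustrative sketch only (not part of the driver): how a WDS
 * source-port-learning caller might consume dp_peer_add_ast() return
 * codes. example_wds_learn_ast() and its arguments are hypothetical;
 * note dp_peer_add_ast() takes soc->ast_lock internally, so the caller
 * must not hold it.
 */
#if 0
static void example_wds_learn_ast(struct dp_soc *soc, struct dp_peer *peer,
				  uint8_t *sa_mac)
{
	QDF_STATUS status;

	status = dp_peer_add_ast(soc, peer, sa_mac,
				 CDP_TXRX_AST_TYPE_WDS, 0);
	if (status == QDF_STATUS_E_ALREADY)
		return;		/* entry already present on this soc/pdev */
	if (status == QDF_STATUS_E_AGAIN)
		return;		/* HM overwrite in flight; the new entry is
				 * re-added from dp_peer_free_hmwds_cb()
				 */
	if (QDF_IS_STATUS_ERROR(status))
		dp_peer_err("%pK: WDS AST add failed", soc);
}
#endif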
1972 
1973 /*
1974  * dp_peer_free_ast_entry() - Free up the ast entry memory
1975  * @soc: SoC handle
1976  * @ast_entry: Address search entry
1977  *
1978  * This API is used to free up the memory associated with
1979  * AST entry.
1980  *
1981  * Return: None
1982  */
1983 void dp_peer_free_ast_entry(struct dp_soc *soc,
1984 			    struct dp_ast_entry *ast_entry)
1985 {
1986 	/*
1987 	 * NOTE: Ensure that call to this API is done
1988 	 * after soc->ast_lock is taken
1989 	 */
1990 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1991 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1992 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1993 
1994 	ast_entry->callback = NULL;
1995 	ast_entry->cookie = NULL;
1996 
1997 	DP_STATS_INC(soc, ast.deleted, 1);
1998 	dp_peer_ast_hash_remove(soc, ast_entry);
1999 	dp_peer_ast_cleanup(soc, ast_entry);
2000 	qdf_mem_free(ast_entry);
2001 	soc->num_ast_entries--;
2002 }
2003 
2004 /*
2005  * dp_peer_unlink_ast_entry() - Unlink AST entry from its peer
2006  * @soc: SoC handle
2007  * @ast_entry: Address search entry
2008  * @peer: peer
2009  *
2010  * This API is used to unlink the AST entry from the peer's ast list
2011  * and to clear its soc ast_table mapping.
2012  *
2013  * Return: None
2014  */
2015 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2016 			      struct dp_ast_entry *ast_entry,
2017 			      struct dp_peer *peer)
2018 {
2019 	if (!peer) {
2020 		dp_info_rl("NULL peer");
2021 		return;
2022 	}
2023 
2024 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
2025 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2026 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2027 			  ast_entry->type);
2028 		return;
2029 	}
2030 	/*
2031 	 * NOTE: Ensure that call to this API is done
2032 	 * after soc->ast_lock is taken
2033 	 */
2034 
2035 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
2036 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
2037 
2038 	if (ast_entry == peer->self_ast_entry)
2039 		peer->self_ast_entry = NULL;
2040 
2041 	/*
2042 	 * clear the soc ast_table slot only if this
2043 	 * entry is mapped to it
2044 	 */
2045 	if (ast_entry->is_mapped)
2046 		soc->ast_table[ast_entry->ast_idx] = NULL;
2047 
2048 	ast_entry->peer_id = HTT_INVALID_PEER;
2049 }
2050 
2051 /*
2052  * dp_peer_del_ast() - Delete and free AST entry
2053  * @soc: SoC handle
2054  * @ast_entry: AST entry of the node
2055  *
2056  * This function removes the AST entry from peer and soc tables.
2057  * It assumes the caller has taken the ast lock to protect access to
2058  * these tables.
2059  *
2060  * Return: None
2061  */
2062 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2063 {
2064 	struct dp_peer *peer = NULL;
2065 
2066 	if (soc->ast_offload_support)
2067 		return;
2068 
2069 	if (!ast_entry) {
2070 		dp_info_rl("NULL AST entry");
2071 		return;
2072 	}
2073 
2074 	if (ast_entry->delete_in_progress) {
2075 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2076 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2077 			  ast_entry->type);
2078 		return;
2079 	}
2080 
2081 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2082 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2083 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2084 
2085 	ast_entry->delete_in_progress = true;
2086 
2087 	/* In teardown, del ast is called after setting the logical delete
2088 	 * state, so use __dp_peer_get_ref_by_id to get the reference
2089 	 * irrespective of that state
2090 	 */
2091 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2092 				       DP_MOD_ID_AST);
2093 
2094 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2095 
2096 	/* Remove SELF and STATIC entries in teardown itself */
2097 	if (!ast_entry->next_hop)
2098 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2099 
2100 	if (ast_entry->is_mapped)
2101 		soc->ast_table[ast_entry->ast_idx] = NULL;
2102 
2103 	/* if peer map v2 is enabled, the ast entry is not freed here;
2104 	 * it is supposed to be freed in the unmap event (after we
2105 	 * receive delete confirmation from target)
2106 	 *
2107 	 * if peer_id is invalid, we did not get the peer map event for
2108 	 * the peer; only in that case free the ast entry from here
2109 	 */
2110 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2111 		goto end;
2112 
2113 	/* for a WDS secondary entry ast_entry->next_hop would be set, so
2114 	 * unlinking has to be done explicitly here.
2115 	 * As this entry is not a mapped entry, no unmap notification will
2116 	 * come from FW. Hence unlinking is done right here.
2117 	 */
2118 
2119 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2120 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2121 
2122 	dp_peer_free_ast_entry(soc, ast_entry);
2123 
2124 end:
2125 	if (peer)
2126 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2127 }
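
/*
 * Illustrative sketch only: dp_peer_del_ast() expects the caller to
 * already hold soc->ast_lock (see the function comment above). A
 * hypothetical caller deleting an entry looked up by MAC would follow
 * this pattern; example_del_ast_by_mac() is not a driver API.
 */
#if 0
static void example_del_ast_by_mac(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac);
	if (ast_entry)
		dp_peer_del_ast(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif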
2128 
2129 /*
2130  * dp_peer_update_ast() - Update AST entry for a roamed peer
2131  * @soc: SoC handle
2132  * @peer: peer to which ast node belongs
2133  * @ast_entry: AST entry of the node
2134  * @flags: wds or hmwds
2135  *
2136  * This function updates the AST entry to point at the roamed peer in
2137  * the soc tables. It assumes the caller has taken the ast lock to
2138  * protect access to these tables.
2139  *
2140  * Return: 0 if the ast entry is updated successfully
2141  *         -1 on failure
2142  */
2143 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2144 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2145 {
2146 	int ret = -1;
2147 	struct dp_peer *old_peer;
2148 
2149 	if (soc->ast_offload_support)
2150 		return QDF_STATUS_E_INVAL;
2151 
2152 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2153 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2154 		      peer->vdev->vdev_id, flags,
2155 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2156 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2157 
2158 	/* Do not send AST update in below cases
2159 	 *  1) Ast entry delete has already triggered
2160 	 *  2) Peer delete is already triggered
2161 	 *  3) We did not get the HTT map for create event
2162 	 */
2163 	if (ast_entry->delete_in_progress ||
2164 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2165 	    !ast_entry->is_mapped)
2166 		return ret;
2167 
2168 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2169 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2170 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2171 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2172 		return 0;
2173 
2174 	/*
2175 	 * Avoids flood of WMI update messages sent to FW for same peer.
2176 	 */
2177 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2178 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2179 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2180 	    (ast_entry->is_active))
2181 		return 0;
2182 
2183 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2184 					 DP_MOD_ID_AST);
2185 	if (!old_peer)
2186 		return 0;
2187 
2188 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2189 
2190 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2191 
2192 	ast_entry->peer_id = peer->peer_id;
2193 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2194 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2195 	ast_entry->vdev_id = peer->vdev->vdev_id;
2196 	ast_entry->is_active = TRUE;
2197 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2198 
2199 	ret = dp_update_wds_entry_wrapper(soc,
2200 					  peer,
2201 					  ast_entry->mac_addr.raw,
2202 					  flags);
2203 
2204 	return ret;
2205 }
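
/*
 * Illustrative sketch only: a hypothetical roam path moving a WDS AST
 * entry to the peer it roamed to, under soc->ast_lock as the function
 * comment above requires. example_roam_update_ast(), new_peer, wds_mac
 * and flags are assumed, already-validated inputs.
 */
#if 0
static int example_roam_update_ast(struct dp_soc *soc,
				   struct dp_peer *new_peer,
				   uint8_t *wds_mac, uint32_t flags)
{
	struct dp_ast_entry *ast_entry;
	int ret = -1;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, wds_mac);
	if (ast_entry)
		ret = dp_peer_update_ast(soc, new_peer, ast_entry, flags);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return ret;
}
#endif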
2206 
2207 /*
2208  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
2209  * @soc: SoC handle
2210  * @ast_entry: AST entry of the node
2211  *
2212  * This function gets the pdev_id from the ast entry.
2213  *
2214  * Return: (uint8_t) pdev_id
2215  */
2216 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2217 				struct dp_ast_entry *ast_entry)
2218 {
2219 	return ast_entry->pdev_id;
2220 }
2221 
2222 /*
2223  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
2224  * @soc: SoC handle
2225  * @ast_entry: AST entry of the node
2226  *
2227  * This function gets the next hop from the ast entry.
2228  *
2229  * Return: (uint8_t) next_hop
2230  */
2231 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2232 				struct dp_ast_entry *ast_entry)
2233 {
2234 	return ast_entry->next_hop;
2235 }
2236 
2237 /*
2238  * dp_peer_ast_set_type() - set type in the ast entry
2239  * @soc: SoC handle
2240  * @ast_entry: AST entry of the node
2241  * @type: type to set, from enum cdp_txrx_ast_entry_type
2242  *
2243  * This function sets the type in the ast entry.
2244  * Return: None
2245  */
2246 void dp_peer_ast_set_type(struct dp_soc *soc,
2247 				struct dp_ast_entry *ast_entry,
2248 				enum cdp_txrx_ast_entry_type type)
2249 {
2250 	ast_entry->type = type;
2251 }
2252 
2253 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2254 			      struct dp_ast_entry *ast_entry,
2255 			      struct dp_peer *peer)
2256 {
2257 	bool delete_in_fw = false;
2258 
2259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2260 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
2261 		  __func__, ast_entry->type, ast_entry->pdev_id,
2262 		  ast_entry->vdev_id,
2263 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2264 		  ast_entry->next_hop, ast_entry->peer_id);
2265 
2266 	/*
2267 	 * If peer state is logical delete, the peer is about to get
2268 	 * torn down with a peer delete command to firmware,
2269 	 * which will cleanup all the wds ast entries.
2270 	 * So, no need to send explicit wds ast delete to firmware.
2271 	 */
2272 	if (ast_entry->next_hop) {
2273 		if (peer && dp_peer_state_cmp(peer,
2274 					      DP_PEER_STATE_LOGICAL_DELETE))
2275 			delete_in_fw = false;
2276 		else
2277 			delete_in_fw = true;
2278 
2279 		dp_del_wds_entry_wrapper(soc,
2280 					 ast_entry->vdev_id,
2281 					 ast_entry->mac_addr.raw,
2282 					 ast_entry->type,
2283 					 delete_in_fw);
2284 	}
2285 }
2286 #else
2287 void dp_peer_free_ast_entry(struct dp_soc *soc,
2288 			    struct dp_ast_entry *ast_entry)
2289 {
2290 }
2291 
2292 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2293 			      struct dp_ast_entry *ast_entry,
2294 			      struct dp_peer *peer)
2295 {
2296 }
2297 
2298 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2299 			     struct dp_ast_entry *ase)
2300 {
2301 }
2302 
2303 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2304 						     uint8_t *ast_mac_addr,
2305 						     uint8_t vdev_id)
2306 {
2307 	return NULL;
2308 }
2309 
2310 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2311 			   struct dp_peer *peer,
2312 			   uint8_t *mac_addr,
2313 			   enum cdp_txrx_ast_entry_type type,
2314 			   uint32_t flags)
2315 {
2316 	return QDF_STATUS_E_FAILURE;
2317 }
2318 
2319 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2320 {
2321 }
2322 
2323 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2324 			struct dp_ast_entry *ast_entry, uint32_t flags)
2325 {
2326 	return 1;
2327 }
2328 
2329 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2330 					       uint8_t *ast_mac_addr)
2331 {
2332 	return NULL;
2333 }
2334 
2335 static inline
2336 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2337 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2338 				    uint8_t vdev_id, uint16_t ast_hash,
2339 				    uint8_t is_wds)
2340 {
2341 	return QDF_STATUS_SUCCESS;
2342 }
2343 
2344 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2345 						     uint8_t *ast_mac_addr,
2346 						     uint8_t pdev_id)
2347 {
2348 	return NULL;
2349 }
2350 
2351 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2352 {
2353 	return QDF_STATUS_SUCCESS;
2354 }
2355 
2356 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2357 					 struct dp_peer *peer,
2358 					 uint8_t *mac_addr,
2359 					 uint16_t hw_peer_id,
2360 					 uint8_t vdev_id,
2361 					 uint16_t ast_hash,
2362 					 uint8_t is_wds)
2363 {
2364 	return QDF_STATUS_SUCCESS;
2365 }
2366 
2367 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2368 {
2369 }
2370 
2371 void dp_peer_ast_set_type(struct dp_soc *soc,
2372 				struct dp_ast_entry *ast_entry,
2373 				enum cdp_txrx_ast_entry_type type)
2374 {
2375 }
2376 
2377 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2378 				struct dp_ast_entry *ast_entry)
2379 {
2380 	return 0xff;
2381 }
2382 
2383 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2384 				 struct dp_ast_entry *ast_entry)
2385 {
2386 	return 0xff;
2387 }
2388 
2389 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2390 			      struct dp_ast_entry *ast_entry,
2391 			      struct dp_peer *peer)
2392 {
2393 }
2394 #endif
2395 
2396 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2397 void dp_peer_ast_send_multi_wds_del(
2398 		struct dp_soc *soc, uint8_t vdev_id,
2399 		struct peer_del_multi_wds_entries *wds_list)
2400 {
2401 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2402 
2403 	if (cdp_soc && cdp_soc->ol_ops &&
2404 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2405 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2406 							  vdev_id, wds_list);
2407 }
2408 #endif
2409 
2410 #ifdef FEATURE_WDS
2411 /**
2412  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2413  * @soc: soc handle
2414  * @peer: peer handle
2415  *
2416  * Free all the wds ast entries associated with peer
2417  *
2418  * Return: Number of wds ast entries freed
2419  */
2420 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2421 					     struct dp_peer *peer)
2422 {
2423 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2424 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2425 	uint32_t num_ast = 0;
2426 
2427 	TAILQ_INIT(&ast_local_list);
2428 	qdf_spin_lock_bh(&soc->ast_lock);
2429 
2430 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2431 		if (ast_entry->next_hop)
2432 			num_ast++;
2433 
2434 		if (ast_entry->is_mapped)
2435 			soc->ast_table[ast_entry->ast_idx] = NULL;
2436 
2437 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2438 		DP_STATS_INC(soc, ast.deleted, 1);
2439 		dp_peer_ast_hash_remove(soc, ast_entry);
2440 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2441 				  ase_list_elem);
2442 		soc->num_ast_entries--;
2443 	}
2444 
2445 	qdf_spin_unlock_bh(&soc->ast_lock);
2446 
2447 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2448 			   temp_ast_entry) {
2449 		if (ast_entry->callback)
2450 			ast_entry->callback(soc->ctrl_psoc,
2451 					    dp_soc_to_cdp_soc(soc),
2452 					    ast_entry->cookie,
2453 					    CDP_TXRX_AST_DELETED);
2454 
2455 		qdf_mem_free(ast_entry);
2456 	}
2457 
2458 	return num_ast;
2459 }
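
/*
 * Design note: entries are first unlinked onto ast_local_list under
 * soc->ast_lock, and only after the lock is dropped are the registered
 * callbacks invoked and the memory freed, so arbitrary callback code
 * never runs while the AST spinlock is held.
 */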
2460 /**
2461  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2462  * @soc: soc handle
2463  * @peer: peer handle
2464  * @free_wds_count: number of wds entries freed by FW with peer delete
2465  *
2466  * Free all the wds ast entries associated with peer and compare with
2467  * the value received from firmware
2468  *
2469  * Return: None
2470  */
2471 static void
2472 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2473 			  uint32_t free_wds_count)
2474 {
2475 	uint32_t wds_deleted = 0;
2476 
2477 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2478 		return;
2479 
2480 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2481 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2482 	    (free_wds_count != wds_deleted)) {
2483 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2484 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
2485 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2486 			 free_wds_count, wds_deleted);
2487 	}
2488 }
2489 
2490 #else
2491 static void
2492 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2493 			  uint32_t free_wds_count)
2494 {
2495 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2496 
2497 	qdf_spin_lock_bh(&soc->ast_lock);
2498 
2499 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2500 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2501 
2502 		if (ast_entry->is_mapped)
2503 			soc->ast_table[ast_entry->ast_idx] = NULL;
2504 
2505 		dp_peer_free_ast_entry(soc, ast_entry);
2506 	}
2507 
2508 	peer->self_ast_entry = NULL;
2509 	qdf_spin_unlock_bh(&soc->ast_lock);
2510 }
2511 #endif
2512 
2513 /**
2514  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2515  * @soc: soc handle
2516  * @peer: peer handle
2517  * @vdev_id: vdev_id
2518  * @mac_addr: mac address of the AST entry to search and delete
2519  *
2520  * find the ast entry from the peer list using the mac address and free
2521  * the entry.
2522  *
2523  * Return: SUCCESS or NOENT
2524  */
2525 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2526 					 struct dp_peer *peer,
2527 					 uint8_t vdev_id,
2528 					 uint8_t *mac_addr)
2529 {
2530 	struct dp_ast_entry *ast_entry;
2531 	void *cookie = NULL;
2532 	txrx_ast_free_cb cb = NULL;
2533 
2534 	/*
2535 	 * clear the soc ast_table slot only if the
2536 	 * entry is mapped to it
2537 	 */
2538 
2539 	qdf_spin_lock_bh(&soc->ast_lock);
2540 
2541 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2542 	if (!ast_entry) {
2543 		qdf_spin_unlock_bh(&soc->ast_lock);
2544 		return QDF_STATUS_E_NOENT;
2545 	} else if (ast_entry->is_mapped) {
2546 		soc->ast_table[ast_entry->ast_idx] = NULL;
2547 	}
2548 
2549 	cb = ast_entry->callback;
2550 	cookie = ast_entry->cookie;
2551 
2552 
2553 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2554 
2555 	dp_peer_free_ast_entry(soc, ast_entry);
2556 
2557 	qdf_spin_unlock_bh(&soc->ast_lock);
2558 
2559 	if (cb) {
2560 		cb(soc->ctrl_psoc,
2561 		   dp_soc_to_cdp_soc(soc),
2562 		   cookie,
2563 		   CDP_TXRX_AST_DELETED);
2564 	}
2565 
2566 	return QDF_STATUS_SUCCESS;
2567 }
2568 
2569 void dp_peer_find_hash_erase(struct dp_soc *soc)
2570 {
2571 	int i, j;
2572 
2573 	/*
2574 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2575 	 * it's known that the soc is no longer in use.
2576 	 */
2577 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2578 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2579 			struct dp_peer *peer, *peer_next;
2580 
2581 			/*
2582 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2583 			 * memory access violation after peer is freed
2584 			 */
2585 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2586 				hash_list_elem, peer_next) {
2587 				/*
2588 				 * Don't remove the peer from the hash table -
2589 				 * that would modify the list we are currently
2590 				 * traversing, and it's not necessary anyway.
2591 				 */
2592 				/*
2593 				 * Artificially adjust the peer's ref count to
2594 				 * 1, so it will get deleted by
2595 				 * dp_peer_unref_delete.
2596 				 */
2597 				/* set to zero */
2598 				qdf_atomic_init(&peer->ref_cnt);
2599 				for (j = 0; j < DP_MOD_ID_MAX; j++)
2600 					qdf_atomic_init(&peer->mod_refs[j]);
2601 				/* incr to one */
2602 				qdf_atomic_inc(&peer->ref_cnt);
2603 				qdf_atomic_inc(&peer->mod_refs
2604 						[DP_MOD_ID_CONFIG]);
2605 				dp_peer_unref_delete(peer,
2606 						     DP_MOD_ID_CONFIG);
2607 			}
2608 		}
2609 	}
2610 }
2611 
2612 void dp_peer_ast_table_detach(struct dp_soc *soc)
2613 {
2614 	if (soc->ast_table) {
2615 		qdf_mem_free(soc->ast_table);
2616 		soc->ast_table = NULL;
2617 	}
2618 }
2619 
2620 /*
2621  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2622  * @soc: soc handle
2623  *
2624  * Return: None
2625  */
2626 void dp_peer_find_map_detach(struct dp_soc *soc)
2627 {
2628 	if (soc->peer_id_to_obj_map) {
2629 		qdf_mem_free(soc->peer_id_to_obj_map);
2630 		soc->peer_id_to_obj_map = NULL;
2631 		qdf_spinlock_destroy(&soc->peer_map_lock);
2632 	}
2633 }
2634 
2635 #ifndef AST_OFFLOAD_ENABLE
2636 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2637 {
2638 	QDF_STATUS status;
2639 
2640 	status = dp_peer_find_map_attach(soc);
2641 	if (!QDF_IS_STATUS_SUCCESS(status))
2642 		return status;
2643 
2644 	status = dp_peer_find_hash_attach(soc);
2645 	if (!QDF_IS_STATUS_SUCCESS(status))
2646 		goto map_detach;
2647 
2648 	status = dp_peer_ast_table_attach(soc);
2649 	if (!QDF_IS_STATUS_SUCCESS(status))
2650 		goto hash_detach;
2651 
2652 	status = dp_peer_ast_hash_attach(soc);
2653 	if (!QDF_IS_STATUS_SUCCESS(status))
2654 		goto ast_table_detach;
2655 
2656 	status = dp_peer_mec_hash_attach(soc);
2657 	if (QDF_IS_STATUS_SUCCESS(status)) {
2658 		dp_soc_wds_attach(soc);
2659 		return status;
2660 	}
2661 
2662 	dp_peer_ast_hash_detach(soc);
2663 ast_table_detach:
2664 	dp_peer_ast_table_detach(soc);
2665 hash_detach:
2666 	dp_peer_find_hash_detach(soc);
2667 map_detach:
2668 	dp_peer_find_map_detach(soc);
2669 
2670 	return status;
2671 }
2672 #else
2673 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2674 {
2675 	QDF_STATUS status;
2676 
2677 	status = dp_peer_find_map_attach(soc);
2678 	if (!QDF_IS_STATUS_SUCCESS(status))
2679 		return status;
2680 
2681 	status = dp_peer_find_hash_attach(soc);
2682 	if (!QDF_IS_STATUS_SUCCESS(status))
2683 		goto map_detach;
2684 
2685 	return status;
2686 map_detach:
2687 	dp_peer_find_map_detach(soc);
2688 
2689 	return status;
2690 }
2691 #endif
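
/*
 * Design note: both dp_peer_find_attach() variants above use the
 * classic goto unwind ladder - when an attach stage fails, control
 * jumps to a label that tears down only the stages that already
 * succeeded, in reverse order, so no partially initialized state is
 * leaked.
 */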
2692 
2693 #ifdef IPA_OFFLOAD
2694 /*
2695  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
2696  * @soc: soc handle
2697  * @cb_ctxt: combination of peer_id and tid
2698  * @reo_status: reo status
2699  *
2700  * Return: None
2701  */
2702 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2703 				       union hal_reo_status *reo_status)
2704 {
2705 	struct dp_peer *peer = NULL;
2706 	struct dp_rx_tid *rx_tid = NULL;
2707 	unsigned long comb_peer_id_tid;
2708 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
2709 	uint16_t tid;
2710 	uint16_t peer_id;
2711 
2712 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2713 		dp_err("REO stats failure %d\n",
2714 		       queue_status->header.status);
2715 		return;
2716 	}
2717 	comb_peer_id_tid = (unsigned long)cb_ctxt;
2718 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
2719 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
2720 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
2721 	if (!peer)
2722 		return;
2723 	rx_tid  = &peer->rx_tid[tid];
2724 
2725 	if (!rx_tid) {
2726 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2727 		return;
2728 	}
2729 
2730 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
2731 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
2732 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2733 }
2734 
2735 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
2736 #endif
2737 
2738 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2739 	union hal_reo_status *reo_status)
2740 {
2741 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2742 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2743 
2744 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2745 		return;
2746 
2747 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2748 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2749 			       queue_status->header.status, rx_tid->tid);
2750 		return;
2751 	}
2752 
2753 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2754 		       "ssn: %d\n"
2755 		       "curr_idx  : %d\n"
2756 		       "pn_31_0   : %08x\n"
2757 		       "pn_63_32  : %08x\n"
2758 		       "pn_95_64  : %08x\n"
2759 		       "pn_127_96 : %08x\n"
2760 		       "last_rx_enq_tstamp : %08x\n"
2761 		       "last_rx_deq_tstamp : %08x\n"
2762 		       "rx_bitmap_31_0     : %08x\n"
2763 		       "rx_bitmap_63_32    : %08x\n"
2764 		       "rx_bitmap_95_64    : %08x\n"
2765 		       "rx_bitmap_127_96   : %08x\n"
2766 		       "rx_bitmap_159_128  : %08x\n"
2767 		       "rx_bitmap_191_160  : %08x\n"
2768 		       "rx_bitmap_223_192  : %08x\n"
2769 		       "rx_bitmap_255_224  : %08x\n",
2770 		       rx_tid->tid,
2771 		       queue_status->ssn, queue_status->curr_idx,
2772 		       queue_status->pn_31_0, queue_status->pn_63_32,
2773 		       queue_status->pn_95_64, queue_status->pn_127_96,
2774 		       queue_status->last_rx_enq_tstamp,
2775 		       queue_status->last_rx_deq_tstamp,
2776 		       queue_status->rx_bitmap_31_0,
2777 		       queue_status->rx_bitmap_63_32,
2778 		       queue_status->rx_bitmap_95_64,
2779 		       queue_status->rx_bitmap_127_96,
2780 		       queue_status->rx_bitmap_159_128,
2781 		       queue_status->rx_bitmap_191_160,
2782 		       queue_status->rx_bitmap_223_192,
2783 		       queue_status->rx_bitmap_255_224);
2784 
2785 	DP_PRINT_STATS(
2786 		       "curr_mpdu_cnt      : %d\n"
2787 		       "curr_msdu_cnt      : %d\n"
2788 		       "fwd_timeout_cnt    : %d\n"
2789 		       "fwd_bar_cnt        : %d\n"
2790 		       "dup_cnt            : %d\n"
2791 		       "frms_in_order_cnt  : %d\n"
2792 		       "bar_rcvd_cnt       : %d\n"
2793 		       "mpdu_frms_cnt      : %d\n"
2794 		       "msdu_frms_cnt      : %d\n"
2795 		       "total_byte_cnt     : %d\n"
2796 		       "late_recv_mpdu_cnt : %d\n"
2797 		       "win_jump_2k        : %d\n"
2798 		       "hole_cnt           : %d\n",
2799 		       queue_status->curr_mpdu_cnt,
2800 		       queue_status->curr_msdu_cnt,
2801 		       queue_status->fwd_timeout_cnt,
2802 		       queue_status->fwd_bar_cnt,
2803 		       queue_status->dup_cnt,
2804 		       queue_status->frms_in_order_cnt,
2805 		       queue_status->bar_rcvd_cnt,
2806 		       queue_status->mpdu_frms_cnt,
2807 		       queue_status->msdu_frms_cnt,
2808 		       queue_status->total_cnt,
2809 		       queue_status->late_recv_mpdu_cnt,
2810 		       queue_status->win_jump_2k,
2811 		       queue_status->hole_cnt);
2812 
2813 	DP_PRINT_STATS("Addba Req          : %d\n"
2814 			"Addba Resp         : %d\n"
2815 			"Addba Resp success : %d\n"
2816 			"Addba Resp failed  : %d\n"
2817 			"Delba Req received : %d\n"
2818 			"Delba Tx success   : %d\n"
2819 			"Delba Tx Fail      : %d\n"
2820 			"BA window size     : %d\n"
2821 			"Pn size            : %d\n",
2822 			rx_tid->num_of_addba_req,
2823 			rx_tid->num_of_addba_resp,
2824 			rx_tid->num_addba_rsp_success,
2825 			rx_tid->num_addba_rsp_failed,
2826 			rx_tid->num_of_delba_req,
2827 			rx_tid->delba_tx_success_cnt,
2828 			rx_tid->delba_tx_fail_cnt,
2829 			rx_tid->ba_win_size,
2830 			rx_tid->pn_size);
2831 }
2832 
2833 #ifdef REO_SHARED_QREF_TABLE_EN
2834 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2835 					struct dp_peer *peer)
2836 {
2837 	uint8_t tid;
2838 
2839 	if (peer->peer_id > soc->max_peer_id)
2840 		return;
2841 	if (IS_MLO_DP_LINK_PEER(peer))
2842 		return;
2843 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2844 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2845 			hal_reo_shared_qaddr_write(soc->hal_soc,
2846 						   peer->peer_id, tid, 0);
2847 	}
2848 }
2849 #endif
2850 
2851 /*
2852  * dp_peer_find_add_id() - map peer_id with peer
2853  * @soc: soc handle
2854  * @peer_mac_addr: peer mac address
2855  * @peer_id: peer id to be mapped
2856  * @hw_peer_id: HW ast index
2857  * @vdev_id: vdev_id
2858  * @peer_type: peer type (link or MLD)
2859  *
2860  * Return: peer on success
2861  *         NULL on failure
2862  */
2863 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2864 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2865 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2866 {
2867 	struct dp_peer *peer;
2868 	struct cdp_peer_info peer_info = { 0 };
2869 
2870 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2871 	/* check if there's already a peer object with this MAC address */
2872 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2873 				 false, peer_type);
2874 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2875 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2876 		    soc, peer, peer_id, vdev_id,
2877 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2878 
2879 	if (peer) {
2880 		/* peer's ref count was already incremented by
2881 		 * peer_find_hash_find
2882 		 */
2883 		dp_peer_info("%pK: ref_cnt: %d", soc,
2884 			     qdf_atomic_read(&peer->ref_cnt));
2885 
2886 		/*
2887 		 * if peer is in logical delete, CP triggered the delete
2888 		 * before the map was received; ignore this event
2889 		 */
2890 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2891 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2892 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2893 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2894 				 vdev_id);
2895 			return NULL;
2896 		}
2897 
2898 		if (peer->peer_id == HTT_INVALID_PEER) {
2899 			if (!IS_MLO_DP_MLD_PEER(peer))
2900 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2901 								   peer_id);
2902 		} else {
2903 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2904 			QDF_ASSERT(0);
2905 			return NULL;
2906 		}
2907 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2908 		if (soc->arch_ops.dp_partner_chips_map)
2909 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2910 
2911 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2912 		return peer;
2913 	}
2914 
2915 	return NULL;
2916 }
2917 
2918 #ifdef WLAN_FEATURE_11BE_MLO
2919 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2920 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2921 					 uint16_t peer_id)
2922 {
2923 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2924 }
2925 #else
2926 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2927 					 uint16_t peer_id)
2928 {
2929 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2930 }
2931 #endif
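
/*
 * Worked example (illustrative): with the non-reduced-width variant
 * above, a firmware peer_id of 5 yields
 * 5 | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S), i.e. the same
 * id with the ML-valid bit set, so MLD peer ids never collide with
 * link peer ids in peer_id_to_obj_map.
 */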
2932 
2933 QDF_STATUS
2934 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2935 			   uint8_t *peer_mac_addr,
2936 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2937 			   struct dp_mlo_link_info *mlo_link_info)
2938 {
2939 	struct dp_peer *peer = NULL;
2940 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2941 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2942 	uint8_t vdev_id = 0;
2943 	uint8_t is_wds = 0;
2944 	int i;
2945 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2946 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2947 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2948 	struct dp_soc *primary_soc;
2949 
2950 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2951 		soc, peer_id, ml_peer_id,
2952 		QDF_MAC_ADDR_REF(peer_mac_addr));
2953 
2954 	/* Get corresponding vdev ID for the peer based
2955 	 * on chip ID obtained from mlo peer_map event
2956 	 */
2957 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2958 		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
2959 			vdev_id = mlo_link_info[i].vdev_id;
2960 			break;
2961 		}
2962 	}
2963 
2964 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2965 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2966 
2967 	if (peer) {
2968 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2969 		    qdf_mem_cmp(peer->mac_addr.raw,
2970 				peer->vdev->mld_mac_addr.raw,
2971 				QDF_MAC_ADDR_SIZE) != 0) {
2972 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2973 			peer->bss_peer = 1;
2974 			if (peer->txrx_peer)
2975 				peer->txrx_peer->bss_peer = 1;
2976 		}
2977 
2978 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2979 			peer->vdev->bss_ast_hash = ast_hash;
2980 			peer->vdev->bss_ast_idx = hw_peer_id;
2981 		}
2982 
2983 		/* Add ast entry in case the self ast entry was
2984 		 * deleted due to a DP CP sync issue
2985 		 *
2986 		 * self_ast_entry is modified in the peer create
2987 		 * and peer unmap paths, which cannot run in
2988 		 * parallel with peer map, so no lock is needed
2989 		 * before referring to it
2990 		 */
2991 		if (!peer->self_ast_entry) {
2992 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2993 				QDF_MAC_ADDR_REF(peer_mac_addr));
2994 			dp_peer_add_ast(soc, peer,
2995 					peer_mac_addr,
2996 					type, 0);
2997 		}
2998 		/* If peer setup and hence rx_tid setup got called
2999 		 * before htt peer map then Qref write to LUT did not
3000 		 * happen in rx_tid setup as peer_id was invalid.
3001 		 * So defer Qref write to peer map handler. Check if
3002 		 * rx_tid qdesc for tid 0 is already setup and perform
3003 		 * qref write to LUT for Tid 0 and 16.
3004 		 *
3005 		 * Peer map could be obtained on assoc link, hence
3006 		 * change to primary link's soc.
3007 		 */
3008 		primary_soc = peer->vdev->pdev->soc;
3009 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
3010 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
3011 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3012 						   ml_peer_id,
3013 						   0,
3014 						   peer->rx_tid[0].hw_qdesc_paddr);
3015 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3016 						   ml_peer_id,
3017 						   DP_NON_QOS_TID,
3018 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3019 		}
3020 	}
3021 
3022 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3023 			      vdev_id, ast_hash, is_wds);
3024 
3025 	/*
3026 	 * If AST offload and host AST DB is enabled, populate AST entries on
3027 	 * host based on mlo peer map event from FW
3028 	 */
3029 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3030 		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
3031 					 hw_peer_id, vdev_id,
3032 					 ast_hash, is_wds);
3033 	}
3034 
3035 	return err;
3036 }
3037 #endif
3038 
3039 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3040 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
3041 			      uint8_t *peer_mac_addr)
3042 {
3043 	struct dp_vdev *vdev = NULL;
3044 
3045 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
3046 	if (vdev) {
3047 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
3048 				QDF_MAC_ADDR_SIZE) == 0) {
3049 			vdev->roaming_peer_status =
3050 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
3051 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
3052 				     QDF_MAC_ADDR_SIZE);
3053 		}
3054 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
3055 	}
3056 }
3057 #endif
3058 
3059 #ifdef WLAN_SUPPORT_PPEDS
3060 static void
3061 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3062 				     bool peer_map)
3063 {
3064 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
3065 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3066 								   peer_map);
3067 }
3068 #else
3069 static void
3070 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3071 				     bool peer_map)
3072 {
3073 }
3074 #endif
3075 
3076 /**
3077  * dp_rx_peer_map_handler() - handle peer map event from firmware
3078  * @soc: generic soc handle
3079  * @peer_id: peer_id from firmware
3080  * @hw_peer_id: ast index for this peer
3081  * @vdev_id: vdev ID
3082  * @peer_mac_addr: mac address of the peer
3083  * @ast_hash: ast hash value
3084  * @is_wds: flag to indicate peer map event for WDS ast entry
3085  *
3086  * associate the peer_id that firmware provided with peer entry
3087  * and update the ast table in the host with the hw_peer_id.
3088  *
3089  * Return: QDF_STATUS code
3090  */
3092 QDF_STATUS
3093 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
3094 		       uint16_t hw_peer_id, uint8_t vdev_id,
3095 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
3096 		       uint8_t is_wds)
3097 {
3098 	struct dp_peer *peer = NULL;
3099 	struct dp_vdev *vdev = NULL;
3100 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
3101 	QDF_STATUS err = QDF_STATUS_SUCCESS;
3102 
3103 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
3104 		soc, peer_id, hw_peer_id,
3105 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
3106 
3107 	/* Peer map event for WDS ast entry get the peer from
3108 	 * obj map
3109 	 */
3110 	if (is_wds) {
3111 		if (!soc->ast_offload_support) {
3112 			peer = dp_peer_get_ref_by_id(soc, peer_id,
3113 						     DP_MOD_ID_HTT);
3114 
3115 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
3116 					      hw_peer_id,
3117 					      vdev_id, ast_hash, is_wds);
3118 			if (peer)
3119 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3120 		}
3121 	} else {
3122 		/*
3123 		 * It's the responsibility of the CP and FW to ensure
3124 		 * that peer is created successfully. Ideally DP should
3125 		 * not hit the below condition for directly associated
3126 		 * peers.
3127 		 */
3128 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
3129 		    (hw_peer_id >=
3130 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
3131 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
3132 			qdf_assert_always(0);
3133 		}
3134 
3135 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
3136 					   hw_peer_id, vdev_id,
3137 					   CDP_LINK_PEER_TYPE);
3138 
3139 		if (peer) {
3140 			bool peer_map = true;
3141 
3142 			/* Updating ast_hash and ast_idx in peer level */
3143 			peer->ast_hash = ast_hash;
3144 			peer->ast_idx = hw_peer_id;
3145 			vdev = peer->vdev;
3146 			/* Only check for STA Vdev and peer is not for TDLS */
3147 			if (wlan_op_mode_sta == vdev->opmode &&
3148 			    !peer->is_tdls_peer) {
3149 				if (qdf_mem_cmp(peer->mac_addr.raw,
3150 						vdev->mac_addr.raw,
3151 						QDF_MAC_ADDR_SIZE) != 0) {
3152 					dp_info("%pK: STA vdev bss_peer", soc);
3153 					peer->bss_peer = 1;
3154 					if (peer->txrx_peer)
3155 						peer->txrx_peer->bss_peer = 1;
3156 				}
3157 
3158 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
3159 					ast_hash, hw_peer_id);
3160 				vdev->bss_ast_hash = ast_hash;
3161 				vdev->bss_ast_idx = hw_peer_id;
3162 
3163 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3164 								     peer_map);
3165 			}
3166 
3167 			/* Add ast entry in case the self ast entry was
3168 			 * deleted due to a DP CP sync issue
3169 			 *
3170 			 * self_ast_entry is modified in the peer create
3171 			 * and peer unmap paths, which cannot run in
3172 			 * parallel with peer map, so no lock is needed
3173 			 * before referring to it
3174 			 */
3175 			if (!soc->ast_offload_support &&
3176 				!peer->self_ast_entry) {
3177 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
3178 					QDF_MAC_ADDR_REF(peer_mac_addr));
3179 				dp_peer_add_ast(soc, peer,
3180 						peer_mac_addr,
3181 						type, 0);
3182 			}
3183 
3184 			/* If peer setup and hence rx_tid setup got called
3185 			 * before htt peer map then Qref write to LUT did
3186 			 * not happen in rx_tid setup as peer_id was invalid.
3187 			 * So defer Qref write to peer map handler. Check if
3188 			 * rx_tid qdesc for tid 0 is already setup perform qref
3189 			 * write to LUT for Tid 0 and 16.
3190 			 */
3191 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3192 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
3193 			    !IS_MLO_DP_LINK_PEER(peer)) {
3194 				hal_reo_shared_qaddr_write(soc->hal_soc,
3195 							   peer_id,
3196 							   0,
3197 							   peer->rx_tid[0].hw_qdesc_paddr);
3198 				hal_reo_shared_qaddr_write(soc->hal_soc,
3199 							   peer_id,
3200 							   DP_NON_QOS_TID,
3201 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3202 			}
3203 		}
3204 
3205 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3206 				      vdev_id, ast_hash, is_wds);
3207 	}
3208 
3209 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
3210 
3211 	/*
3212 	 * If AST offload and host AST DB is enabled, populate AST entries on
3213 	 * host based on peer map event from FW
3214 	 */
3215 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3216 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
3217 					 hw_peer_id, vdev_id,
3218 					 ast_hash, is_wds);
3219 	}
3220 
3221 	return err;
3222 }
3223 
3224 /**
3225  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
3226  * @soc: generic soc handle
3227  * @peer_id: peer_id from firmware
3228  * @vdev_id: vdev ID
3229  * @mac_addr: mac address of the peer or wds entry
3230  * @is_wds: flag to indicate peer unmap event for WDS ast entry
3231  * @free_wds_count: number of wds entries freed by FW with peer delete
3232  *
3233  * Return: none
3234  */
3235 void
3236 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3237 			 uint8_t vdev_id, uint8_t *mac_addr,
3238 			 uint8_t is_wds, uint32_t free_wds_count)
3239 {
3240 	struct dp_peer *peer;
3241 	struct dp_vdev *vdev = NULL;
3242 
3243 	/*
3244 	 * If FW AST offload is enabled and host AST DB is enabled,
3245 	 * the AST entries are created during peer map from FW.
3246 	 */
3247 	if (soc->ast_offload_support && is_wds) {
3248 		if (!soc->host_ast_db_enable)
3249 			return;
3250 	}
3251 
3252 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3253 
3254 	/*
3255 	 * Currently peer IDs are assigned for vdevs as well as peers.
3256 	 * If the peer ID is for a vdev, then the peer pointer stored
3257 	 * in peer_id_to_obj_map will be NULL.
3258 	 */
3259 	if (!peer) {
3260 		dp_err("Received unmap event for invalid peer_id %u",
3261 		       peer_id);
3262 		return;
3263 	}
3264 
3265 	/* If V2 peer map messages are enabled, the AST entry has to be
3266 	 * freed here
3267 	 */
3268 	if (is_wds) {
3269 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3270 						   mac_addr)) {
3271 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3272 			return;
3273 		}
3274 
3275 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3276 			  peer, peer->peer_id,
3277 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3278 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3279 			  is_wds);
3280 
3281 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3282 		return;
3283 	}
3284 
3285 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3286 
3287 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3288 		soc, peer_id, peer);
3289 
3290 	/* Clear entries in Qref LUT */
3291 	/* TODO: Check if this is to be called from
3292 	 * dp_peer_delete for MLO case if there is race between
3293 	 * new peer id assignment and still not having received
3294 	 * peer unmap for MLD peer with same peer id.
3295 	 */
3296 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3297 
3298 	vdev = peer->vdev;
3299 
3300 	/* only if peer is in STA mode and not tdls peer */
3301 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3302 		bool peer_map = false;
3303 
3304 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3305 	}
3306 
3307 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3308 
3309 	if (soc->arch_ops.dp_partner_chips_unmap)
3310 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3311 
3312 	peer->peer_id = HTT_INVALID_PEER;
3313 
3314 	/*
3315 	 * Reset ast flow mapping table
3316 	 */
3317 	if (!soc->ast_offload_support)
3318 		dp_peer_reset_flowq_map(peer);
3319 
3320 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3321 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3322 				peer_id, vdev_id, mac_addr);
3323 	}
3324 
3325 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3326 
3327 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3328 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3329 	/*
3330 	 * Remove a reference to the peer.
3331 	 * If there are no more references, delete the peer object.
3332 	 */
3333 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3334 }
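
/*
 * Note on the two unrefs at the end of dp_rx_peer_unmap_handler(): the
 * DP_MOD_ID_HTT unref drops the reference taken by
 * __dp_peer_get_ref_by_id() at the top of the handler, while the
 * DP_MOD_ID_CONFIG unref drops the long-lived reference taken when the
 * peer was mapped (dp_peer_find_add_id), allowing the peer object to
 * be freed once all remaining module references are released.
 */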
3335 
3336 #ifdef WLAN_FEATURE_11BE_MLO
3337 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3338 {
3339 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3340 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3341 	uint8_t vdev_id = DP_VDEV_ALL;
3342 	uint8_t is_wds = 0;
3343 
3344 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3345 		soc, peer_id);
3346 
3347 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3348 				 mac_addr, is_wds,
3349 				 DP_PEER_WDS_COUNT_INVALID);
3350 }
3351 #endif
3352 
3353 #ifndef AST_OFFLOAD_ENABLE
3354 void
3355 dp_peer_find_detach(struct dp_soc *soc)
3356 {
3357 	dp_soc_wds_detach(soc);
3358 	dp_peer_find_map_detach(soc);
3359 	dp_peer_find_hash_detach(soc);
3360 	dp_peer_ast_hash_detach(soc);
3361 	dp_peer_ast_table_detach(soc);
3362 	dp_peer_mec_hash_detach(soc);
3363 }
3364 #else
3365 void
3366 dp_peer_find_detach(struct dp_soc *soc)
3367 {
3368 	dp_peer_find_map_detach(soc);
3369 	dp_peer_find_hash_detach(soc);
3370 }
3371 #endif
3372 
3373 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
3374 	union hal_reo_status *reo_status)
3375 {
3376 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
3377 
3378 	if ((reo_status->rx_queue_status.header.status !=
3379 		HAL_REO_CMD_SUCCESS) &&
3380 		(reo_status->rx_queue_status.header.status !=
3381 		HAL_REO_CMD_DRAIN)) {
3382 		/* Should not happen normally. Just print error for now */
3383 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
3384 			    soc, reo_status->rx_queue_status.header.status,
3385 			    rx_tid->tid);
3386 	}
3387 }
3388 
3389 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
3390 {
3391 	struct ol_if_ops *ol_ops = NULL;
3392 	bool is_roaming = false;
3393 	uint8_t vdev_id = -1;
3394 	struct cdp_soc_t *soc;
3395 
3396 	if (!peer) {
3397 		dp_peer_info("Peer is NULL. No roaming possible");
3398 		return false;
3399 	}
3400 
3401 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
3402 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
3403 
3404 	if (ol_ops && ol_ops->is_roam_inprogress) {
3405 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
3406 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
3407 	}
3408 
3409 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
3410 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
3411 
3412 	return is_roaming;
3413 }
3414 
3415 #ifdef WLAN_FEATURE_11BE_MLO
3416 /**
3417  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
3418  *			     setup is necessary
3419  * @peer: DP peer handle
3420  *
3421  * Return: true - allow, false - disallow
3422  */
3423 static inline
3424 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3425 {
3426 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3427 		return false;
3428 
3429 	return true;
3430 }
3431 
3432 /**
3433  * dp_rx_tid_update_allow() - check if rx_tid update needed
3434  * @peer: DP peer handle
3435  *
3436  * Return: true - allow, false - disallow
3437  */
3438 static inline
3439 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3440 {
3441 	/* not as expected for MLO connection link peer */
3442 	if (IS_MLO_DP_LINK_PEER(peer)) {
3443 		QDF_BUG(0);
3444 		return false;
3445 	}
3446 
3447 	return true;
3448 }
3449 #else
3450 static inline
3451 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3452 {
3453 	return true;
3454 }
3455 
3456 static inline
3457 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3458 {
3459 	return true;
3460 }
3461 #endif
3462 
3463 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
3464 					 ba_window_size, uint32_t start_seq,
3465 					 bool bar_update)
3466 {
3467 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3468 	struct dp_soc *soc = peer->vdev->pdev->soc;
3469 	struct hal_reo_cmd_params params;
3470 
3471 	if (!dp_rx_tid_update_allow(peer)) {
3472 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3473 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3474 		return QDF_STATUS_E_FAILURE;
3475 	}
3476 
3477 	qdf_mem_zero(&params, sizeof(params));
3478 
3479 	params.std.need_status = 1;
3480 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3481 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3482 	params.u.upd_queue_params.update_ba_window_size = 1;
3483 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3484 
3485 	if (start_seq < IEEE80211_SEQ_MAX) {
3486 		params.u.upd_queue_params.update_ssn = 1;
3487 		params.u.upd_queue_params.ssn = start_seq;
3488 	} else {
3489 	    dp_set_ssn_valid_flag(&params, 0);
3490 	}
3491 
3492 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3493 			    dp_rx_tid_update_cb, rx_tid)) {
3494 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3495 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3496 	}
3497 
3498 	rx_tid->ba_win_size = ba_window_size;
3499 
3500 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3501 		return QDF_STATUS_E_PERM;
3502 
3503 	if (!bar_update)
3504 		dp_peer_rx_reorder_queue_setup(soc, peer,
3505 					       tid, ba_window_size);
3506 
3507 	return QDF_STATUS_SUCCESS;
3508 }
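
/*
 * Illustrative sketch only: a hypothetical ADDBA-response path using
 * dp_rx_tid_update_wifi3() to grow the reorder window to the
 * negotiated size. example_addba_update_ba_win(), tid, buf_size and
 * start_seq are assumed outputs of the ADDBA exchange.
 */
#if 0
static void example_addba_update_ba_win(struct dp_peer *peer, int tid,
					uint32_t buf_size,
					uint32_t start_seq)
{
	if (dp_rx_tid_update_wifi3(peer, tid, buf_size, start_seq,
				   false) != QDF_STATUS_SUCCESS)
		dp_peer_err("BA window update failed for tid %d", tid);
}
#endif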
3509 
3510 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3511 /*
3512  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3513  *                                    the deferred list
3514  * @soc: Datapath soc handle
3515  * @freedesc: REO DESC reference that needs to be freed
3516  *
3517  * Return: true if enqueued, else false
3518  */
3519 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3520 					   struct reo_desc_list_node *freedesc)
3521 {
3522 	struct reo_desc_deferred_freelist_node *desc;
3523 
3524 	if (!qdf_atomic_read(&soc->cmn_init_done))
3525 		return false;
3526 
3527 	desc = qdf_mem_malloc(sizeof(*desc));
3528 	if (!desc)
3529 		return false;
3530 
3531 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3532 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3533 	desc->hw_qdesc_vaddr_unaligned =
3534 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3535 	desc->free_ts = qdf_get_system_timestamp();
3536 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3537 
3538 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3539 	if (!soc->reo_desc_deferred_freelist_init) {
3540 		qdf_mem_free(desc);
3541 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3542 		return false;
3543 	}
3544 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3545 			     (qdf_list_node_t *)desc);
3546 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3547 
3548 	return true;
3549 }
3550 
3551 /*
3552  * dp_reo_desc_defer_free() - free the REO QDESCs in the deferred list
3553  *                            that have aged past the time threshold
3554  * @soc: Datapath soc handle
3555  *
3556  * Return: None
3558  */
3559 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3560 {
3561 	struct reo_desc_deferred_freelist_node *desc;
3562 	unsigned long curr_ts = qdf_get_system_timestamp();
3563 
3564 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3565 
3566 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3567 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3568 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3569 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3570 				      (qdf_list_node_t **)&desc);
3571 
3572 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3573 
3574 		qdf_mem_unmap_nbytes_single(soc->osdev,
3575 					    desc->hw_qdesc_paddr,
3576 					    QDF_DMA_BIDIRECTIONAL,
3577 					    desc->hw_qdesc_alloc_size);
3578 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3579 		qdf_mem_free(desc);
3580 
3581 		curr_ts = qdf_get_system_timestamp();
3582 	}
3583 
3584 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3585 }
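
/*
 * Design note: the deferred freelist is appended in free-timestamp
 * order and drained oldest-first, so dp_reo_desc_defer_free() can stop
 * at the first node younger than REO_DESC_DEFERRED_FREE_MS.
 */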
3586 #else
3587 static inline bool
3588 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3589 			       struct reo_desc_list_node *freedesc)
3590 {
3591 	return false;
3592 }
3593 
3594 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3595 {
3596 }
3597 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3598 
3599 /*
3600  * dp_reo_desc_free() - Callback to free reo descriptor memory after
3601  * HW cache flush
3602  *
3603  * @soc: DP SOC handle
3604  * @cb_ctxt: Callback context
3605  * @reo_status: REO command status
3606  */
3607 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3608 	union hal_reo_status *reo_status)
3609 {
3610 	struct reo_desc_list_node *freedesc =
3611 		(struct reo_desc_list_node *)cb_ctxt;
3612 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3613 	unsigned long curr_ts = qdf_get_system_timestamp();
3614 
3615 	if ((reo_status->fl_cache_status.header.status !=
3616 		HAL_REO_CMD_SUCCESS) &&
3617 		(reo_status->fl_cache_status.header.status !=
3618 		HAL_REO_CMD_DRAIN)) {
3619 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
3620 			    soc, reo_status->fl_cache_status.header.status,
3621 			    freedesc->rx_tid.tid);
3622 	}
3623 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3624 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3625 		     rx_tid->tid);
3626 
3627 	/* REO desc is enqueued to be freed at a later point
3628 	 * in time, just free the freedesc alone and return
3629 	 */
3630 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3631 		goto out;
3632 
3633 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3634 
3635 	qdf_mem_unmap_nbytes_single(soc->osdev,
3636 		rx_tid->hw_qdesc_paddr,
3637 		QDF_DMA_BIDIRECTIONAL,
3638 		rx_tid->hw_qdesc_alloc_size);
3639 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3640 out:
3641 	qdf_mem_free(freedesc);
3642 }
3643 
3644 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3645 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3646 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3647 {
3648 	if (dma_addr < 0x50000000)
3649 		return QDF_STATUS_E_FAILURE;
3650 	else
3651 		return QDF_STATUS_SUCCESS;
3652 }
3653 #else
3654 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3655 {
3656 	return QDF_STATUS_SUCCESS;
3657 }
3658 #endif
3659 
3660 /*
3661  * dp_rx_tid_setup_wifi3() - Setup receive TID state
3662  * @peer: Datapath peer handle
3663  * @tid: TID
3664  * @ba_window_size: BlockAck window size
3665  * @start_seq: Starting sequence number
3666  *
3667  * Return: QDF_STATUS code
3668  */
3669 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3670 				 uint32_t ba_window_size, uint32_t start_seq)
3671 {
3672 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3673 	struct dp_vdev *vdev = peer->vdev;
3674 	struct dp_soc *soc = vdev->pdev->soc;
3675 	uint32_t hw_qdesc_size;
3676 	uint32_t hw_qdesc_align;
3677 	int hal_pn_type;
3678 	void *hw_qdesc_vaddr;
3679 	uint32_t alloc_tries = 0;
3680 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3681 	struct dp_txrx_peer *txrx_peer;
3682 
3683 	if (!qdf_atomic_read(&peer->is_default_route_set))
3684 		return QDF_STATUS_E_FAILURE;
3685 
3686 	if (!dp_rx_tid_setup_allow(peer)) {
3687 		dp_peer_info("skip rx tid setup for peer " QDF_MAC_ADDR_FMT,
3688 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3689 		goto send_wmi_reo_cmd;
3690 	}
3691 
3692 	rx_tid->ba_win_size = ba_window_size;
3693 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3694 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3695 			start_seq, false);
3696 	rx_tid->delba_tx_status = 0;
3697 	rx_tid->ppdu_id_2k = 0;
3698 	rx_tid->num_of_addba_req = 0;
3699 	rx_tid->num_of_delba_req = 0;
3700 	rx_tid->num_of_addba_resp = 0;
3701 	rx_tid->num_addba_rsp_failed = 0;
3702 	rx_tid->num_addba_rsp_success = 0;
3703 	rx_tid->delba_tx_success_cnt = 0;
3704 	rx_tid->delba_tx_fail_cnt = 0;
3705 	rx_tid->statuscode = 0;
3706 
3707 	/* TODO: Allocating HW queue descriptors based on max BA window size
3708 	 * for all QOS TIDs so that same descriptor can be used later when
3709 	 * ADDBA request is received. This should be changed to allocate HW
3710 	 * queue descriptors based on BA window size being negotiated (0 for
3711 	 * non BA cases), and reallocate when BA window size changes and also
3712 	 * send WMI message to FW to change the REO queue descriptor in Rx
3713 	 * peer entry as part of dp_rx_tid_update.
3714 	 */
3715 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3716 					       ba_window_size, tid);
3717 
3718 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3719 	/* To avoid unnecessary extra allocation for alignment, try allocating
3720 	 * exact size and see if we already have aligned address.
3721 	 */
3722 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3723 
3724 try_desc_alloc:
3725 	rx_tid->hw_qdesc_vaddr_unaligned =
3726 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3727 
3728 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3729 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3730 			    soc, tid);
3731 		return QDF_STATUS_E_NOMEM;
3732 	}
3733 
3734 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3735 		hw_qdesc_align) {
3736 		/* Address allocated above is not aligned. Allocate extra
3737 		 * memory for alignment
3738 		 */
3739 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3740 		rx_tid->hw_qdesc_vaddr_unaligned =
3741 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3742 					hw_qdesc_align - 1);
3743 
3744 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3745 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3746 				    soc, tid);
3747 			return QDF_STATUS_E_NOMEM;
3748 		}
3749 
3750 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3751 			rx_tid->hw_qdesc_vaddr_unaligned,
3752 			hw_qdesc_align);
3753 
3754 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3755 			      soc, rx_tid->hw_qdesc_alloc_size,
3756 			      hw_qdesc_vaddr);
3757 
3758 	} else {
3759 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3760 	}
3761 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3762 
3763 	txrx_peer = dp_get_txrx_peer(peer);
3764 
3765 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3766 	 * Currently this is set based on htt indication
3767 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3768 	 */
3769 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
3770 	case cdp_sec_type_tkip_nomic:
3771 	case cdp_sec_type_aes_ccmp:
3772 	case cdp_sec_type_aes_ccmp_256:
3773 	case cdp_sec_type_aes_gcmp:
3774 	case cdp_sec_type_aes_gcmp_256:
3775 		hal_pn_type = HAL_PN_WPA;
3776 		break;
3777 	case cdp_sec_type_wapi:
3778 		if (vdev->opmode == wlan_op_mode_ap)
3779 			hal_pn_type = HAL_PN_WAPI_EVEN;
3780 		else
3781 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3782 		break;
3783 	default:
3784 		hal_pn_type = HAL_PN_NONE;
3785 		break;
3786 	}
3787 
3788 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3789 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3790 		vdev->vdev_stats_id);
3791 
3792 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3793 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3794 		&(rx_tid->hw_qdesc_paddr));
3795 
3796 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3797 			QDF_STATUS_SUCCESS) {
3798 		if (alloc_tries++ < 10) {
3799 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3800 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3801 			goto try_desc_alloc;
3802 		} else {
3803 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3804 				    soc, tid);
3805 			status = QDF_STATUS_E_NOMEM;
3806 			goto error;
3807 		}
3808 	}
3809 
3810 send_wmi_reo_cmd:
3811 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3812 		status = QDF_STATUS_E_PERM;
3813 		goto error;
3814 	}
3815 
3816 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3817 						tid, ba_window_size);
3818 	if (QDF_IS_STATUS_SUCCESS(status))
3819 		return status;
3820 
3821 error:
3822 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3823 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3824 		    QDF_STATUS_SUCCESS)
3825 			qdf_mem_unmap_nbytes_single(
3826 				soc->osdev,
3827 				rx_tid->hw_qdesc_paddr,
3828 				QDF_DMA_BIDIRECTIONAL,
3829 				rx_tid->hw_qdesc_alloc_size);
3830 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3831 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3832 		rx_tid->hw_qdesc_paddr = 0;
3833 	}
3834 	return status;
3835 }
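
/*
 * Annotation (illustrative, alignment value assumed): the allocation
 * strategy above first tries an exact-size malloc and keeps it only if
 * the address already satisfies hw_qdesc_align; otherwise it
 * over-allocates by align - 1 bytes so an aligned address always exists
 * inside the buffer. With hw_qdesc_align = 128, for example:
 *
 *	unaligned = qdf_mem_malloc(size + 128 - 1);
 *	aligned = (void *)qdf_align((unsigned long)unaligned, 128);
 *
 * Both pointers must be kept: 'aligned' is what HW is programmed with,
 * while qdf_mem_free() must later be called on 'unaligned'.
 */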
3836 
3837 #ifdef DP_UMAC_HW_RESET_SUPPORT
3838 static
3839 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
3840 {
3841 	int tid;
3842 
3843 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
3844 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3845 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
3846 
3847 		if (vaddr)
3848 			dp_reset_rx_reo_tid_queue(soc, vaddr,
3849 						  rx_tid->hw_qdesc_alloc_size);
3850 	}
3851 }
3852 
3853 void dp_reset_tid_q_setup(struct dp_soc *soc)
3854 {
3855 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
3856 }
3857 #endif
3858 #ifdef REO_DESC_DEFER_FREE
3859 /*
3860  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails,
3861  * add the desc back to the freelist and defer the deletion
3862  *
3863  * @soc: DP SOC handle
3864  * @desc: Base descriptor to be freed
3865  * @reo_status: REO command status
3866  */
3867 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3868 				 struct reo_desc_list_node *desc,
3869 				 union hal_reo_status *reo_status)
3870 {
3871 	desc->free_ts = qdf_get_system_timestamp();
3872 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3873 	qdf_list_insert_back(&soc->reo_desc_freelist,
3874 			     (qdf_list_node_t *)desc);
3875 }
3876 
3877 /*
3878  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3879  * queued to the cmd ring, to avoid a REO hang
3880  *
3881  * @list_size: REO desc list size to be cleaned
3882  */
3883 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3884 {
3885 	unsigned long curr_ts = qdf_get_system_timestamp();
3886 
3887 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3888 		dp_err_log("%lu:freedesc number %d in freelist",
3889 			   curr_ts, *list_size);
3890 		/* limit the batch queue size */
3891 		*list_size = REO_DESC_FREELIST_SIZE;
3892 	}
3893 }
3894 #else
3895 /*
3896  * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
3897  * cache fails, free the base REO desc anyway
3898  *
3899  * @soc: DP SOC handle
3900  * @desc: Base descriptor to be freed
3901  * @reo_status: REO command status
3902  */
3903 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3904 				 struct reo_desc_list_node *desc,
3905 				 union hal_reo_status *reo_status)
3906 {
3907 	if (reo_status) {
3908 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3909 		reo_status->fl_cache_status.header.status = 0;
3910 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3911 	}
3912 }
3913 
3914 /*
3915  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3916  * queued to the cmd ring, to avoid a REO hang
3917  *
3918  * @list_size: REO desc list size to be cleaned
3919  */
3920 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3921 {
3922 }
3923 #endif
3924 
3925 /*
3926  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3927  * cmd and re-insert desc into free list if send fails.
3928  *
3929  * @soc: DP SOC handle
3930  * @desc: desc with resend update cmd flag set
3931  * @rx_tid: Desc RX tid associated with update cmd for resetting
3932  * valid field to 0 in h/w
3933  *
3934  * Return: QDF status
3935  */
3936 static QDF_STATUS
3937 dp_resend_update_reo_cmd(struct dp_soc *soc,
3938 			 struct reo_desc_list_node *desc,
3939 			 struct dp_rx_tid *rx_tid)
3940 {
3941 	struct hal_reo_cmd_params params;
3942 
3943 	qdf_mem_zero(&params, sizeof(params));
3944 	params.std.need_status = 1;
3945 	params.std.addr_lo =
3946 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3947 	params.std.addr_hi =
3948 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3949 	params.u.upd_queue_params.update_vld = 1;
3950 	params.u.upd_queue_params.vld = 0;
3951 	desc->resend_update_reo_cmd = false;
3952 	/*
3953 	 * If the cmd send fails then set resend_update_reo_cmd flag
3954 	 * and insert the desc at the end of the free list to retry.
3955 	 */
3956 	if (dp_reo_send_cmd(soc,
3957 			    CMD_UPDATE_RX_REO_QUEUE,
3958 			    &params,
3959 			    dp_rx_tid_delete_cb,
3960 			    (void *)desc)
3961 	    != QDF_STATUS_SUCCESS) {
3962 		desc->resend_update_reo_cmd = true;
3963 		desc->free_ts = qdf_get_system_timestamp();
3964 		qdf_list_insert_back(&soc->reo_desc_freelist,
3965 				     (qdf_list_node_t *)desc);
3966 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3967 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3968 		return QDF_STATUS_E_FAILURE;
3969 	}
3970 
3971 	return QDF_STATUS_SUCCESS;
3972 }
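
/*
 * Annotation on the addr_lo/addr_hi split used for REO commands in this
 * file: the 64-bit hw_qdesc_paddr is programmed as two 32-bit command
 * fields. With a hypothetical address, for illustration:
 *
 *	qdf_dma_addr_t paddr = 0x123456780ULL;
 *	params.std.addr_lo = paddr & 0xffffffff;	// 0x23456780
 *	params.std.addr_hi = (uint64_t)paddr >> 32;	// 0x1
 */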
3973 
3974 /*
3975  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
3976  * after deleting the entries (i.e., setting valid=0)
3977  *
3978  * @soc: DP SOC handle
3979  * @cb_ctxt: Callback context
3980  * @reo_status: REO command status
3981  */
3982 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3983 			 union hal_reo_status *reo_status)
3984 {
3985 	struct reo_desc_list_node *freedesc =
3986 		(struct reo_desc_list_node *)cb_ctxt;
3987 	uint32_t list_size;
3988 	struct reo_desc_list_node *desc;
3989 	unsigned long curr_ts = qdf_get_system_timestamp();
3990 	uint32_t desc_size, tot_desc_size;
3991 	struct hal_reo_cmd_params params;
3992 	bool flush_failure = false;
3993 
3994 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
3995 
3996 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
3997 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3998 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
3999 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
4000 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
4001 		return;
4002 	} else if (reo_status->rx_queue_status.header.status !=
4003 		HAL_REO_CMD_SUCCESS) {
4004 		/* Should not happen normally. Just print error for now */
4005 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
4006 			   reo_status->rx_queue_status.header.status,
4007 			   freedesc->rx_tid.tid);
4008 	}
4009 
4010 	dp_peer_info("%pK: rx_tid: %d status: %d",
4011 		     soc, freedesc->rx_tid.tid,
4012 		     reo_status->rx_queue_status.header.status);
4013 
4014 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4015 	freedesc->free_ts = curr_ts;
4016 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
4017 		(qdf_list_node_t *)freedesc, &list_size);
4018 
4019 	/* On the MCL path the desc is added back to reo_desc_freelist when a
4020 	 * REO flush fails, so the number of REO queue descs pending in the
4021 	 * free list can grow beyond the REO_CMD_RING max size, flooding the
4022 	 * REO CMD ring and leaving REO HW in an unexpected condition. The
4023 	 * number of REO cmds in a batch operation therefore must be limited.
4024 	 */
4025 	dp_reo_limit_clean_batch_sz(&list_size);
4026 
4027 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
4028 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
4029 		((list_size >= REO_DESC_FREELIST_SIZE) ||
4030 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
4031 		(desc->resend_update_reo_cmd && list_size))) {
4032 		struct dp_rx_tid *rx_tid;
4033 
4034 		qdf_list_remove_front(&soc->reo_desc_freelist,
4035 				(qdf_list_node_t **)&desc);
4036 		list_size--;
4037 		rx_tid = &desc->rx_tid;
4038 
4039 		/* First process descs with resend_update_reo_cmd set */
4040 		if (desc->resend_update_reo_cmd) {
4041 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
4042 			    QDF_STATUS_SUCCESS)
4043 				break;
4044 			else
4045 				continue;
4046 		}
4047 
4048 		/* Flush and invalidate REO descriptor from HW cache: Base and
4049 		 * extension descriptors should be flushed separately */
4050 		if (desc->pending_ext_desc_size)
4051 			tot_desc_size = desc->pending_ext_desc_size;
4052 		else
4053 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
4054 		/* Get base descriptor size by passing non-qos TID */
4055 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
4056 						   DP_NON_QOS_TID);
4057 
4058 		/* Flush reo extension descriptors */
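		/* Worked example (illustrative): if tot_desc_size were
		 * 3 * desc_size, this loop sends CMD_FLUSH_CACHE for
		 * hw_qdesc_paddr + 2 * desc_size and then
		 * hw_qdesc_paddr + desc_size, stopping before offset 0;
		 * the base descriptor at hw_qdesc_paddr is flushed
		 * separately below with need_status set so that
		 * dp_reo_desc_free() runs on completion.
		 */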
4059 		while ((tot_desc_size -= desc_size) > 0) {
4060 			qdf_mem_zero(&params, sizeof(params));
4061 			params.std.addr_lo =
4062 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
4063 				tot_desc_size) & 0xffffffff;
4064 			params.std.addr_hi =
4065 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4066 
4067 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4068 							CMD_FLUSH_CACHE,
4069 							&params,
4070 							NULL,
4071 							NULL)) {
4072 				dp_info_rl("fail to send CMD_FLUSH_CACHE: "
4073 					   "tid %d desc %pK", rx_tid->tid,
4074 					   (void *)(rx_tid->hw_qdesc_paddr));
4075 				desc->pending_ext_desc_size = tot_desc_size +
4076 								      desc_size;
4077 				dp_reo_desc_clean_up(soc, desc, reo_status);
4078 				flush_failure = true;
4079 				break;
4080 			}
4081 		}
4082 
4083 		if (flush_failure)
4084 			break;
4085 		else
4086 			desc->pending_ext_desc_size = desc_size;
4087 
4088 		/* Flush base descriptor */
4089 		qdf_mem_zero(&params, sizeof(params));
4090 		params.std.need_status = 1;
4091 		params.std.addr_lo =
4092 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
4093 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4094 
4095 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4096 							  CMD_FLUSH_CACHE,
4097 							  &params,
4098 							  dp_reo_desc_free,
4099 							  (void *)desc)) {
4100 			union hal_reo_status reo_status;
4101 			/*
4102 			 * If dp_reo_send_cmd returns failure, the related TID
4103 			 * queue desc should be unmapped, and the local reo_desc
4104 			 * together with the TID queue desc must be freed.
4105 			 *
4106 			 * Here the desc_free function is invoked directly.
4107 			 *
4108 			 * On the MCL path, the desc is instead added back to
4109 			 * the free desc list and the deletion is deferred.
4110 			 */
4111 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
4112 				   rx_tid->tid);
4113 			dp_reo_desc_clean_up(soc, desc, &reo_status);
4114 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4115 			break;
4116 		}
4117 	}
4118 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4119 
4120 	dp_reo_desc_defer_free(soc);
4121 }
4122 
4123 /*
4124  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
4125  * @peer: Datapath peer handle
4126  * @tid: TID
4127  *
4128  * Return: 0 on success, error code on failure
4129  */
4130 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
4131 {
4132 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
4133 	struct dp_soc *soc = peer->vdev->pdev->soc;
4134 	struct hal_reo_cmd_params params;
4135 	struct reo_desc_list_node *freedesc =
4136 		qdf_mem_malloc(sizeof(*freedesc));
4137 
4138 	if (!freedesc) {
4139 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
4140 			    soc, tid);
4141 		qdf_assert(0);
4142 		return -ENOMEM;
4143 	}
4144 
4145 	freedesc->rx_tid = *rx_tid;
4146 	freedesc->resend_update_reo_cmd = false;
4147 
4148 	qdf_mem_zero(&params, sizeof(params));
4149 
4150 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
4151 
4152 	params.std.need_status = 1;
4153 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
4154 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4155 	params.u.upd_queue_params.update_vld = 1;
4156 	params.u.upd_queue_params.vld = 0;
4157 
4158 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
4159 			    dp_rx_tid_delete_cb, (void *)freedesc)
4160 		!= QDF_STATUS_SUCCESS) {
4161 		/* Defer the clean up to the call back context */
4162 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4163 		freedesc->free_ts = qdf_get_system_timestamp();
4164 		freedesc->resend_update_reo_cmd = true;
4165 		qdf_list_insert_front(&soc->reo_desc_freelist,
4166 				      (qdf_list_node_t *)freedesc);
4167 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4168 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4169 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
4170 	}
4171 
4172 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
4173 	rx_tid->hw_qdesc_alloc_size = 0;
4174 	rx_tid->hw_qdesc_paddr = 0;
4175 
4176 	return 0;
4177 }
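
/*
 * Annotation: freedesc above takes a by-value snapshot of the rx_tid
 * (hw_qdesc_paddr, alloc size and the unaligned vaddr) before the peer's
 * own rx_tid fields are cleared, so dp_rx_tid_delete_cb() can flush and
 * free the queue memory asynchronously once the CMD_UPDATE_RX_REO_QUEUE
 * command completes.
 */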
4178 
4179 #ifdef DP_LFR
4180 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
4181 {
4182 	int tid;
4183 
4184 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
4185 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
4186 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
4187 			      tid, peer, peer->local_id);
4188 	}
4189 }
4190 #else
4191 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
4192 #endif
4193 
4194 #ifdef WLAN_FEATURE_11BE_MLO
4195 /**
4196  * dp_peer_rx_tids_init() - initialize rx_tid state for each TID in peer
4197  * @peer: peer pointer
4198  *
4199  * Return: None
4200  */
4201 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4202 {
4203 	int tid;
4204 	struct dp_rx_tid *rx_tid;
4205 	struct dp_rx_tid_defrag *rx_tid_defrag;
4206 
4207 	if (!IS_MLO_DP_LINK_PEER(peer)) {
4208 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4209 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4210 
4211 			rx_tid_defrag->array = &rx_tid_defrag->base;
4212 			rx_tid_defrag->defrag_timeout_ms = 0;
4213 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4214 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4215 			rx_tid_defrag->base.head = NULL;
4216 			rx_tid_defrag->base.tail = NULL;
4217 			rx_tid_defrag->tid = tid;
4218 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
4219 		}
4220 	}
4221 
4222 	/* For MLO link peers other than the first assoc link,
4223 	 * do not initialize rx_tids again.
4224 	 */
4225 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
4226 		return;
4227 
4228 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4229 		rx_tid = &peer->rx_tid[tid];
4230 		rx_tid->tid = tid;
4231 		rx_tid->ba_win_size = 0;
4232 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4233 	}
4234 }
4235 #else
4236 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4237 {
4238 	int tid;
4239 	struct dp_rx_tid *rx_tid;
4240 	struct dp_rx_tid_defrag *rx_tid_defrag;
4241 
4242 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4243 		rx_tid = &peer->rx_tid[tid];
4244 
4245 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4246 		rx_tid->tid = tid;
4247 		rx_tid->ba_win_size = 0;
4248 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4249 
4250 		rx_tid_defrag->base.head = NULL;
4251 		rx_tid_defrag->base.tail = NULL;
4252 		rx_tid_defrag->tid = tid;
4253 		rx_tid_defrag->array = &rx_tid_defrag->base;
4254 		rx_tid_defrag->defrag_timeout_ms = 0;
4255 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4256 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4257 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
4258 	}
4259 }
4260 #endif
4261 
4262 /*
4263  * dp_peer_rx_init() - Initialize receive TID state
4264  * @pdev: Datapath pdev
4265  * @peer: Datapath peer
4266  *
4267  */
4268 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
4269 {
4270 	dp_peer_rx_tids_init(peer);
4271 
4272 	peer->active_ba_session_cnt = 0;
4273 	peer->hw_buffer_size = 0;
4274 	peer->kill_256_sessions = 0;
4275 
4276 	/* Setup default (non-qos) rx tid queue */
4277 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
4278 
4279 	/* Setup rx tid queue for TID 0.
4280 	 * Other queues will be setup on receiving the first packet,
4281 	 * which will cause a NULL REO queue error
4282 	 */
4283 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
4284 
4285 	/*
4286 	 * Setup the rest of TID's to handle LFR
4287 	 */
4288 	dp_peer_setup_remaining_tids(peer);
4289 
4290 	/*
4291 	 * Set security defaults: no PN check, no security. The target may
4292 	 * send a HTT SEC_IND message to overwrite these defaults.
4293 	 */
4294 	if (peer->txrx_peer)
4295 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
4296 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
4297 				cdp_sec_type_none;
4298 }
4299 
4300 /*
4301  * dp_peer_rx_cleanup() - Cleanup receive TID state
4302  * @vdev: Datapath vdev
4303  * @peer: Datapath peer
4304  *
4305  */
4306 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4307 {
4308 	int tid;
4309 	uint32_t tid_delete_mask = 0;
4310 
4311 	if (!peer->txrx_peer)
4312 		return;
4313 
4314 	dp_info("Remove tids for peer: %pK", peer);
4315 
4316 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4317 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
4318 		struct dp_rx_tid_defrag *defrag_rx_tid =
4319 				&peer->txrx_peer->rx_tid[tid];
4320 
4321 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4322 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
4323 			/* Cleanup defrag related resource */
4324 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
4325 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
4326 		}
4327 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4328 
4329 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4330 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
4331 			dp_rx_tid_delete_wifi3(peer, tid);
4332 
4333 			tid_delete_mask |= (1 << tid);
4334 		}
4335 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4336 	}
4337 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
4338 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
4339 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
4340 			peer->vdev->pdev->pdev_id,
4341 			peer->vdev->vdev_id, peer->mac_addr.raw,
4342 			tid_delete_mask);
4343 	}
4344 #endif
4345 }
4346 
4347 /*
4348  * dp_peer_cleanup() - Cleanup peer information
4349  * @vdev: Datapath vdev
4350  * @peer: Datapath peer
4351  *
4352  */
4353 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4354 {
4355 	enum wlan_op_mode vdev_opmode;
4356 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
4357 	struct dp_pdev *pdev = vdev->pdev;
4358 	struct dp_soc *soc = pdev->soc;
4359 
4360 	/* save vdev related member in case vdev freed */
4361 	vdev_opmode = vdev->opmode;
4362 
4363 	if (!IS_MLO_DP_MLD_PEER(peer))
4364 		dp_monitor_peer_tx_cleanup(vdev, peer);
4365 
4366 	/* cleanup the Rx reorder queues for this peer */
4367 	if (vdev_opmode != wlan_op_mode_monitor)
4368 		dp_peer_rx_cleanup(vdev, peer);
4369 
4370 	dp_peer_rx_tids_destroy(peer);
4371 
4372 	if (IS_MLO_DP_LINK_PEER(peer))
4373 		dp_link_peer_del_mld_peer(peer);
4374 	if (IS_MLO_DP_MLD_PEER(peer))
4375 		dp_mld_peer_deinit_link_peers_info(peer);
4376 
4377 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
4378 		     QDF_MAC_ADDR_SIZE);
4379 
4380 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
4381 		soc->cdp_soc.ol_ops->peer_unref_delete(
4382 				soc->ctrl_psoc,
4383 				vdev->pdev->pdev_id,
4384 				peer->mac_addr.raw, vdev_mac_addr,
4385 				vdev_opmode);
4386 }
4387 
4388 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
4389  *                                window size when a request with
4390  *                                64 window size is received.
4391  *                                This is done as a WAR since HW can
4392  *                                have only one setting per peer (64 or 256).
4393  *                                For HKv2, we use per tid buffersize setting
4394  *                                for 0 to per_tid_basize_max_tid. For tid
4395  *                                more than per_tid_basize_max_tid we use HKv1
4396  *                                method.
4397  * @peer: Datapath peer
4398  *
4399  * Return: void
4400  */
4401 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
4402 {
4403 	uint8_t delba_rcode = 0;
4404 	int tid;
4405 	struct dp_rx_tid *rx_tid = NULL;
4406 
4407 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
4408 	for (; tid < DP_MAX_TIDS; tid++) {
4409 		rx_tid = &peer->rx_tid[tid];
4410 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4411 
4412 		if (rx_tid->ba_win_size <= 64) {
4413 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4414 			continue;
4415 		} else {
4416 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
4417 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4418 				/* send delba */
4419 				if (!rx_tid->delba_tx_status) {
4420 					rx_tid->delba_tx_retry++;
4421 					rx_tid->delba_tx_status = 1;
4422 					rx_tid->delba_rcode =
4423 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
4424 					delba_rcode = rx_tid->delba_rcode;
4425 
4426 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4427 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4428 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4429 							peer->vdev->pdev->soc->ctrl_psoc,
4430 							peer->vdev->vdev_id,
4431 							peer->mac_addr.raw,
4432 							tid, delba_rcode,
4433 							CDP_DELBA_REASON_NONE);
4434 				} else {
4435 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4436 				}
4437 			} else {
4438 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4439 			}
4440 		}
4441 	}
4442 }
4443 
4444 /*
4445  * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
4446  *                                       response tx completion
4447  * @cdp_soc: Datapath soc handle
4448  * @peer_mac: Datapath peer mac address
4449  * @vdev_id: id of datapath vdev
4450  * @tid: TID number
4451  * @status: tx completion status
4452  * Return: 0 on success, error code on failure
4453 */
4454 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
4455 				      uint8_t *peer_mac,
4456 				      uint16_t vdev_id,
4457 				      uint8_t tid, int status)
4458 {
4459 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4460 					(struct dp_soc *)cdp_soc,
4461 					peer_mac, 0, vdev_id,
4462 					DP_MOD_ID_CDP);
4463 	struct dp_rx_tid *rx_tid = NULL;
4464 
4465 	if (!peer) {
4466 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4467 		goto fail;
4468 	}
4469 	rx_tid = &peer->rx_tid[tid];
4470 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4471 	if (status) {
4472 		rx_tid->num_addba_rsp_failed++;
4473 		if (rx_tid->hw_qdesc_vaddr_unaligned)
4474 			dp_rx_tid_update_wifi3(peer, tid, 1,
4475 					       IEEE80211_SEQ_MAX, false);
4476 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4477 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4478 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
4479 
4480 		goto success;
4481 	}
4482 
4483 	rx_tid->num_addba_rsp_success++;
4484 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
4485 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4486 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
4487 			    cdp_soc, tid);
4488 		goto fail;
4489 	}
4490 
4491 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
4492 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4493 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
4494 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4495 		goto fail;
4496 	}
4497 
4498 	if (dp_rx_tid_update_wifi3(peer, tid,
4499 				   rx_tid->ba_win_size,
4500 				   rx_tid->startseqnum,
4501 				   false)) {
4502 		dp_err("Failed to update REO SSN");
4503 	}
4504 
4505 	dp_info("tid %u window_size %u start_seq_num %u",
4506 		tid, rx_tid->ba_win_size,
4507 		rx_tid->startseqnum);
4508 
4509 	/* First Session */
4510 	if (peer->active_ba_session_cnt == 0) {
4511 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
4512 			peer->hw_buffer_size = 256;
4513 		else if (rx_tid->ba_win_size <= 1024 &&
4514 			 rx_tid->ba_win_size > 256)
4515 			peer->hw_buffer_size = 1024;
4516 		else
4517 			peer->hw_buffer_size = 64;
4518 	}
4519 
4520 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4521 
4522 	peer->active_ba_session_cnt++;
4523 
4524 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4525 
4526 	/* Kill any session having 256 buffer size
4527 	 * when 64 buffer size request is received.
4528 	 * Also, latch on to 64 as new buffer size.
4529 	 */
4530 	if (peer->kill_256_sessions) {
4531 		dp_teardown_256_ba_sessions(peer);
4532 		peer->kill_256_sessions = 0;
4533 	}
4534 
4535 success:
4536 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4537 	return QDF_STATUS_SUCCESS;
4538 
4539 fail:
4540 	if (peer)
4541 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4542 
4543 	return QDF_STATUS_E_FAILURE;
4544 }
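
/*
 * Illustrative mapping of the first-session bucket selection above,
 * using hypothetical window sizes: ba_win_size 48 latches
 * hw_buffer_size 64, ba_win_size 100 latches 256, and ba_win_size 512
 * latches 1024. The latched value then constrains later sessions via
 * dp_check_ba_buffersize().
 */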
4545 
4546 /*
4547  * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
4548  *
4549  * @cdp_soc: Datapath soc handle
4550  * @peer_mac: Datapath peer mac address
4551  * @vdev_id: id of datapath vdev
4552  * @tid: TID number
4553  * @dialogtoken: output dialogtoken
4554  * @statuscode: output status code
4555  * @buffersize: output BA window size
4556  * @batimeout: output BA timeout
4557 */
4558 QDF_STATUS
4559 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4560 			     uint16_t vdev_id, uint8_t tid,
4561 			     uint8_t *dialogtoken, uint16_t *statuscode,
4562 			     uint16_t *buffersize, uint16_t *batimeout)
4563 {
4564 	struct dp_rx_tid *rx_tid = NULL;
4565 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4566 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4567 						       peer_mac, 0, vdev_id,
4568 						       DP_MOD_ID_CDP);
4569 
4570 	if (!peer) {
4571 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4572 		return QDF_STATUS_E_FAILURE;
4573 	}
4574 	rx_tid = &peer->rx_tid[tid];
4575 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4576 	rx_tid->num_of_addba_resp++;
4577 	/* setup ADDBA response parameters */
4578 	*dialogtoken = rx_tid->dialogtoken;
4579 	*statuscode = rx_tid->statuscode;
4580 	*buffersize = rx_tid->ba_win_size;
4581 	*batimeout  = 0;
4582 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4583 
4584 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4585 
4586 	return status;
4587 }
4588 
4589 /* dp_check_ba_buffersize() - Check the buffer size in the request
4590  *                            and latch onto the size used by the
4591  *                            first active session.
4592  * @peer: Datapath peer
4593  * @tid: Tid
4594  * @buffersize: Block ack window size
4595  *
4596  * Return: void
4597  */
4598 static void dp_check_ba_buffersize(struct dp_peer *peer,
4599 				   uint16_t tid,
4600 				   uint16_t buffersize)
4601 {
4602 	struct dp_rx_tid *rx_tid = NULL;
4603 	struct dp_soc *soc = peer->vdev->pdev->soc;
4604 	uint16_t max_ba_window;
4605 
4606 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
4607 	dp_info("Input buffersize %d, max dp allowed %d",
4608 		buffersize, max_ba_window);
4609 	/* Adjust BA window size, restrict it to max DP allowed */
4610 	buffersize = QDF_MIN(buffersize, max_ba_window);
4611 
4612 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
4613 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4614 		soc->per_tid_basize_max_tid, tid, buffersize,
4615 		peer->hw_buffer_size);
4616 
4617 	rx_tid = &peer->rx_tid[tid];
4618 	if (soc->per_tid_basize_max_tid &&
4619 	    tid < soc->per_tid_basize_max_tid) {
4620 		rx_tid->ba_win_size = buffersize;
4621 		goto out;
4622 	} else {
4623 		if (peer->active_ba_session_cnt == 0) {
4624 			rx_tid->ba_win_size = buffersize;
4625 		} else {
4626 			if (peer->hw_buffer_size == 64) {
4627 				if (buffersize <= 64)
4628 					rx_tid->ba_win_size = buffersize;
4629 				else
4630 					rx_tid->ba_win_size = peer->hw_buffer_size;
4631 			} else if (peer->hw_buffer_size == 256) {
4632 				if (buffersize > 64) {
4633 					rx_tid->ba_win_size = buffersize;
4634 				} else {
4635 					rx_tid->ba_win_size = buffersize;
4636 					peer->hw_buffer_size = 64;
4637 					peer->kill_256_sessions = 1;
4638 				}
4639 			} else if (buffersize <= 1024) {
4640 				/* The checks above apply only to HK V2;
4641 				 * set the incoming buffer size for other
4642 				 * targets
4643 				 */
4644 				rx_tid->ba_win_size = buffersize;
4645 			} else {
4646 				dp_err("Invalid buffer size %d", buffersize);
4647 				qdf_assert_always(0);
4648 			}
4649 		}
4650 	}
4651 
4652 out:
4653 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
4654 		rx_tid->ba_win_size,
4655 		peer->hw_buffer_size,
4656 		peer->kill_256_sessions);
4657 }
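
/*
 * Worked example of the latch rule above (hypothetical sequence, no
 * per-tid buffer size support assumed): a first ADDBA with buffersize 100
 * leaves hw_buffer_size latched at 256 once the session goes active; a
 * later ADDBA on another TID with buffersize 64 keeps its requested
 * window but drops hw_buffer_size back to 64 and sets kill_256_sessions,
 * after which dp_teardown_256_ba_sessions() sends DELBA on every TID
 * still using a window larger than 64.
 */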
4658 
4659 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
4660 					uint8_t *peer_mac, uint16_t vdev_id,
4661 					uint8_t tid, uint16_t buffersize)
4662 {
4663 	struct dp_rx_tid *rx_tid = NULL;
4664 	struct dp_peer *peer;
4665 
4666 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4667 					      peer_mac, 0, vdev_id,
4668 					      DP_MOD_ID_CDP);
4669 	if (!peer) {
4670 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4671 		return QDF_STATUS_E_FAILURE;
4672 	}
4673 
4674 	rx_tid = &peer->rx_tid[tid];
4675 
4676 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4677 	rx_tid->ba_win_size = buffersize;
4678 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4679 
4680 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
4681 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
4682 
4683 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4684 
4685 	return QDF_STATUS_SUCCESS;
4686 }
4687 
4688 #define DP_RX_BA_SESSION_DISABLE  1
4689 
4690 /*
4691  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
4692  *
4693  * @cdp_soc: Datapath soc handle
4694  * @peer_mac: Datapath peer mac address
4695  * @vdev_id: id of datapath vdev
4696  * @dialogtoken: dialogtoken from ADDBA frame
4697  * @tid: TID number
4698  * @batimeout: BA timeout
4699  * @buffersize: BA window size
4700  * @startseqnum: Start seq. number received in BA sequence control
4701  *
4702  * Return: 0 on success, error code on failure
4703  */
4704 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4705 				  uint8_t *peer_mac,
4706 				  uint16_t vdev_id,
4707 				  uint8_t dialogtoken,
4708 				  uint16_t tid, uint16_t batimeout,
4709 				  uint16_t buffersize,
4710 				  uint16_t startseqnum)
4711 {
4712 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4713 	struct dp_rx_tid *rx_tid = NULL;
4714 	struct dp_peer *peer;
4715 
4716 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4717 					      peer_mac,
4718 					      0, vdev_id,
4719 					      DP_MOD_ID_CDP);
4720 
4721 	if (!peer) {
4722 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4723 		return QDF_STATUS_E_FAILURE;
4724 	}
4725 	rx_tid = &peer->rx_tid[tid];
4726 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4727 	rx_tid->num_of_addba_req++;
4728 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4729 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
4730 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4731 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4732 		peer->active_ba_session_cnt--;
4733 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4734 			      cdp_soc, tid);
4735 	}
4736 
4737 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4738 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4739 		status = QDF_STATUS_E_FAILURE;
4740 		goto fail;
4741 	}
4742 
4743 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4744 		dp_peer_info("%pK: disable BA session",
4745 			     cdp_soc);
4746 
4747 		buffersize = 1;
4748 	} else if (rx_tid->rx_ba_win_size_override) {
4749 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4750 			     rx_tid->rx_ba_win_size_override);
4751 
4752 		buffersize = rx_tid->rx_ba_win_size_override;
4753 	} else {
4754 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4755 			     buffersize);
4756 	}
4757 
4758 	dp_check_ba_buffersize(peer, tid, buffersize);
4759 
4760 	if (dp_rx_tid_setup_wifi3(peer, tid,
4761 	    rx_tid->ba_win_size, startseqnum)) {
4762 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4763 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4764 		status = QDF_STATUS_E_FAILURE;
4765 		goto fail;
4766 	}
4767 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4768 
4769 	rx_tid->dialogtoken = dialogtoken;
4770 	rx_tid->startseqnum = startseqnum;
4771 
4772 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4773 		rx_tid->statuscode = rx_tid->userstatuscode;
4774 	else
4775 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4776 
4777 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4778 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4779 
4780 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4781 
4782 fail:
4783 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4784 
4785 	return status;
4786 }
4787 
4788 /*
4789  * dp_set_addba_response() - Set a user defined ADDBA response status code
4790  *
4791  * @cdp_soc: Datapath soc handle
4792  * @peer_mac: Datapath peer mac address
4793  * @vdev_id: id of datapath vdev
4794  * @tid: TID number
4795  * @statuscode: response status code to be set
4796 */
4797 QDF_STATUS
4798 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4799 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4800 {
4801 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4802 					(struct dp_soc *)cdp_soc,
4803 					peer_mac, 0, vdev_id,
4804 					DP_MOD_ID_CDP);
4805 	struct dp_rx_tid *rx_tid;
4806 
4807 	if (!peer) {
4808 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4809 		return QDF_STATUS_E_FAILURE;
4810 	}
4811 
4812 	rx_tid = &peer->rx_tid[tid];
4813 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4814 	rx_tid->userstatuscode = statuscode;
4815 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4816 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4817 
4818 	return QDF_STATUS_SUCCESS;
4819 }
4820 
4821 /*
4822  * dp_delba_process_wifi3() - Process DELBA from peer
4823  * @cdp_soc: Datapath soc handle
4824  * @peer_mac: Datapath peer mac address
4825  * @vdev_id: id of datapath vdev
4826  * @tid: TID number
4827  * @reasoncode: Reason code received in DELBA frame
4828  *
4829  * Return: 0 on success, error code on failure
4830 */
4831 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4832 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4833 {
4834 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4835 	struct dp_rx_tid *rx_tid;
4836 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4837 					(struct dp_soc *)cdp_soc,
4838 					peer_mac, 0, vdev_id,
4839 					DP_MOD_ID_CDP);
4840 
4841 	if (!peer) {
4842 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4843 		return QDF_STATUS_E_FAILURE;
4844 	}
4845 	rx_tid = &peer->rx_tid[tid];
4846 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4847 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4848 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4849 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4850 		status = QDF_STATUS_E_FAILURE;
4851 		goto fail;
4852 	}
4853 	/* TODO: See if we can delete the existing REO queue descriptor and
4854 	 * replace with a new one without queue extension descript to save
4855 	 * memory
4856 	 */
4857 	rx_tid->delba_rcode = reasoncode;
4858 	rx_tid->num_of_delba_req++;
4859 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4860 
4861 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4862 	peer->active_ba_session_cnt--;
4863 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4864 fail:
4865 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4866 
4867 	return status;
4868 }
4869 
4870 /*
4871  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion and
4872  *                                  resend DELBA on failure
4873  * @cdp_soc: Datapath soc handle
4874  * @peer_mac: Datapath peer mac address
4875  * @vdev_id: id of datapath vdev
4876  * @tid: TID number
4877  * @status: tx completion status
4878  * Return: 0 on success, error code on failure
4879  */
4880 
4881 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4882 				 uint16_t vdev_id,
4883 				 uint8_t tid, int status)
4884 {
4885 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4886 	struct dp_rx_tid *rx_tid = NULL;
4887 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4888 					(struct dp_soc *)cdp_soc,
4889 					peer_mac, 0, vdev_id,
4890 					DP_MOD_ID_CDP);
4891 
4892 	if (!peer) {
4893 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4894 		return QDF_STATUS_E_FAILURE;
4895 	}
4896 	rx_tid = &peer->rx_tid[tid];
4897 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4898 	if (status) {
4899 		rx_tid->delba_tx_fail_cnt++;
4900 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4901 			rx_tid->delba_tx_retry = 0;
4902 			rx_tid->delba_tx_status = 0;
4903 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4904 		} else {
4905 			rx_tid->delba_tx_retry++;
4906 			rx_tid->delba_tx_status = 1;
4907 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4908 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4909 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4910 					peer->vdev->pdev->soc->ctrl_psoc,
4911 					peer->vdev->vdev_id,
4912 					peer->mac_addr.raw, tid,
4913 					rx_tid->delba_rcode,
4914 					CDP_DELBA_REASON_NONE);
4915 		}
4916 		goto end;
4917 	} else {
4918 		rx_tid->delba_tx_success_cnt++;
4919 		rx_tid->delba_tx_retry = 0;
4920 		rx_tid->delba_tx_status = 0;
4921 	}
4922 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4923 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4924 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4925 		peer->active_ba_session_cnt--;
4926 	}
4927 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4928 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4929 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4930 	}
4931 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4932 
4933 end:
4934 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4935 
4936 	return ret;
4937 }
4938 
4939 /**
4940  * dp_set_pn_check_wifi3() - enable PN check in REO for security
4941  * @soc: Datapath soc handle
4942  * @vdev_id: id of datapath vdev
4943  * @peer_mac: Datapath peer mac address
4944  * @sec_type: security type
4945  * @rx_pn: Receive pn starting number
4946  *
4947  * Return: QDF_STATUS
4948  *
4949  */
4950 
4951 QDF_STATUS
4952 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4953 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4954 		      uint32_t *rx_pn)
4955 {
4956 	struct dp_pdev *pdev;
4957 	int i;
4958 	uint8_t pn_size;
4959 	struct hal_reo_cmd_params params;
4960 	struct dp_peer *peer = NULL;
4961 	struct dp_vdev *vdev = NULL;
4962 
4963 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4964 				      peer_mac, 0, vdev_id,
4965 				      DP_MOD_ID_CDP);
4966 
4967 	if (!peer) {
4968 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4969 		return QDF_STATUS_E_FAILURE;
4970 	}
4971 
4972 	vdev = peer->vdev;
4973 
4974 	if (!vdev) {
4975 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
4976 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4977 		return QDF_STATUS_E_FAILURE;
4978 	}
4979 
4980 	pdev = vdev->pdev;
4981 	qdf_mem_zero(&params, sizeof(params));
4982 
4983 	params.std.need_status = 1;
4984 	params.u.upd_queue_params.update_pn_valid = 1;
4985 	params.u.upd_queue_params.update_pn_size = 1;
4986 	params.u.upd_queue_params.update_pn = 1;
4987 	params.u.upd_queue_params.update_pn_check_needed = 1;
4988 	params.u.upd_queue_params.update_svld = 1;
4989 	params.u.upd_queue_params.svld = 0;
4990 
4991 	switch (sec_type) {
4992 	case cdp_sec_type_tkip_nomic:
4993 	case cdp_sec_type_aes_ccmp:
4994 	case cdp_sec_type_aes_ccmp_256:
4995 	case cdp_sec_type_aes_gcmp:
4996 	case cdp_sec_type_aes_gcmp_256:
4997 		params.u.upd_queue_params.pn_check_needed = 1;
4998 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
4999 		pn_size = 48;
5000 		break;
5001 	case cdp_sec_type_wapi:
5002 		params.u.upd_queue_params.pn_check_needed = 1;
5003 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
5004 		pn_size = 128;
5005 		if (vdev->opmode == wlan_op_mode_ap) {
5006 			params.u.upd_queue_params.pn_even = 1;
5007 			params.u.upd_queue_params.update_pn_even = 1;
5008 		} else {
5009 			params.u.upd_queue_params.pn_uneven = 1;
5010 			params.u.upd_queue_params.update_pn_uneven = 1;
5011 		}
5012 		break;
5013 	default:
5014 		params.u.upd_queue_params.pn_check_needed = 0;
5015 		pn_size = 0;
5016 		break;
5017 	}
5018 
5019 
5020 	for (i = 0; i < DP_MAX_TIDS; i++) {
5021 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5022 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5023 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5024 			params.std.addr_lo =
5025 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5026 			params.std.addr_hi =
5027 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5028 
5029 			if (pn_size) {
5030 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
5031 					     soc, i, rx_pn[3], rx_pn[2],
5032 					     rx_pn[1], rx_pn[0]);
5033 				params.u.upd_queue_params.update_pn_valid = 1;
5034 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
5035 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
5036 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
5037 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
5038 			}
5039 			rx_tid->pn_size = pn_size;
5040 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
5041 					    CMD_UPDATE_RX_REO_QUEUE,
5042 					    &params, dp_rx_tid_update_cb,
5043 					    rx_tid)) {
5044 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE: "
5045 					   "tid %d desc %pK", rx_tid->tid,
5046 					   (void *)(rx_tid->hw_qdesc_paddr));
5047 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
5048 					     rx.err.reo_cmd_send_fail, 1);
5049 			}
5050 		} else {
5051 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
5052 		}
5053 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5054 	}
5055 
5056 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5057 
5058 	return QDF_STATUS_SUCCESS;
5059 }
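
/*
 * Annotation: rx_pn[] carries the start PN as four 32-bit words, mapped
 * above to pn_31_0 .. pn_127_96. For a hypothetical 48-bit CCMP PN of
 * 0x112233445566, rx_pn[0] = 0x33445566 and rx_pn[1] = 0x00001122; the
 * upper two words are unused since pn_size is PN_SIZE_48.
 */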
5060 
5061 
5062 /**
5063  * dp_set_key_sec_type_wifi3() - set security mode of key
5064  * @soc: Datapath soc handle
5065  * @vdev_id: id of datapath vdev
5066  * @peer_mac: Datapath peer mac address
5067  * @sec_type: security type
5068  * @is_unicast: key type (unicast if true, multicast otherwise)
5069  *
5070  * Return: QDF_STATUS
5071  *
5072  */
5073 
5074 QDF_STATUS
5075 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
5076 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
5077 			  bool is_unicast)
5078 {
5079 	struct dp_peer *peer =
5080 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5081 						       peer_mac, 0, vdev_id,
5082 						       DP_MOD_ID_CDP);
5083 	int sec_index;
5084 
5085 	if (!peer) {
5086 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
5087 		return QDF_STATUS_E_FAILURE;
5088 	}
5089 
5090 	if (!peer->txrx_peer) {
5091 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5092 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
5093 		return QDF_STATUS_E_FAILURE;
5094 	}
5095 
5096 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5097 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5098 		     is_unicast ? "ucast" : "mcast", sec_type);
5099 
5100 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5101 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
5102 
5103 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5104 
5105 	return QDF_STATUS_SUCCESS;
5106 }
5107 
5108 void
5109 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
5110 		      enum cdp_sec_type sec_type, int is_unicast,
5111 		      u_int32_t *michael_key,
5112 		      u_int32_t *rx_pn)
5113 {
5114 	struct dp_peer *peer;
5115 	struct dp_txrx_peer *txrx_peer;
5116 	int sec_index;
5117 
5118 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5119 	if (!peer) {
5120 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
5121 			    peer_id);
5122 		return;
5123 	}
5124 	txrx_peer = dp_get_txrx_peer(peer);
5125 	if (!txrx_peer) {
5126 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
5127 			    peer_id);
5128 		/* drop the reference taken by dp_peer_get_ref_by_id above */
5128 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5129 		return;
5129 	}
5130 
5131 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5132 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5133 			  is_unicast ? "ucast" : "mcast", sec_type);
5134 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5135 
5136 	txrx_peer->security[sec_index].sec_type = sec_type;
5137 #ifdef notyet /* TODO: See if this is required for defrag support */
5138 	/* michael key only valid for TKIP, but for simplicity,
5139 	 * copy it anyway
5140 	 */
5141 	qdf_mem_copy(
5142 		&peer->txrx_peer->security[sec_index].michael_key[0],
5143 		michael_key,
5144 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
5145 #ifdef BIG_ENDIAN_HOST
5146 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
5147 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
5148 #endif /* BIG_ENDIAN_HOST */
5149 #endif
5150 
5151 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
5152 	if (sec_type != cdp_sec_type_wapi) {
5153 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
5154 	} else {
5155 		for (i = 0; i < DP_MAX_TIDS; i++) {
5156 			/*
5157 			 * Setting PN valid bit for WAPI sec_type,
5158 			 * since WAPI PN has to be started with predefined value
5159 			 */
5160 			peer->tids_last_pn_valid[i] = 1;
5161 			qdf_mem_copy(
5162 				(u_int8_t *) &peer->tids_last_pn[i],
5163 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
5164 			peer->tids_last_pn[i].pn128[1] =
5165 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
5166 			peer->tids_last_pn[i].pn128[0] =
5167 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
5168 		}
5169 	}
5170 #endif
5171 	/* TODO: Update HW TID queue with PN check parameters (pn type for
5172 	 * all security types and last pn for WAPI) once REO command API
5173 	 * is available
5174 	 */
5175 
5176 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5177 }
5178 
5179 #ifdef QCA_PEER_EXT_STATS
5180 /*
5181  * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
5182  *                                 stats context
5183  * @soc: DP SoC context
5184  * @txrx_peer: DP txrx peer context
5185  *
5186  * Allocate the peer delay stats context
5187  *
5188  * Return: QDF_STATUS_SUCCESS if allocation is
5189  *	   successful
5190  */
5191 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
5192 					 struct dp_txrx_peer *txrx_peer)
5193 {
5194 	uint8_t tid, ctx_id;
5195 
5196 	if (!soc || !txrx_peer) {
5197 		dp_warn("Null soc %pK or peer %pK", soc, txrx_peer);
5198 		return QDF_STATUS_E_INVAL;
5199 	}
5200 
5201 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5202 		return QDF_STATUS_SUCCESS;
5203 
5204 	/*
5205 	 * Allocate memory for peer extended stats.
5206 	 */
5207 	txrx_peer->delay_stats =
5208 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
5209 	if (!txrx_peer->delay_stats) {
5210 		dp_err("Peer extended stats obj alloc failed!!");
5211 		return QDF_STATUS_E_NOMEM;
5212 	}
5213 
5214 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
5215 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
5216 			struct cdp_delay_tx_stats *tx_delay =
5217 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
5218 			struct cdp_delay_rx_stats *rx_delay =
5219 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
5220 
5221 			dp_hist_init(&tx_delay->tx_swq_delay,
5222 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
5223 			dp_hist_init(&tx_delay->hwtx_delay,
5224 				     CDP_HIST_TYPE_HW_COMP_DELAY);
5225 			dp_hist_init(&rx_delay->to_stack_delay,
5226 				     CDP_HIST_TYPE_REAP_STACK);
5227 		}
5228 	}
5229 
5230 	return QDF_STATUS_SUCCESS;
5231 }
5232 
5233 /*
5234  * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
5235  * @soc: DP SoC context
5235  * @txrx_peer: txrx DP peer context
5236  *
5237  * Free the peer delay stats context
5238  *
5239  * Return: Void
5240  */
5241 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
5242 				     struct dp_txrx_peer *txrx_peer)
5243 {
5244 	if (!txrx_peer) {
5245 		dp_warn("peer_ext dealloc failed due to NULL peer object");
5246 		return;
5247 	}
5248 
5249 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5250 		return;
5251 
5252 	if (!txrx_peer->delay_stats)
5253 		return;
5254 
5255 	qdf_mem_free(txrx_peer->delay_stats);
5256 	txrx_peer->delay_stats = NULL;
5257 }
5258 
5259 /**
5260  * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
5261  *
5262  * @txrx_peer: dp_txrx_peer handle
5263  *
5264  * Return: void
5265  */
5266 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5267 {
5268 	if (txrx_peer->delay_stats)
5269 		qdf_mem_zero(txrx_peer->delay_stats,
5270 			     sizeof(struct dp_peer_delay_stats));
5271 }
5272 #endif
5273 
5274 #ifdef WLAN_PEER_JITTER
5275 /**
5276  * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
5277  *
5278  * @pdev: Datapath pdev handle
5279  * @txrx_peer: dp_txrx_peer handle
5280  *
5281  * Return: QDF_STATUS
5282  */
5283 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
5284 					  struct dp_txrx_peer *txrx_peer)
5285 {
5286 	if (!pdev || !txrx_peer) {
5287 		dp_warn("Null pdev or peer");
5288 		return QDF_STATUS_E_INVAL;
5289 	}
5290 
5291 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5292 		return QDF_STATUS_SUCCESS;
5293 
5294 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5295 		/*
5296 		 * Allocate memory on per tid basis when nss is enabled
5297 		 */
5298 		txrx_peer->jitter_stats =
5299 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5300 					* DP_MAX_TIDS);
5301 	} else {
5302 		/*
5303 		 * Allocate memory on per tid per ring basis
5304 		 */
5305 		txrx_peer->jitter_stats =
5306 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5307 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
5308 	}
5309 
5310 	if (!txrx_peer->jitter_stats) {
5311 		dp_warn("Jitter stats obj alloc failed!!");
5312 		return QDF_STATUS_E_NOMEM;
5313 	}
5314 
5315 	return QDF_STATUS_SUCCESS;
5316 }
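
/*
 * Sizing note (illustrative): with nss offload enabled the context holds
 * one cdp_peer_tid_stats entry per TID (DP_MAX_TIDS total); otherwise it
 * holds one entry per TID per rx context
 * (DP_MAX_TIDS * CDP_MAX_TXRX_CTX). dp_peer_jitter_stats_ctx_clr() below
 * zeroes whichever count was allocated.
 */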
5317 
5318 /**
5319  * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
5320  *
5321  * @pdev: Datapath pdev handle
5322  * @txrx_peer: dp_txrx_peer handle
5323  *
5324  * Return: void
5325  */
5326 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
5327 				      struct dp_txrx_peer *txrx_peer)
5328 {
5329 	if (!pdev || !txrx_peer) {
5330 		dp_warn("Null pdev or peer");
5331 		return;
5332 	}
5333 
5334 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5335 		return;
5336 
5337 	if (txrx_peer->jitter_stats) {
5338 		qdf_mem_free(txrx_peer->jitter_stats);
5339 		txrx_peer->jitter_stats = NULL;
5340 	}
5341 }
5342 
5343 /**
5344  * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
5345  *
5346  * @txrx_peer: dp_txrx_peer handle
5347  *
5348  * Return: void
5349  */
5350 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5351 {
5352 	struct cdp_peer_tid_stats *jitter_stats = NULL;
5353 
5354 	if (!txrx_peer) {
5355 		dp_warn("Null peer");
5356 		return;
5357 	}
5358 
5359 	if (!wlan_cfg_is_peer_jitter_stats_enabled(txrx_peer->
5360 						   vdev->
5361 						   pdev->soc->wlan_cfg_ctx))
5362 		return;
5363 
5364 	jitter_stats = txrx_peer->jitter_stats;
5365 	if (!jitter_stats)
5366 		return;
5367 
5368 	if (wlan_cfg_get_dp_pdev_nss_enabled(txrx_peer->
5369 					     vdev->pdev->wlan_cfg_ctx))
5370 		qdf_mem_zero(jitter_stats,
5371 			     sizeof(struct cdp_peer_tid_stats) *
5372 			     DP_MAX_TIDS);
5374 	else
5375 		qdf_mem_zero(jitter_stats,
5376 			     sizeof(struct cdp_peer_tid_stats) *
5377 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
5379 }
5380 #endif
5381 
5382 QDF_STATUS
5383 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
5384 			uint8_t tid, uint16_t win_sz)
5385 {
5386 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
5387 	struct dp_peer *peer;
5388 	struct dp_rx_tid *rx_tid;
5389 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5390 
5391 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5392 
5393 	if (!peer) {
5394 		dp_peer_err("%pK: Couldn't find peer from ID %d",
5395 			    soc, peer_id);
5396 		return QDF_STATUS_E_FAILURE;
5397 	}
5398 
5399 	qdf_assert_always(tid < DP_MAX_TIDS);
5400 
5401 	rx_tid = &peer->rx_tid[tid];
5402 
5403 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
5404 		if (!rx_tid->delba_tx_status) {
5405 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
5406 				     soc, peer_id, tid, win_sz);
5407 
5408 			qdf_spin_lock_bh(&rx_tid->tid_lock);
5409 
5410 			rx_tid->delba_tx_status = 1;
5411 
5412 			rx_tid->rx_ba_win_size_override =
5413 			    qdf_min((uint16_t)63, win_sz);
5414 
5415 			rx_tid->delba_rcode =
5416 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
5417 
5418 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
5419 
5420 			if (soc->cdp_soc.ol_ops->send_delba)
5421 				soc->cdp_soc.ol_ops->send_delba(
5422 					peer->vdev->pdev->soc->ctrl_psoc,
5423 					peer->vdev->vdev_id,
5424 					peer->mac_addr.raw,
5425 					tid,
5426 					rx_tid->delba_rcode,
5427 					CDP_DELBA_REASON_NONE);
5428 		}
5429 	} else {
5430 		dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid);
5431 		status = QDF_STATUS_E_FAILURE;
5432 	}
5433 
5434 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5435 
5436 	return status;
5437 }
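
/*
 * Worked example of the DELBA indication above: the target asks the host
 * to renegotiate a smaller RX BA window, so the handler clamps the
 * override and, if the ol_ops hook is wired, sends a DELBA with reason
 * QOS_SETUP_REQUIRED so the remote peer re-ADDBAs with the reduced size.
 *
 *	dp_rx_delba_ind_handler(soc, peer_id, tid, 256);
 *	    -> rx_ba_win_size_override = min(63, 256) == 63
 */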
5438 
5439 #ifdef DP_PEER_EXTENDED_API
5440 /**
5441  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
5442  * @soc: DP soc handle
5443  * @txrx_peer: Core txrx_peer handle
5444  * @set_bw: enum of bandwidth to be set for this peer connection
5445  *
5446  * Return: None
5447  */
5448 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
5449 			   enum cdp_peer_bw set_bw)
5450 {
5451 	if (!txrx_peer)
5452 		return;
5453 
5454 	txrx_peer->bw = set_bw;
5455 
5456 	switch (set_bw) {
5457 	case CDP_160_MHZ:
5458 	case CDP_320_MHZ:
5459 		txrx_peer->mpdu_retry_threshold =
5460 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
5461 		break;
5462 	case CDP_20_MHZ:
5463 	case CDP_40_MHZ:
5464 	case CDP_80_MHZ:
5465 	default:
5466 		txrx_peer->mpdu_retry_threshold =
5467 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
5468 		break;
5469 	}
5470 
5471 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
5472 		txrx_peer->peer_id, txrx_peer->bw,
5473 		txrx_peer->mpdu_retry_threshold);
5474 }
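
/*
 * Worked example (threshold values come from the soc cfg and are
 * hypothetical here): with mpdu_retry_threshold_1 = 12 and
 * mpdu_retry_threshold_2 = 24,
 *
 *	dp_peer_set_bw(soc, txrx_peer, CDP_320_MHZ);
 *	    -> txrx_peer->mpdu_retry_threshold == 24
 *	dp_peer_set_bw(soc, txrx_peer, CDP_40_MHZ);
 *	    -> txrx_peer->mpdu_retry_threshold == 12
 *
 * i.e. peers on wider channels get the higher retry threshold.
 */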
5475 
5476 #ifdef WLAN_FEATURE_11BE_MLO
5477 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5478 			    struct ol_txrx_desc_type *sta_desc)
5479 {
5480 	struct dp_peer *peer;
5481 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5482 
5483 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5484 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5485 
5486 	if (!peer)
5487 		return QDF_STATUS_E_FAULT;
5488 
5489 	qdf_spin_lock_bh(&peer->peer_info_lock);
5490 	peer->state = OL_TXRX_PEER_STATE_CONN;
5491 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5492 
5493 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5494 
5495 	dp_rx_flush_rx_cached(peer, false);
5496 
5497 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5498 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
5499 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
5500 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
5501 		peer->mld_peer->state = peer->state;
5502 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
5503 		dp_rx_flush_rx_cached(peer->mld_peer, false);
5504 	}
5505 
5506 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5507 
5508 	return QDF_STATUS_SUCCESS;
5509 }
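
/*
 * Caller-side sketch (illustrative; only the fields this function reads
 * are shown): each MLO link peer is registered individually, and the MLD
 * peer state/rx-cache handling above runs only for the first link.
 *
 *	struct ol_txrx_desc_type desc = { 0 };
 *
 *	qdf_mem_copy(desc.peer_addr.bytes, link_mac, QDF_MAC_ADDR_SIZE);
 *	desc.bw = CDP_80_MHZ;
 *	dp_register_peer(soc_hdl, pdev_id, &desc);
 */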
5510 
5511 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5512 				enum ol_txrx_peer_state state)
5513 {
5514 	struct dp_peer *peer;
5515 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5516 
5517 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5518 				       DP_MOD_ID_CDP);
5519 	if (!peer) {
5520 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
5521 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5522 		return QDF_STATUS_E_FAILURE;
5523 	}
5524 	peer->state = state;
5525 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5526 
5527 	if (peer->txrx_peer)
5528 		peer->txrx_peer->authorize = peer->authorize;
5529 
5530 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
5531 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5532 		     peer->state);
5533 
5534 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5535 		peer->mld_peer->state = peer->state;
5536 		peer->mld_peer->txrx_peer->authorize = peer->authorize;
5537 		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
5538 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
5539 			     peer->mld_peer->state);
5540 	}
5541 
5542 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5543 	 * Decrement it here.
5544 	 */
5545 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5546 
5547 	return QDF_STATUS_SUCCESS;
5548 }
5549 #else
5550 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5551 			    struct ol_txrx_desc_type *sta_desc)
5552 {
5553 	struct dp_peer *peer;
5554 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5555 
5556 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5557 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5558 
5559 	if (!peer)
5560 		return QDF_STATUS_E_FAULT;
5561 
5562 	qdf_spin_lock_bh(&peer->peer_info_lock);
5563 	peer->state = OL_TXRX_PEER_STATE_CONN;
5564 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5565 
5566 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5567 
5568 	dp_rx_flush_rx_cached(peer, false);
5569 
5570 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5571 
5572 	return QDF_STATUS_SUCCESS;
5573 }
5574 
5575 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5576 				enum ol_txrx_peer_state state)
5577 {
5578 	struct dp_peer *peer;
5579 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5580 
5581 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5582 				       DP_MOD_ID_CDP);
5583 	if (!peer) {
5584 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
5585 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5586 		return QDF_STATUS_E_FAILURE;
5587 	}
5588 	peer->state = state;
5589 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5590 
5591 	if (peer->txrx_peer)
5592 		peer->txrx_peer->authorize = peer->authorize;
5593 
5594 	dp_info("peer %pK state %d", peer, peer->state);
5595 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5596 	 * Decrement it here.
5597 	 */
5598 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5599 
5600 	return QDF_STATUS_SUCCESS;
5601 }
5602 #endif
5603 
5604 QDF_STATUS
5605 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5606 	      struct qdf_mac_addr peer_addr)
5607 {
5608 	struct dp_peer *peer;
5609 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5610 
5611 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
5612 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5613 	if (!peer || !peer->valid)
5614 		return QDF_STATUS_E_FAULT;
5615 
5616 	dp_clear_peer_internal(soc, peer);
5617 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5618 	return QDF_STATUS_SUCCESS;
5619 }
5620 
5621 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5622 			 uint8_t *vdev_id)
5623 {
5624 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5625 	struct dp_peer *peer =
5626 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5627 				       DP_MOD_ID_CDP);
5628 
5629 	if (!peer)
5630 		return QDF_STATUS_E_FAILURE;
5631 
5632 	dp_info("peer %pK vdev %pK vdev id %d",
5633 		peer, peer->vdev, peer->vdev->vdev_id);
5634 	*vdev_id = peer->vdev->vdev_id;
5635 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5636 	 * Decrement it here.
5637 	 */
5638 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5639 
5640 	return QDF_STATUS_SUCCESS;
5641 }
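
/*
 * Usage sketch (illustrative): resolve which vdev a connected peer
 * belongs to from its MAC address.
 *
 *	uint8_t vdev_id;
 *
 *	if (dp_get_vdevid(soc_hdl, peer_mac, &vdev_id) ==
 *	    QDF_STATUS_SUCCESS)
 *		... vdev_id is now valid ...
 */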
5642 
5643 struct cdp_vdev *
5644 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
5645 			 struct qdf_mac_addr peer_addr)
5646 {
5647 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5648 	struct dp_peer *peer = NULL;
5649 	struct cdp_vdev *vdev = NULL;
5650 
5651 	if (!pdev) {
5652 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
5653 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
5654 		return NULL;
5655 	}
5656 
5657 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
5658 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
5659 	if (!peer) {
5660 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5661 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
5662 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
5663 		return NULL;
5664 	}
5665 
5666 	vdev = (struct cdp_vdev *)peer->vdev;
5667 
5668 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5669 	return vdev;
5670 }
5671 
5672 /**
5673  * dp_get_vdev_for_peer() - Get virtual interface instance peer belongs to
5674  * @peer_handle: opaque peer instance
5675  *
5676  * Get the virtual interface instance to which the peer belongs
5677  *
5678  * Return: virtual interface instance pointer,
5679  *         NULL if it cannot be found
5680  */
5681 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
5682 {
5683 	struct dp_peer *peer = peer_handle;
5684 
5685 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
5686 	return (struct cdp_vdev *)peer->vdev;
5687 }
5688 
5689 /**
5690  * dp_peer_get_peer_mac_addr() - Get peer mac address
5691  * @peer_handle: opaque peer instance
5692  *
5693  * Get the MAC address of the peer
5694  *
5695  * Return: peer mac address pointer,
5696  *         NULL if it cannot be found
5697  */
5698 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
5699 {
5700 	struct dp_peer *peer = peer_handle;
5701 	uint8_t *mac;
5702 
5703 	mac = peer->mac_addr.raw;
5704 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5705 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
5706 	return peer->mac_addr.raw;
5707 }
5708 
5709 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5710 		      uint8_t *peer_mac)
5711 {
5712 	enum ol_txrx_peer_state peer_state;
5713 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5714 	struct cdp_peer_info peer_info = { 0 };
5715 	struct dp_peer *peer;
5716 	struct dp_peer *tgt_peer;
5717 
5718 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
5719 				 false, CDP_WILD_PEER_TYPE);
5720 
5721 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
5722 
5723 	if (!peer)
5724 		return OL_TXRX_PEER_STATE_INVALID;
5725 
5726 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
5727 
5728 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
5729 	peer_state = tgt_peer->state;
5730 
5731 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5732 
5733 	return peer_state;
5734 }
5735 
5736 /**
5737  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
5738  * @pdev: data path device instance
5739  *
5740  * Allocate the local peer ID pool for the physical device
5741  *
5742  * Return: none
5743  */
5744 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
5745 {
5746 	int i;
5747 
5748 	/* point the freelist to the first ID */
5749 	pdev->local_peer_ids.freelist = 0;
5750 
5751 	/* link each ID to the next one */
5752 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
5753 		pdev->local_peer_ids.pool[i] = i + 1;
5754 		pdev->local_peer_ids.map[i] = NULL;
5755 	}
5756 
5757 	/* link the last ID to itself, to mark the end of the list */
5758 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
5759 	pdev->local_peer_ids.pool[i] = i;
5760 
5761 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
5762 	DP_TRACE(INFO, "Peer pool init");
5763 }
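
/*
 * Worked example of the freelist built above, with
 * OL_TXRX_NUM_LOCAL_PEER_IDS shrunk to 4 for brevity:
 *
 *	pool[0]=1  pool[1]=2  pool[2]=3  pool[3]=4  pool[4]=4
 *	freelist = 0
 *
 * Each slot holds the index of the next free ID; the extra slot at index
 * OL_TXRX_NUM_LOCAL_PEER_IDS points to itself and serves as the
 * end-of-list marker that the allocator tests for.
 */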
5764 
5765 /**
5766  * dp_local_peer_id_alloc() - allocate local peer id
5767  * @pdev - data path device instance
5768  * @pdev: data path device instance
5769  * @peer: new peer instance
5770  *
5771  * Allocate a local peer ID for the new peer
5772  * Return: none
5773  */
5774 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5775 {
5776 	int i;
5777 
5778 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5779 	i = pdev->local_peer_ids.freelist;
5780 	if (pdev->local_peer_ids.pool[i] == i) {
5781 		/* the list is empty, except for the list-end marker */
5782 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5783 	} else {
5784 		/* take the head ID and advance the freelist */
5785 		peer->local_id = i;
5786 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5787 		pdev->local_peer_ids.map[i] = peer;
5788 	}
5789 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5790 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5791 }
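
/*
 * Continuing the 4-ID example from the pool init: the first allocation
 * hands out ID 0 and advances the freelist.
 *
 *	dp_local_peer_id_alloc(pdev, peer);
 *	    -> peer->local_id == 0, freelist == 1, map[0] == peer
 *
 * Once the freelist reaches the self-pointing marker (pool[i] == i) the
 * pool is exhausted and peers get OL_TXRX_INVALID_LOCAL_PEER_ID.
 */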
5792 
5793 /**
5794  * dp_local_peer_id_free() - remove local peer id
5795  * @pdev: data path device instance
5796  * @peer: peer instance whose local ID should be freed
5797  *
5798  * Release the local peer ID back to the pool
5799  *
5800  * Return: none
5801  */
5802 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5803 {
5804 	int i = peer->local_id;
5805 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5806 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5807 		return;
5808 	}
5809 
5810 	/* put this ID on the head of the freelist */
5811 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5812 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5813 	pdev->local_peer_ids.freelist = i;
5814 	pdev->local_peer_ids.map[i] = NULL;
5815 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5816 }
5817 
5818 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5819 				uint8_t vdev_id, uint8_t *peer_addr)
5820 {
5821 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5822 	struct dp_peer *peer = NULL;
5823 
5824 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5825 				      DP_MOD_ID_CDP);
5826 	if (!peer)
5827 		return false;
5828 
5829 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5830 
5831 	return true;
5832 }
5833 
5834 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5835 				      uint8_t vdev_id, uint8_t *peer_addr,
5836 				      uint16_t max_bssid)
5837 {
5838 	int i;
5839 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5840 	struct dp_peer *peer = NULL;
5841 
5842 	for (i = 0; i < max_bssid; i++) {
5843 		/* Need to check vdevs other than the vdev_id */
5844 		if (vdev_id == i)
5845 			continue;
5846 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5847 					      DP_MOD_ID_CDP);
5848 		if (peer) {
5849 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
5850 			       QDF_MAC_ADDR_REF(peer_addr), i);
5851 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5852 			return true;
5853 		}
5854 	}
5855 
5856 	return false;
5857 }
5858 
5859 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5860 			      uint8_t *peer_mac, bool val)
5861 {
5862 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5863 	struct dp_peer *peer = NULL;
5864 
5865 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5866 				      DP_MOD_ID_CDP);
5867 	if (!peer) {
5868 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
5869 		       QDF_MAC_ADDR_REF(peer_mac));
5870 		return;
5871 	}
5872 
5873 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
5874 		val, QDF_MAC_ADDR_REF(peer_mac));
5875 	peer->is_tdls_peer = val;
5876 
5877 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5878 }
5879 #endif
5880 
5881 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5882 			uint8_t *peer_addr)
5883 {
5884 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5885 	struct dp_peer *peer = NULL;
5886 
5887 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5888 				      DP_MOD_ID_CDP);
5889 	if (peer) {
5890 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5891 		return true;
5892 	}
5893 
5894 	return false;
5895 }
5896 
5897 #ifdef IPA_OFFLOAD
5898 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
5899 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
5900 {
5901 	struct dp_soc *soc = peer->vdev->pdev->soc;
5902 	struct hal_reo_cmd_params params;
5903 	int i;
5904 	int stats_cmd_sent_cnt = 0;
5905 	QDF_STATUS status;
5906 	uint16_t peer_id = peer->peer_id;
5907 	unsigned long comb_peer_id_tid;
5908 	struct dp_rx_tid *rx_tid;
5909 
5910 	if (!dp_stats_cmd_cb)
5911 		return stats_cmd_sent_cnt;
5912 
5913 	qdf_mem_zero(&params, sizeof(params));
5914 	for (i = 0; i < DP_MAX_TIDS; i++) {
5915 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5916 			continue;
5917 
5918 		rx_tid = &peer->rx_tid[i];
5919 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5920 			params.std.need_status = 1;
5921 			params.std.addr_lo =
5922 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5923 			params.std.addr_hi =
5924 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5925 			params.u.stats_params.clear = 1;
5926 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
5927 					    | peer_id);
5928 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
5929 						 &params, dp_stats_cmd_cb,
5930 						 (void *)comb_peer_id_tid);
5931 			if (QDF_IS_STATUS_SUCCESS(status))
5932 				stats_cmd_sent_cnt++;
5933 
5934 			/* Flush REO descriptor from HW cache to update stats
5935 			 * in descriptor memory. This is to help debugging
5936 			 */
5937 			qdf_mem_zero(&params, sizeof(params));
5938 			params.std.need_status = 0;
5939 			params.std.addr_lo =
5940 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5941 			params.std.addr_hi =
5942 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5943 			params.u.fl_cache_params.flush_no_inval = 1;
5944 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5945 					NULL);
5946 		}
5947 	}
5948 
5949 	return stats_cmd_sent_cnt;
5950 }
5951 
5952 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
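
/*
 * Callback-side sketch (illustrative): the cb_ctxt passed above packs
 * (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id into a pointer-sized
 * integer, so a stats callback would unpack it roughly as below; the
 * mask macro name is an assumption for this sketch.
 *
 *	unsigned long comb = (unsigned long)cb_ctxt;
 *	uint16_t peer_id = comb & DP_PEER_REO_STATS_PEER_ID_MASK;
 *	uint8_t tid = comb >> DP_PEER_REO_STATS_TID_SHIFT;
 */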
5953 
5954 #endif
5955 /**
5956  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
5957  * @peer: DP peer handle
5958  * @dp_stats_cmd_cb: REO command callback function
5959  * @cb_ctxt: Callback context
5960  *
5961  * Return: number of TID stats commands sent successfully
5962  */
5963 int dp_peer_rxtid_stats(struct dp_peer *peer,
5964 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5965 			void *cb_ctxt)
5966 {
5967 	struct dp_soc *soc = peer->vdev->pdev->soc;
5968 	struct hal_reo_cmd_params params;
5969 	int i;
5970 	int stats_cmd_sent_cnt = 0;
5971 	QDF_STATUS status;
5972 	struct dp_rx_tid *rx_tid;
5973 
5974 	if (!dp_stats_cmd_cb)
5975 		return stats_cmd_sent_cnt;
5976 
5977 	qdf_mem_zero(&params, sizeof(params));
5978 	for (i = 0; i < DP_MAX_TIDS; i++) {
5979 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5980 			continue;
5981 
5982 		rx_tid = &peer->rx_tid[i];
5983 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5984 			params.std.need_status = 1;
5985 			params.std.addr_lo =
5986 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5987 			params.std.addr_hi =
5988 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5989 
5990 			if (cb_ctxt) {
5991 				status = dp_reo_send_cmd(
5992 						soc, CMD_GET_QUEUE_STATS,
5993 						&params, dp_stats_cmd_cb,
5994 						cb_ctxt);
5995 			} else {
5996 				status = dp_reo_send_cmd(
5997 						soc, CMD_GET_QUEUE_STATS,
5998 						&params, dp_stats_cmd_cb,
5999 						rx_tid);
6000 			}
6001 
6002 			if (QDF_IS_STATUS_SUCCESS(status))
6003 				stats_cmd_sent_cnt++;
6004 
6006 			/* Flush REO descriptor from HW cache to update stats
6007 			 * in descriptor memory. This is to help debugging
6008 			 */
6009 			qdf_mem_zero(&params, sizeof(params));
6010 			params.std.need_status = 0;
6011 			params.std.addr_lo =
6012 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6013 			params.std.addr_hi =
6014 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6015 			params.u.fl_cache_params.flush_no_inval = 1;
6016 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
6017 					NULL);
6018 		}
6019 	}
6020 
6021 	return stats_cmd_sent_cnt;
6022 }
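
/*
 * Usage sketch (illustrative; the callback name is hypothetical): request
 * REO queue stats for every active TID of a peer.
 *
 *	static void my_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				      union hal_reo_status *reo_status)
 *	{
 *		... parse reo_status->queue_status ...
 *	}
 *
 *	int sent = dp_peer_rxtid_stats(peer, my_rxtid_stats_cb, NULL);
 *
 * With cb_ctxt == NULL the per-TID dp_rx_tid is passed to the callback
 * instead.
 */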
6023 
6024 QDF_STATUS
6025 dp_set_michael_key(struct cdp_soc_t *soc,
6026 		   uint8_t vdev_id,
6027 		   uint8_t *peer_mac,
6028 		   bool is_unicast, uint32_t *key)
6029 {
6030 	uint8_t sec_index = is_unicast ? 1 : 0;
6031 	struct dp_peer *peer =
6032 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
6033 						       peer_mac, 0, vdev_id,
6034 						       DP_MOD_ID_CDP);
6035 
6036 	if (!peer) {
6037 		dp_peer_err("%pK: peer not found ", soc);
6038 		return QDF_STATUS_E_FAILURE;
6039 	}
6040 
6041 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
6042 		     key, IEEE80211_WEP_MICLEN);
6043 
6044 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6045 
6046 	return QDF_STATUS_SUCCESS;
6047 }
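
/*
 * Usage sketch (illustrative): program the 8-byte TKIP Michael MIC key
 * for a peer's unicast traffic; is_unicast selects security index 1
 * (unicast) versus 0 (multicast).
 *
 *	uint32_t mic_key[2] = { 0x11223344, 0x55667788 };
 *
 *	dp_set_michael_key(soc_hdl, vdev_id, peer_mac, true, mic_key);
 */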
6048 
6050 /**
6051  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
6052  * @soc: DP soc
6053  * @vdev: vdev
6054  * @mod_id: id of module requesting reference
6055  *
6056  * Return: VDEV BSS peer
6057  */
6058 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
6059 					   struct dp_vdev *vdev,
6060 					   enum dp_mod_id mod_id)
6061 {
6062 	struct dp_peer *peer = NULL;
6063 
6064 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6065 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6066 		if (peer->bss_peer)
6067 			break;
6068 	}
6069 
6070 	if (!peer) {
6071 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6072 		return NULL;
6073 	}
6074 
6075 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6076 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6077 		return peer;
6078 	}
6079 
6080 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
6081 	return NULL;
6082 }
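
/*
 * Reference-handling sketch (illustrative): a successful lookup returns
 * the bss peer with a reference held against mod_id, which the caller
 * must release.
 *
 *	struct dp_peer *bss_peer =
 *		dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
 *
 *	if (bss_peer) {
 *		... use bss_peer ...
 *		dp_peer_unref_delete(bss_peer, DP_MOD_ID_CDP);
 *	}
 */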
6083 
6084 /**
6085  * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
6086  * @soc: DP soc
6087  * @vdev: vdev
6088  * @mod_id: id of module requesting reference
6089  *
6090  * Return: VDEV self peer
6091  */
6092 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
6093 						struct dp_vdev *vdev,
6094 						enum dp_mod_id mod_id)
6095 {
6096 	struct dp_peer *peer;
6097 
6098 	if (vdev->opmode != wlan_op_mode_sta)
6099 		return NULL;
6100 
6101 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6102 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6103 		if (peer->sta_self_peer)
6104 			break;
6105 	}
6106 
6107 	if (!peer) {
6108 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6109 		return NULL;
6110 	}
6111 
6112 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6113 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6114 		return peer;
6115 	}
6116 
6117 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
6118 	return NULL;
6119 }
6120 
6121 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
6122 void dp_dump_rx_reo_queue_info(
6123 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
6124 {
6125 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
6126 
6127 	if (!rx_tid)
6128 		return;
6129 
6130 	if (reo_status->fl_cache_status.header.status !=
6131 		HAL_REO_CMD_SUCCESS) {
6132 		dp_err_rl("Rx tid REO HW desc flush failed (%d)",
6133 			  reo_status->fl_cache_status.header.status);
6134 		return;
6135 	}
6136 	qdf_spin_lock_bh(&rx_tid->tid_lock);
6137 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
6138 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
6139 }
6140 
6141 void dp_send_cache_flush_for_rx_tid(
6142 	struct dp_soc *soc, struct dp_peer *peer)
6143 {
6144 	int i;
6145 	struct dp_rx_tid *rx_tid;
6146 	struct hal_reo_cmd_params params;
6147 
6148 	if (!peer) {
6149 		dp_err_rl("Peer is NULL");
6150 		return;
6151 	}
6152 
6153 	for (i = 0; i < DP_MAX_TIDS; i++) {
6154 		rx_tid = &peer->rx_tid[i];
6155 		if (!rx_tid)
6156 			continue;
6157 		qdf_spin_lock_bh(&rx_tid->tid_lock);
6158 		if (rx_tid->hw_qdesc_vaddr_aligned) {
6159 			qdf_mem_zero(&params, sizeof(params));
6160 			params.std.need_status = 1;
6161 			params.std.addr_lo =
6162 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6163 			params.std.addr_hi =
6164 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6165 			params.u.fl_cache_params.flush_no_inval = 0;
6166 			if (QDF_STATUS_SUCCESS !=
6167 				dp_reo_send_cmd(
6168 					soc, CMD_FLUSH_CACHE,
6169 					&params, dp_dump_rx_reo_queue_info,
6170 					(void *)rx_tid)) {
6171 				dp_err_rl("cache flush send failed tid %d",
6172 					  rx_tid->tid);
6173 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
6174 				break;
6175 			}
6176 		}
6177 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
6178 	}
6179 }
6180 
6181 void dp_get_rx_reo_queue_info(
6182 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6183 {
6184 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6185 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6186 						     DP_MOD_ID_GENERIC_STATS);
6187 	struct dp_peer *peer = NULL;
6188 
6189 	if (!vdev) {
6190 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
6191 		goto failed;
6192 	}
6193 
6194 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6195 
6196 	if (!peer) {
6197 		dp_err_rl("Peer is NULL");
6198 		goto failed;
6199 	}
6200 	dp_send_cache_flush_for_rx_tid(soc, peer);
6201 failed:
6202 	if (peer)
6203 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
6204 	if (vdev)
6205 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6206 }
6207 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
6208 
6209 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6210 			 uint8_t *peer_mac)
6211 {
6212 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6213 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
6214 							      vdev_id,
6215 							      DP_MOD_ID_CDP);
6216 	struct dp_txrx_peer *txrx_peer;
6217 	uint8_t tid;
6218 	struct dp_rx_tid_defrag *defrag_rx_tid;
6219 
6220 	if (!peer)
6221 		return;
6222 
6223 	if (!peer->txrx_peer)
6224 		goto fail;
6225 
6226 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
6227 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6228 
6229 	txrx_peer = peer->txrx_peer;
6230 
6231 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
6232 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
6233 
6234 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
6235 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
6236 		dp_rx_reorder_flush_frag(txrx_peer, tid);
6237 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
6238 	}
6239 fail:
6240 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6241 }
6242 
6243 /**
6244  * dp_peer_find_by_id_valid() - check if a peer exists for the given id
6245  * @soc: core DP soc context
6246  * @peer_id: peer id by which the peer object can be retrieved
6247  *
6248  * Return: true if the peer exists, false otherwise
6249  */
6250 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
6251 {
6252 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
6253 						     DP_MOD_ID_HTT);
6254 
6255 	if (peer) {
6256 		/*
6257 		 * Decrement the peer ref which is taken as part of
6258 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
6259 		 */
6260 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
6261 
6262 		return true;
6263 	}
6264 
6265 	return false;
6266 }
6267 
6268 qdf_export_symbol(dp_peer_find_by_id_valid);
6269