xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 47cbcd3bd0d589a09df69fc7efe9d19dd924315a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
47 #include "reg_services_common.h"
48 #endif
49 #ifdef FEATURE_AST
50 #ifdef BYPASS_OL_OPS
51 /**
52  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
53  * @soc: DP soc structure pointer
54  * @peer: dp peer structure
55  * @dest_macaddr: MAC address of ast node
56  * @flags: wds or hmwds
57  * @type: type from enum cdp_txrx_ast_entry_type
58  *
59  * This API is used by WDS source port learning function to
60  * add a new AST entry in the fw.
61  *
62  * Return: 0 on success, error code otherwise.
63  */
64 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
65 				    struct dp_peer *peer,
66 				    const uint8_t *dest_macaddr,
67 				    uint32_t flags,
68 				    uint8_t type)
69 {
70 	QDF_STATUS status;
71 
72 	status = target_if_add_wds_entry(soc->ctrl_psoc,
73 					 peer->vdev->vdev_id,
74 					 peer->mac_addr.raw,
75 					 dest_macaddr,
76 					 WMI_HOST_WDS_FLAG_STATIC,
77 					 type);
78 
79 	return qdf_status_to_os_return(status);
80 }
81 
82 /**
83  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
84  * @soc: DP soc structure pointer
85  * @peer: dp peer structure
86  * @dest_macaddr: MAC address of ast node
87  * @flags: wds or hmwds
88  *
89  * This API is used by update the peer mac address for the ast
90  * in the fw.
91  *
92  * Return: 0 on success, error code otherwise.
93  */
94 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
95 				       struct dp_peer *peer,
96 				       uint8_t *dest_macaddr,
97 				       uint32_t flags)
98 {
99 	QDF_STATUS status;
100 
101 	status = target_if_update_wds_entry(soc->ctrl_psoc,
102 					    peer->vdev->vdev_id,
103 					    dest_macaddr,
104 					    peer->mac_addr.raw,
105 					    WMI_HOST_WDS_FLAG_STATIC);
106 
107 	return qdf_status_to_os_return(status);
108 }
109 
110 /**
111  * dp_del_wds_entry_wrapper() - delete a WSD AST entry
112  * @soc: DP soc structure pointer
113  * @vdev_id: vdev_id
114  * @wds_macaddr: MAC address of ast node
115  * @type: type from enum cdp_txrx_ast_entry_type
116  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
117  *
118  * This API is used to delete an AST entry from fw
119  *
120  * Return: None
121  */
122 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
123 			      uint8_t vdev_id,
124 			      uint8_t *wds_macaddr,
125 			      uint8_t type,
126 			      uint8_t delete_in_fw)
127 {
128 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
129 				wds_macaddr, type, delete_in_fw);
130 }
131 #else
132 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
133 				    struct dp_peer *peer,
134 				    const uint8_t *dest_macaddr,
135 				    uint32_t flags,
136 				    uint8_t type)
137 {
138 	int status;
139 
140 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
141 					soc->ctrl_psoc,
142 					peer->vdev->vdev_id,
143 					peer->mac_addr.raw,
144 					peer->peer_id,
145 					dest_macaddr,
146 					peer->mac_addr.raw,
147 					flags,
148 					type);
149 
150 	return status;
151 }
152 
153 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
154 				       struct dp_peer *peer,
155 				       uint8_t *dest_macaddr,
156 				       uint32_t flags)
157 {
158 	int status;
159 
160 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
161 				soc->ctrl_psoc,
162 				peer->vdev->vdev_id,
163 				dest_macaddr,
164 				peer->mac_addr.raw,
165 				flags);
166 
167 	return status;
168 }
169 
/* Non-BYPASS_OL_OPS variant: forward the AST delete to the control-path
 * ops registered by OL.
 */
void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						vdev_id,
						wds_macaddr,
						type,
						delete_in_fw);
}
182 #endif /* BYPASS_OL_OPS */
183 #else
/* FEATURE_AST disabled: WDS AST entries are never created, so the
 * delete hook is a no-op stub.
 */
void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
}
191 #endif /* FEATURE_AST */
192 
193 #ifdef FEATURE_WDS
194 static inline bool
195 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
196 				    struct dp_ast_entry *ast_entry)
197 {
198 	/* if peer map v2 is enabled we are not freeing ast entry
199 	 * here and it is supposed to be freed in unmap event (after
200 	 * we receive delete confirmation from target)
201 	 *
202 	 * if peer_id is invalid we did not get the peer map event
203 	 * for the peer free ast entry from here only in this case
204 	 */
205 
206 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
207 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
208 		return true;
209 
210 	return false;
211 }
212 #else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	/* FEATURE_WDS disabled: AST free is never deferred to unmap */
	return false;
}

/* FEATURE_WDS disabled: soc-level WDS setup is a no-op stub */
void dp_soc_wds_attach(struct dp_soc *soc)
{
}

/* FEATURE_WDS disabled: soc-level WDS teardown is a no-op stub */
void dp_soc_wds_detach(struct dp_soc *soc)
{
}
227 #endif
228 
229 #ifdef QCA_SUPPORT_WDS_EXTENDED
230 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
231 {
232 	struct dp_vdev *vdev = peer->vdev;
233 	struct dp_txrx_peer *txrx_peer;
234 
235 	if (!vdev->wds_ext_enabled)
236 		return false;
237 
238 	txrx_peer = dp_get_txrx_peer(peer);
239 	if (!txrx_peer)
240 		return false;
241 
242 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
243 				&txrx_peer->wds_ext.init))
244 		return true;
245 
246 	return false;
247 }
248 #else
/* QCA_SUPPORT_WDS_EXTENDED disabled: no peer can be a wds-ext peer */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	return false;
}
253 #endif
254 
255 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
256 {
257 	uint32_t max_ast_index;
258 
259 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
260 	/* allocate ast_table for ast entry to ast_index map */
261 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
262 	soc->ast_table = qdf_mem_malloc(max_ast_index *
263 					sizeof(struct dp_ast_entry *));
264 	if (!soc->ast_table) {
265 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
266 		return QDF_STATUS_E_NOMEM;
267 	}
268 	return QDF_STATUS_SUCCESS; /* success */
269 }
270 
271 /**
272  * dp_find_peer_by_macaddr() - Finding the peer from mac address provided.
273  * @soc: soc handle
274  * @mac_addr: MAC address to be used to find peer
275  * @vdev_id: VDEV id
276  * @mod_id: MODULE ID
277  *
278  * Return: struct dp_peer
279  */
280 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
281 					uint8_t vdev_id, enum dp_mod_id mod_id)
282 {
283 	bool ast_ind_disable = wlan_cfg_get_ast_indication_disable(
284 							    soc->wlan_cfg_ctx);
285 	struct cdp_peer_info peer_info = {0};
286 
287 	if ((!soc->ast_offload_support) || (!ast_ind_disable)) {
288 		struct dp_ast_entry *ast_entry = NULL;
289 		uint16_t peer_id;
290 
291 		qdf_spin_lock_bh(&soc->ast_lock);
292 
293 		if (vdev_id == DP_VDEV_ALL)
294 			ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
295 		else
296 			ast_entry = dp_peer_ast_hash_find_by_vdevid
297 						(soc, mac_addr, vdev_id);
298 
299 		if (!ast_entry) {
300 			qdf_spin_unlock_bh(&soc->ast_lock);
301 			dp_err("NULL ast entry");
302 			return NULL;
303 		}
304 
305 		peer_id = ast_entry->peer_id;
306 		qdf_spin_unlock_bh(&soc->ast_lock);
307 
308 		if (peer_id == HTT_INVALID_PEER)
309 			return NULL;
310 
311 		return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
312 	}
313 
314 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac_addr, false,
315 				 CDP_WILD_PEER_TYPE);
316 	return dp_peer_hash_find_wrapper(soc, &peer_info, mod_id);
317 }
318 
319 /**
320  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
321  * @soc: soc handle
322  *
323  * return: QDF_STATUS
324  */
325 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
326 {
327 	uint32_t max_peers, peer_map_size;
328 
329 	max_peers = soc->max_peer_id;
330 	/* allocate the peer ID -> peer object map */
331 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
332 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
333 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
334 	if (!soc->peer_id_to_obj_map) {
335 		dp_peer_err("%pK: peer map memory allocation failed", soc);
336 		return QDF_STATUS_E_NOMEM;
337 	}
338 
339 	/*
340 	 * The peer_id_to_obj_map doesn't really need to be initialized,
341 	 * since elements are only used after they have been individually
342 	 * initialized.
343 	 * However, it is convenient for debugging to have all elements
344 	 * that are not in use set to 0.
345 	 */
346 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
347 
348 	qdf_spinlock_create(&soc->peer_map_lock);
349 	return QDF_STATUS_SUCCESS; /* success */
350 }
351 
/* AST hash sizing factors: the table gets
 * (max_ast_idx * DP_AST_HASH_LOAD_MULT) >> DP_AST_HASH_LOAD_SHIFT
 * bins, rounded up to a power of two in dp_peer_ast_hash_attach().
 */
#define DP_AST_HASH_LOAD_MULT  2
#define DP_AST_HASH_LOAD_SHIFT 0
354 
355 static inline uint32_t
356 dp_peer_find_hash_index(struct dp_soc *soc,
357 			union dp_align_mac_addr *mac_addr)
358 {
359 	uint32_t index;
360 
361 	index =
362 		mac_addr->align2.bytes_ab ^
363 		mac_addr->align2.bytes_cd ^
364 		mac_addr->align2.bytes_ef;
365 
366 	index ^= index >> soc->peer_hash.idx_bits;
367 	index &= soc->peer_hash.mask;
368 	return index;
369 }
370 
371 struct dp_peer *dp_peer_find_hash_find(
372 				struct dp_soc *soc, uint8_t *peer_mac_addr,
373 				int mac_addr_is_aligned, uint8_t vdev_id,
374 				enum dp_mod_id mod_id)
375 {
376 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
377 	uint32_t index;
378 	struct dp_peer *peer;
379 
380 	if (!soc->peer_hash.bins)
381 		return NULL;
382 
383 	if (mac_addr_is_aligned) {
384 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
385 	} else {
386 		qdf_mem_copy(
387 			&local_mac_addr_aligned.raw[0],
388 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
389 		mac_addr = &local_mac_addr_aligned;
390 	}
391 	index = dp_peer_find_hash_index(soc, mac_addr);
392 	qdf_spin_lock_bh(&soc->peer_hash_lock);
393 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
394 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
395 		    ((peer->vdev->vdev_id == vdev_id) ||
396 		     (vdev_id == DP_VDEV_ALL))) {
397 			/* take peer reference before returning */
398 			if (dp_peer_get_ref(soc, peer, mod_id) !=
399 						QDF_STATUS_SUCCESS)
400 				peer = NULL;
401 
402 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
403 			return peer;
404 		}
405 	}
406 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
407 	return NULL; /* failure */
408 }
409 
410 qdf_export_symbol(dp_peer_find_hash_find);
411 
412 #ifdef WLAN_FEATURE_11BE_MLO
413 /**
414  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
415  * @soc: soc handle
416  *
417  * return: none
418  */
419 static void dp_peer_find_hash_detach(struct dp_soc *soc)
420 {
421 	if (soc->peer_hash.bins) {
422 		qdf_mem_free(soc->peer_hash.bins);
423 		soc->peer_hash.bins = NULL;
424 		qdf_spinlock_destroy(&soc->peer_hash_lock);
425 	}
426 
427 	if (soc->arch_ops.mlo_peer_find_hash_detach)
428 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
429 }
430 
431 /**
432  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
433  * @soc: soc handle
434  *
435  * return: QDF_STATUS
436  */
437 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
438 {
439 	int i, hash_elems, log2;
440 
441 	/* allocate the peer MAC address -> peer object hash table */
442 	hash_elems = soc->max_peers;
443 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
444 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
445 	log2 = dp_log2_ceil(hash_elems);
446 	hash_elems = 1 << log2;
447 
448 	soc->peer_hash.mask = hash_elems - 1;
449 	soc->peer_hash.idx_bits = log2;
450 	/* allocate an array of TAILQ peer object lists */
451 	soc->peer_hash.bins = qdf_mem_malloc(
452 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
453 	if (!soc->peer_hash.bins)
454 		return QDF_STATUS_E_NOMEM;
455 
456 	for (i = 0; i < hash_elems; i++)
457 		TAILQ_INIT(&soc->peer_hash.bins[i]);
458 
459 	qdf_spinlock_create(&soc->peer_hash_lock);
460 
461 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
462 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
463 			QDF_STATUS_SUCCESS)) {
464 		dp_peer_find_hash_detach(soc);
465 		return QDF_STATUS_E_NOMEM;
466 	}
467 	return QDF_STATUS_SUCCESS;
468 }
469 
470 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
471 {
472 	unsigned index;
473 
474 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
475 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
476 		qdf_spin_lock_bh(&soc->peer_hash_lock);
477 
478 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
479 							DP_MOD_ID_CONFIG))) {
480 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
481 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
482 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
483 			return;
484 		}
485 
486 		/*
487 		 * It is important to add the new peer at the tail of
488 		 * peer list with the bin index. Together with having
489 		 * the hash_find function search from head to tail,
490 		 * this ensures that if two entries with the same MAC address
491 		 * are stored, the one added first will be found first.
492 		 */
493 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
494 				  hash_list_elem);
495 
496 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
497 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
498 		if (soc->arch_ops.mlo_peer_find_hash_add)
499 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
500 	} else {
501 		dp_err("unknown peer type %d", peer->peer_type);
502 	}
503 }
504 
505 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
506 {
507 	unsigned index;
508 	struct dp_peer *tmppeer = NULL;
509 	int found = 0;
510 
511 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
512 
513 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
514 		/* Check if tail is not empty before delete*/
515 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
516 
517 		qdf_spin_lock_bh(&soc->peer_hash_lock);
518 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
519 			      hash_list_elem) {
520 			if (tmppeer == peer) {
521 				found = 1;
522 				break;
523 			}
524 		}
525 		QDF_ASSERT(found);
526 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
527 			     hash_list_elem);
528 
529 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
530 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
531 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
532 		if (soc->arch_ops.mlo_peer_find_hash_remove)
533 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
534 	} else {
535 		dp_err("unknown peer type %d", peer->peer_type);
536 	}
537 }
538 
539 uint8_t dp_get_peer_link_id(struct dp_peer *peer)
540 {
541 	uint8_t link_id;
542 
543 	link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
544 	if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
545 		link_id = 0;
546 
547 	return link_id;
548 }
549 #else
550 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
551 {
552 	int i, hash_elems, log2;
553 
554 	/* allocate the peer MAC address -> peer object hash table */
555 	hash_elems = soc->max_peers;
556 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
557 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
558 	log2 = dp_log2_ceil(hash_elems);
559 	hash_elems = 1 << log2;
560 
561 	soc->peer_hash.mask = hash_elems - 1;
562 	soc->peer_hash.idx_bits = log2;
563 	/* allocate an array of TAILQ peer object lists */
564 	soc->peer_hash.bins = qdf_mem_malloc(
565 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
566 	if (!soc->peer_hash.bins)
567 		return QDF_STATUS_E_NOMEM;
568 
569 	for (i = 0; i < hash_elems; i++)
570 		TAILQ_INIT(&soc->peer_hash.bins[i]);
571 
572 	qdf_spinlock_create(&soc->peer_hash_lock);
573 	return QDF_STATUS_SUCCESS;
574 }
575 
576 static void dp_peer_find_hash_detach(struct dp_soc *soc)
577 {
578 	if (soc->peer_hash.bins) {
579 		qdf_mem_free(soc->peer_hash.bins);
580 		soc->peer_hash.bins = NULL;
581 		qdf_spinlock_destroy(&soc->peer_hash_lock);
582 	}
583 }
584 
585 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
586 {
587 	unsigned index;
588 
589 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
590 	qdf_spin_lock_bh(&soc->peer_hash_lock);
591 
592 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
593 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
594 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
595 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
596 		return;
597 	}
598 
599 	/*
600 	 * It is important to add the new peer at the tail of the peer list
601 	 * with the bin index.  Together with having the hash_find function
602 	 * search from head to tail, this ensures that if two entries with
603 	 * the same MAC address are stored, the one added first will be
604 	 * found first.
605 	 */
606 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
607 
608 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
609 }
610 
611 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
612 {
613 	unsigned index;
614 	struct dp_peer *tmppeer = NULL;
615 	int found = 0;
616 
617 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
618 	/* Check if tail is not empty before delete*/
619 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
620 
621 	qdf_spin_lock_bh(&soc->peer_hash_lock);
622 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
623 		if (tmppeer == peer) {
624 			found = 1;
625 			break;
626 		}
627 	}
628 	QDF_ASSERT(found);
629 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
630 
631 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
632 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
633 }
634 
635 
636 #endif/* WLAN_FEATURE_11BE_MLO */
637 
638 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
639 			   struct dp_peer *peer)
640 {
641 	/* only link peer will be added to vdev peer list */
642 	if (IS_MLO_DP_MLD_PEER(peer))
643 		return;
644 
645 	qdf_spin_lock_bh(&vdev->peer_list_lock);
646 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
647 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
648 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
649 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
650 		return;
651 	}
652 
653 	/* add this peer into the vdev's list */
654 	if (wlan_op_mode_sta == vdev->opmode)
655 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
656 	else
657 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
658 
659 	vdev->num_peers++;
660 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
661 }
662 
663 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
664 			      struct dp_peer *peer)
665 {
666 	uint8_t found = 0;
667 	struct dp_peer *tmppeer = NULL;
668 
669 	/* only link peer will be added to vdev peer list */
670 	if (IS_MLO_DP_MLD_PEER(peer))
671 		return;
672 
673 	qdf_spin_lock_bh(&vdev->peer_list_lock);
674 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
675 		if (tmppeer == peer) {
676 			found = 1;
677 			break;
678 		}
679 	}
680 
681 	if (found) {
682 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
683 			     peer_list_elem);
684 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
685 		vdev->num_peers--;
686 	} else {
687 		/*Ignoring the remove operation as peer not found*/
688 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
689 			      , soc, peer, vdev, &peer->vdev->peer_list);
690 	}
691 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
692 }
693 
/**
 * dp_txrx_peer_attach_add() - attach a txrx_peer to its dp_peer
 * @soc: soc handle
 * @peer: dp peer object
 * @txrx_peer: txrx peer object to link
 *
 * Links @txrx_peer to @peer under peer_map_lock, copies the bss_peer
 * flag, and mirrors the peer_id into @txrx_peer when @peer has already
 * been mapped by the target.
 */
void dp_txrx_peer_attach_add(struct dp_soc *soc,
			     struct dp_peer *peer,
			     struct dp_txrx_peer *txrx_peer)
{
	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->txrx_peer = txrx_peer;
	txrx_peer->bss_peer = peer->bss_peer;

	/* peer map event not received yet; the id is synced later by
	 * dp_peer_find_id_to_obj_add()
	 */
	if (peer->peer_id == HTT_INVALID_PEER) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	txrx_peer->peer_id = peer->peer_id;

	/* a peer with a valid id must already be in the id -> object map */
	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);

	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
714 
/**
 * dp_peer_find_id_to_obj_add() - record @peer in the peer_id -> object map
 * @soc: soc handle
 * @peer: peer object to map
 * @peer_id: peer id assigned by the target
 *
 * Takes a DP_MOD_ID_CONFIG reference on @peer; it is released when the
 * mapping is torn down in dp_peer_find_id_to_obj_remove(). A map event
 * for an id that is already mapped is treated as fatal.
 */
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->peer_id = peer_id;

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
		/* keep the txrx peer's cached id in sync */
		if (peer->txrx_peer)
			peer->txrx_peer->peer_id = peer_id;
	} else {
		/* Peer map event came for peer_id which
		 * is already mapped, this is not expected
		 */
		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
		       soc->peer_id_to_obj_map[peer_id]);
		/* drop the reference taken above before asserting */
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_assert_always(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
748 
749 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
750 				   uint16_t peer_id)
751 {
752 	struct dp_peer *peer = NULL;
753 	QDF_ASSERT(peer_id <= soc->max_peer_id);
754 
755 	qdf_spin_lock_bh(&soc->peer_map_lock);
756 	peer = soc->peer_id_to_obj_map[peer_id];
757 	if (!peer) {
758 		dp_err("unable to get peer during peer id obj map remove");
759 		qdf_spin_unlock_bh(&soc->peer_map_lock);
760 		return;
761 	}
762 	peer->peer_id = HTT_INVALID_PEER;
763 	if (peer->txrx_peer)
764 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
765 	soc->peer_id_to_obj_map[peer_id] = NULL;
766 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
767 	qdf_spin_unlock_bh(&soc->peer_map_lock);
768 }
769 
770 #ifdef FEATURE_MEC
771 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
772 {
773 	int log2, hash_elems, i;
774 
775 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
776 	hash_elems = 1 << log2;
777 
778 	soc->mec_hash.mask = hash_elems - 1;
779 	soc->mec_hash.idx_bits = log2;
780 
781 	dp_peer_info("%pK: max mec index: %d",
782 		     soc, DP_PEER_MAX_MEC_IDX);
783 
784 	/* allocate an array of TAILQ mec object lists */
785 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
786 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
787 							      dp_mec_entry)));
788 
789 	if (!soc->mec_hash.bins)
790 		return QDF_STATUS_E_NOMEM;
791 
792 	for (i = 0; i < hash_elems; i++)
793 		TAILQ_INIT(&soc->mec_hash.bins[i]);
794 
795 	return QDF_STATUS_SUCCESS;
796 }
797 
798 /**
799  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
800  * @soc: SoC handle
801  * @mac_addr: MAC address
802  *
803  * Return: MEC hash
804  */
805 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
806 					      union dp_align_mac_addr *mac_addr)
807 {
808 	uint32_t index;
809 
810 	index =
811 		mac_addr->align2.bytes_ab ^
812 		mac_addr->align2.bytes_cd ^
813 		mac_addr->align2.bytes_ef;
814 	index ^= index >> soc->mec_hash.idx_bits;
815 	index &= soc->mec_hash.mask;
816 	return index;
817 }
818 
/**
 * dp_peer_mec_hash_find_by_pdevid() - look up a MEC entry by pdev and MAC
 * @soc: SoC handle
 * @pdev_id: pdev id the entry must belong to
 * @mec_mac_addr: MAC address of the MEC entry
 *
 * Walks the hash bin without taking soc->mec_lock itself; the caller in
 * this file (dp_peer_mec_add_entry) holds mec_lock around the call -
 * presumably all callers must do the same; verify before adding new ones.
 *
 * Return: matching MEC entry, or NULL if none
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	/* copy into an aligned union for the word-wise hash/compare */
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}
840 
841 /**
842  * dp_peer_mec_hash_add() - Add MEC entry into hash table
843  * @soc: SoC handle
844  * @mecentry: MEC entry
845  *
846  * This function adds the MEC entry into SoC MEC hash table
847  *
848  * Return: None
849  */
850 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
851 					struct dp_mec_entry *mecentry)
852 {
853 	uint32_t index;
854 
855 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
856 	qdf_spin_lock_bh(&soc->mec_lock);
857 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
858 	qdf_spin_unlock_bh(&soc->mec_lock);
859 }
860 
/**
 * dp_peer_mec_add_entry() - learn (or refresh) a MEC entry for @mac_addr
 * @soc: SoC handle
 * @vdev: vdev the address was learned on
 * @mac_addr: MAC address to record
 *
 * Return: QDF_STATUS_SUCCESS on a new entry, QDF_STATUS_E_ALREADY if the
 * entry existed (its is_active flag is refreshed), error otherwise.
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	/* bound the total number of MEC entries on the soc */
	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
					 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	/* refresh an existing entry instead of adding a duplicate */
	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	/* NOTE(review): mec_lock is dropped across the allocation below and
	 * only re-taken inside dp_peer_mec_hash_add(); two concurrent
	 * callers could both miss the lookup and insert duplicates - confirm
	 * callers serialize MEC learning for the same address.
	 */

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	dp_peer_mec_hash_add(soc, mecentry);

	/* account for the new entry */
	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}
918 
/* Detach @mecentry from its hash bin and park it on the caller-supplied
 * free list (@ptr is a TAILQ head of dp_mec_entry); entries are later
 * released by dp_peer_mec_free_list().
 * NOTE(review): the hash bins are guarded by soc->mec_lock elsewhere in
 * this file; the caller is presumably expected to hold it here - verify.
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}
930 
931 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
932 {
933 	struct dp_mec_entry *mecentry, *mecentry_next;
934 
935 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
936 
937 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
938 			   mecentry_next) {
939 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
940 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
941 		qdf_mem_free(mecentry);
942 		qdf_atomic_dec(&soc->mec_cnt);
943 		DP_STATS_INC(soc, mec.deleted, 1);
944 	}
945 }
946 
/* Flush all remaining MEC entries, then release the hash bin array. */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}
953 
/* mec_lock guards the MEC hash bins and per-entry state */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
963 #else
/* FEATURE_MEC disabled: the MEC hash is never allocated */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* FEATURE_MEC disabled: nothing to tear down */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
972 #endif
973 
974 #ifdef FEATURE_AST
975 #ifdef WLAN_FEATURE_11BE_MLO
976 /**
977  * dp_peer_exist_on_pdev() - check if peer with mac address exist on pdev
978  *
979  * @soc: Datapath SOC handle
980  * @peer_mac_addr: peer mac address
981  * @mac_addr_is_aligned: is mac address aligned
982  * @pdev: Datapath PDEV handle
983  *
984  * Return: true if peer found else return false
985  */
986 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
987 				  uint8_t *peer_mac_addr,
988 				  int mac_addr_is_aligned,
989 				  struct dp_pdev *pdev)
990 {
991 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
992 	unsigned int index;
993 	struct dp_peer *peer;
994 	bool found = false;
995 
996 	if (mac_addr_is_aligned) {
997 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
998 	} else {
999 		qdf_mem_copy(
1000 			&local_mac_addr_aligned.raw[0],
1001 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1002 		mac_addr = &local_mac_addr_aligned;
1003 	}
1004 	index = dp_peer_find_hash_index(soc, mac_addr);
1005 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1006 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1007 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1008 		    (peer->vdev->pdev == pdev)) {
1009 			found = true;
1010 			break;
1011 		}
1012 	}
1013 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1014 
1015 	if (found)
1016 		return found;
1017 
1018 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1019 					  mac_addr_is_aligned, DP_VDEV_ALL,
1020 					  DP_MOD_ID_CDP);
1021 	if (peer) {
1022 		if (peer->vdev->pdev == pdev)
1023 			found = true;
1024 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1025 	}
1026 
1027 	return found;
1028 }
1029 #else
1030 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1031 				  uint8_t *peer_mac_addr,
1032 				  int mac_addr_is_aligned,
1033 				  struct dp_pdev *pdev)
1034 {
1035 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1036 	unsigned int index;
1037 	struct dp_peer *peer;
1038 	bool found = false;
1039 
1040 	if (mac_addr_is_aligned) {
1041 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1042 	} else {
1043 		qdf_mem_copy(
1044 			&local_mac_addr_aligned.raw[0],
1045 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1046 		mac_addr = &local_mac_addr_aligned;
1047 	}
1048 	index = dp_peer_find_hash_index(soc, mac_addr);
1049 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1050 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1051 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1052 		    (peer->vdev->pdev == pdev)) {
1053 			found = true;
1054 			break;
1055 		}
1056 	}
1057 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1058 	return found;
1059 }
1060 #endif /* WLAN_FEATURE_11BE_MLO */
1061 
1062 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1063 {
1064 	int i, hash_elems, log2;
1065 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1066 
1067 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1068 		DP_AST_HASH_LOAD_SHIFT);
1069 
1070 	log2 = dp_log2_ceil(hash_elems);
1071 	hash_elems = 1 << log2;
1072 
1073 	soc->ast_hash.mask = hash_elems - 1;
1074 	soc->ast_hash.idx_bits = log2;
1075 
1076 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1077 		     soc, hash_elems, max_ast_idx);
1078 
1079 	/* allocate an array of TAILQ peer object lists */
1080 	soc->ast_hash.bins = qdf_mem_malloc(
1081 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1082 				dp_ast_entry)));
1083 
1084 	if (!soc->ast_hash.bins)
1085 		return QDF_STATUS_E_NOMEM;
1086 
1087 	for (i = 0; i < hash_elems; i++)
1088 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1089 
1090 	return QDF_STATUS_SUCCESS;
1091 }
1092 
1093 /**
1094  * dp_peer_ast_cleanup() - cleanup the references
1095  * @soc: SoC handle
1096  * @ast: ast entry
1097  *
1098  * Return: None
1099  */
1100 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1101 				       struct dp_ast_entry *ast)
1102 {
1103 	txrx_ast_free_cb cb = ast->callback;
1104 	void *cookie = ast->cookie;
1105 
1106 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1107 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1108 
1109 	/* Call the callbacks to free up the cookie */
1110 	if (cb) {
1111 		ast->callback = NULL;
1112 		ast->cookie = NULL;
1113 		cb(soc->ctrl_psoc,
1114 		   dp_soc_to_cdp_soc(soc),
1115 		   cookie,
1116 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1117 	}
1118 }
1119 
1120 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1121 {
1122 	unsigned int index;
1123 	struct dp_ast_entry *ast, *ast_next;
1124 
1125 	if (!soc->ast_hash.mask)
1126 		return;
1127 
1128 	if (!soc->ast_hash.bins)
1129 		return;
1130 
1131 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1132 
1133 	qdf_spin_lock_bh(&soc->ast_lock);
1134 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1135 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1136 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1137 					   hash_list_elem, ast_next) {
1138 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1139 					     hash_list_elem);
1140 				dp_peer_ast_cleanup(soc, ast);
1141 				soc->num_ast_entries--;
1142 				qdf_mem_free(ast);
1143 			}
1144 		}
1145 	}
1146 	qdf_spin_unlock_bh(&soc->ast_lock);
1147 
1148 	qdf_mem_free(soc->ast_hash.bins);
1149 	soc->ast_hash.bins = NULL;
1150 }
1151 
1152 /**
1153  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1154  * @soc: SoC handle
1155  * @mac_addr: MAC address
1156  *
1157  * Return: AST hash
1158  */
1159 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1160 					      union dp_align_mac_addr *mac_addr)
1161 {
1162 	uint32_t index;
1163 
1164 	index =
1165 		mac_addr->align2.bytes_ab ^
1166 		mac_addr->align2.bytes_cd ^
1167 		mac_addr->align2.bytes_ef;
1168 	index ^= index >> soc->ast_hash.idx_bits;
1169 	index &= soc->ast_hash.mask;
1170 	return index;
1171 }
1172 
1173 /**
1174  * dp_peer_ast_hash_add() - Add AST entry into hash table
1175  * @soc: SoC handle
1176  * @ase: AST entry
1177  *
1178  * This function adds the AST entry into SoC AST hash table
1179  * It assumes caller has taken the ast lock to protect the access to this table
1180  *
1181  * Return: None
1182  */
1183 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1184 					struct dp_ast_entry *ase)
1185 {
1186 	uint32_t index;
1187 
1188 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1189 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1190 }
1191 
1192 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1193 			     struct dp_ast_entry *ase)
1194 {
1195 	unsigned index;
1196 	struct dp_ast_entry *tmpase;
1197 	int found = 0;
1198 
1199 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1200 		return;
1201 
1202 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1203 	/* Check if tail is not empty before delete*/
1204 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1205 
1206 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1207 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1208 
1209 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1210 		if (tmpase == ase) {
1211 			found = 1;
1212 			break;
1213 		}
1214 	}
1215 
1216 	QDF_ASSERT(found);
1217 
1218 	if (found)
1219 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1220 }
1221 
1222 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1223 						     uint8_t *ast_mac_addr,
1224 						     uint8_t vdev_id)
1225 {
1226 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1227 	uint32_t index;
1228 	struct dp_ast_entry *ase;
1229 
1230 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1231 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1232 	mac_addr = &local_mac_addr_aligned;
1233 
1234 	index = dp_peer_ast_hash_index(soc, mac_addr);
1235 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1236 		if ((vdev_id == ase->vdev_id) &&
1237 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1238 			return ase;
1239 		}
1240 	}
1241 
1242 	return NULL;
1243 }
1244 
1245 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1246 						     uint8_t *ast_mac_addr,
1247 						     uint8_t pdev_id)
1248 {
1249 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1250 	uint32_t index;
1251 	struct dp_ast_entry *ase;
1252 
1253 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1254 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1255 	mac_addr = &local_mac_addr_aligned;
1256 
1257 	index = dp_peer_ast_hash_index(soc, mac_addr);
1258 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1259 		if ((pdev_id == ase->pdev_id) &&
1260 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1261 			return ase;
1262 		}
1263 	}
1264 
1265 	return NULL;
1266 }
1267 
1268 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1269 					       uint8_t *ast_mac_addr)
1270 {
1271 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1272 	unsigned index;
1273 	struct dp_ast_entry *ase;
1274 
1275 	if (!soc->ast_hash.bins)
1276 		return NULL;
1277 
1278 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1279 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1280 	mac_addr = &local_mac_addr_aligned;
1281 
1282 	index = dp_peer_ast_hash_index(soc, mac_addr);
1283 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1284 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1285 			return ase;
1286 		}
1287 	}
1288 
1289 	return NULL;
1290 }
1291 
1292 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
1293 					struct dp_soc *soc,
1294 					uint8_t *ast_mac_addr,
1295 					enum cdp_txrx_ast_entry_type type)
1296 {
1297 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1298 	unsigned index;
1299 	struct dp_ast_entry *ase;
1300 
1301 	if (!soc->ast_hash.bins)
1302 		return NULL;
1303 
1304 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1305 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1306 	mac_addr = &local_mac_addr_aligned;
1307 
1308 	index = dp_peer_ast_hash_index(soc, mac_addr);
1309 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1310 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0 &&
1311 		    ase->type == type) {
1312 			return ase;
1313 		}
1314 	}
1315 
1316 	return NULL;
1317 }
1318 
1319 /**
1320  * dp_peer_map_ipa_evt() - Send peer map event to IPA
1321  * @soc: SoC handle
1322  * @peer: peer to which ast node belongs
1323  * @ast_entry: AST entry
1324  * @mac_addr: MAC address of ast node
1325  *
1326  * Return: None
1327  */
1328 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
1329 static inline
1330 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1331 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1332 {
1333 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1334 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1335 			soc->cdp_soc.ol_ops->peer_map_event(
1336 			soc->ctrl_psoc, ast_entry->peer_id,
1337 			ast_entry->ast_idx, ast_entry->vdev_id,
1338 			mac_addr, ast_entry->type, ast_entry->ast_hash_value);
1339 		}
1340 	} else {
1341 		dp_peer_info("%pK: AST entry not found", soc);
1342 	}
1343 }
1344 
1345 /**
1346  * dp_peer_unmap_ipa_evt() - Send peer unmap event to IPA
1347  * @soc: SoC handle
1348  * @peer_id: Peerid
1349  * @vdev_id: Vdev id
1350  * @mac_addr: Peer mac address
1351  *
1352  * Return: None
1353  */
1354 static inline
1355 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1356 			   uint8_t vdev_id, uint8_t *mac_addr)
1357 {
1358 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1359 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1360 						      peer_id, vdev_id,
1361 						      mac_addr);
1362 	}
1363 }
1364 #else
/* Stub: no-op when IPA offload low-latency TX flow control is disabled */
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
}

/* Stub: no-op when IPA offload low-latency TX flow control is disabled */
static inline
void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
{
}
1376 #endif
1377 
/**
 * dp_peer_host_add_map_ast() - Create a host AST entry from an AST map event
 * @soc: SoC handle
 * @peer_id: peer id the AST entry belongs to
 * @mac_addr: MAC address of the AST entry
 * @hw_peer_id: HW AST index reported by the target
 * @vdev_id: vdev id of the VAP the peer belongs to
 * @ast_hash: AST hash value computed by HW
 * @is_wds: set when the map event is for a WDS (next-hop) entry
 *
 * Allocates and inserts a dp_ast_entry into the SoC AST hash and the
 * peer's AST list, replacing any stale non-MLD duplicate for the same
 * MAC, and notifies IPA of the new mapping.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type type;
	struct dp_peer *peer;
	struct dp_peer *old_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (is_wds)
		type = CDP_TXRX_AST_TYPE_WDS;
	else
		type = CDP_TXRX_AST_TYPE_STATIC;

	/* take a reference on the peer for the duration of this call */
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
	if (!peer) {
		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
			     soc, peer_id,
			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
		type = CDP_TXRX_AST_TYPE_MLD;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	/* only STATIC/MLD/SELF entries may be added before the peer
	 * reaches ACTIVE state
	 */
	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if (type != CDP_TXRX_AST_TYPE_STATIC &&
		    type != CDP_TXRX_AST_TYPE_MLD &&
		    type != CDP_TXRX_AST_TYPE_SELF) {
			status = QDF_STATUS_E_BUSY;
			goto fail;
		}
	}

	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, vdev->vdev_id, type,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/*
	 * In MLO scenario, there is possibility for same mac address
	 * on both link mac address and MLD mac address.
	 * Duplicate AST map needs to be handled for non-mld type.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
			      hw_peer_id, vdev_id,
			      QDF_MAC_ADDR_REF(mac_addr));

		/* the stale entry may belong to a different (old) peer;
		 * resolve it irrespective of its logical-delete state
		 */
		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						   DP_MOD_ID_AST);
		if (!old_peer) {
			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
				     soc, ast_entry->peer_id,
				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
			qdf_spin_unlock_bh(&soc->ast_lock);
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		/* drop the stale duplicate before creating the new entry */
		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
		dp_peer_free_ast_entry(soc, ast_entry);
		if (old_peer)
			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
	}

	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));
	if (!ast_entry) {
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_ASSERT(0);
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->type = type;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		/* a STA-mode VAP's own entry maps to its BSS */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		/* WDS entries are reached through a next-hop station */
		ast_entry->next_hop = 1;
		break;
	case CDP_TXRX_AST_TYPE_MLD:
		break;
	default:
		dp_peer_alert("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	/* publish the HW mapping and link the entry to its peer */
	ast_entry->ast_idx = hw_peer_id;
	ast_entry->ast_hash_value = ast_hash;
	ast_entry->peer_id = peer_id;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
			  ase_list_elem);

	dp_peer_map_ipa_evt(soc, peer, ast_entry, mac_addr);

	qdf_spin_unlock_bh(&soc->ast_lock);
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return status;
}
1507 
/**
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Binds the host AST entry (looked up by MAC+vdev) to the HW index,
 * handles the target's "could not add" (0xffff) WDS failure case, and
 * forwards a peer map event to the control plane.
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	/* with AST offload (and no DPDK config) the target owns the AST
	 * table; nothing to do on the host
	 */
	if (soc->ast_offload_support && !wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc))
		return QDF_STATUS_SUCCESS;

	if (!peer) {
		return QDF_STATUS_E_INVAL;
	}

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases like Auth attack on a repeater
		 * can result in the number of ast_entries falling
		 * in the same hash bucket to exceed the max_skid
		 * length supported by HW in root AP. In these cases
		 * the FW will return the hw_peer_id (ast_index) as
		 * 0xffff indicating HW could not add the entry in
		 * its table. Host has to delete the entry from its
		 * table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				/* save callback/cookie; the free callback
				 * must run after the lock is dropped
				 */
				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (ast_entry) {
		/* bind the host entry to its HW index and owning peer */
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		/* an entry must not be mapped to two peers at once */
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	/* proxy-STA vdevs and DPDK config get the map event even without
	 * a host AST entry
	 */
	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev) ||
	    wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_id,
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}
1634 
1635 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1636 			   struct cdp_soc *dp_soc,
1637 			   void *cookie,
1638 			   enum cdp_ast_free_status status)
1639 {
1640 	struct dp_ast_free_cb_params *param =
1641 		(struct dp_ast_free_cb_params *)cookie;
1642 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1643 	struct dp_peer *peer = NULL;
1644 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1645 
1646 	if (status != CDP_TXRX_AST_DELETED) {
1647 		qdf_mem_free(cookie);
1648 		return;
1649 	}
1650 
1651 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1652 				      0, param->vdev_id, DP_MOD_ID_AST);
1653 	if (peer) {
1654 		err = dp_peer_add_ast(soc, peer,
1655 				      &param->mac_addr.raw[0],
1656 				      param->type,
1657 				      param->flags);
1658 
1659 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1660 					param->type, err, false);
1661 
1662 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1663 	}
1664 	qdf_mem_free(cookie);
1665 }
1666 
/**
 * dp_peer_add_ast() - Allocate and add an AST entry for a peer
 * @soc: SoC handle
 * @peer: peer the AST entry belongs to
 * @mac_addr: MAC address of the AST entry
 * @type: AST entry type (STATIC/SELF/WDS/WDS_HM/WDS_HM_SEC/DA)
 * @flags: flags passed to the control path when programming the entry
 *
 * Handles duplicate detection (per-pdev when ast_override_support is
 * set, otherwise SoC-wide), the HMWDS replace-on-delete flow, and
 * pushes WDS-type entries to the target via the WDS wrapper.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;
	int status = 0;

	/* target owns the AST table when offload is enabled */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	/* must be checked before taking ast_lock: the lookup takes
	 * peer_hash_lock internally
	 */
	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* only STATIC/SELF entries may be added before the peer is ACTIVE */
	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If AST entry already exists , just return from here
	 * ast entry with same mac address can exist on different radios
	 * if ast_override support is enabled use search by pdev in this
	 * case
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* For HWMWDS_SEC entries can be added for same mac address
		 * do not check for existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}

			/* Add for HMWDS entry we cannot be ignored if there
			 * is AST entry with same mac address
			 *
			 * if ast entry exists with the requested mac address
			 * send a delete command and register callback which
			 * can take care of adding HMWDS ast entry on delete
			 * confirmation from target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return QDF_STATUS_E_NOMEM;
				}

				/* stash the add parameters; the free
				 * callback replays them once the target
				 * confirms the delete
				 */
				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				/* Call the saved callback*/
				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
				}
				return QDF_STATUS_E_AGAIN;
			}

			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->peer_id = HTT_INVALID_PEER;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		/* a STA-mode VAP's own entry maps to its BSS */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		/* secondary entries are linked to the peer immediately;
		 * they never get a map event from the target
		 */
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
		break;
	case CDP_TXRX_AST_TYPE_DA:
		/* DA entries hang off the VAP's BSS peer, not this peer */
		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
							  DP_MOD_ID_AST);
		if (!vap_bss_peer) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			qdf_mem_free(ast_entry);
			return QDF_STATUS_E_FAILURE;
		}
		peer = vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		dp_peer_err("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	/* program the target for all entry types except the locally
	 * terminated ones (STATIC/SELF/STA_BSS/WDS_HM_SEC)
	 */
	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		status = dp_add_wds_entry_wrapper(soc,
						  peer,
						  mac_addr,
						  flags,
						  ast_entry->type);

	if (vap_bss_peer)
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);

	qdf_spin_unlock_bh(&soc->ast_lock);
	return qdf_status_from_os_return(status);
}

qdf_export_symbol(dp_peer_add_ast);
1905 
/**
 * dp_peer_free_ast_entry() - Remove an AST entry from the hash and free it
 * @soc: SoC handle
 * @ast_entry: AST entry to free
 *
 * Clears the callback/cookie first so dp_peer_ast_cleanup() will not
 * invoke the owner's callback, removes the entry from the SoC AST hash
 * and frees the memory. Caller must hold soc->ast_lock.
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	/* nulling these makes the cleanup call below a no-op for the
	 * owner callback; only the hash unlink and free remain
	 */
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}
1926 
/**
 * dp_peer_unlink_ast_entry() - Detach an AST entry from its owning peer
 * @soc: SoC handle
 * @ast_entry: AST entry to unlink
 * @peer: peer the entry is currently linked to
 *
 * Removes the entry from the peer's AST list, clears the peer's
 * self_ast_entry pointer if needed, drops the soc->ast_table slot for
 * mapped entries and invalidates the entry's peer_id.
 * Caller must hold soc->ast_lock.
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
	if (!peer) {
		dp_info_rl("NULL peer");
		return;
	}

	/* entry was never linked to a peer (no map event received) */
	if (ast_entry->peer_id == HTT_INVALID_PEER) {
		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */

	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	ast_entry->peer_id = HTT_INVALID_PEER;
}
1962 
/**
 * dp_peer_del_ast() - Delete an AST entry (or mark it for deletion)
 * @soc: SoC handle
 * @ast_entry: AST entry to delete
 *
 * Marks the entry delete-in-progress and sends a WDS delete to the
 * target. Next-hop (WDS) entries that are freed in the unmap event are
 * kept until the target confirms; all other entries are unlinked and
 * freed here. Caller must hold soc->ast_lock.
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return;

	if (!ast_entry) {
		dp_info_rl("NULL AST entry");
		return;
	}

	/* deletion already kicked off by an earlier caller */
	if (ast_entry->delete_in_progress) {
		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}

	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	ast_entry->delete_in_progress = true;

	/* In teardown del ast is called after setting logical delete state
	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
	 * state
	 */
	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				       DP_MOD_ID_AST);

	dp_peer_ast_send_wds_del(soc, ast_entry, peer);

	/* Remove SELF and STATIC entries in teardown itself */
	if (!ast_entry->next_hop)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* if peer map v2 is enabled we are not freeing ast entry
	 * here and it is supposed to be freed in unmap event (after
	 * we receive delete confirmation from target)
	 *
	 * if peer_id is invalid we did not get the peer map event
	 * for the peer free ast entry from here only in this case
	 */
	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
		goto end;

	/* for WDS secondary entry ast_entry->next_hop would be set so
	 * unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry unmap notification from
	 * FW will not come. Hence unlinkling is done right here.
	 */

	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

end:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}
2029 
/**
 * dp_peer_update_ast() - Re-point a WDS AST entry at a new peer
 * @soc: SoC handle
 * @peer: new peer the AST entry should belong to
 * @ast_entry: AST entry to update
 * @flags: flags forwarded to the WDS update wrapper
 *
 * Moves the entry from its current peer's list to @peer's list and
 * pushes the update to the target. Caller must hold soc->ast_lock.
 *
 * NOTE(review): return domain is mixed — -1 (skipped), 0 (no-op) or the
 * WDS-wrapper status on success paths, but QDF_STATUS_E_INVAL when AST
 * offload is enabled; callers appear to treat non-zero as failure —
 * confirm before changing.
 *
 * Return: see note above
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
		      peer->vdev->vdev_id, flags,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	/* Do not send AST update in below cases
	 *  1) Ast entry delete has already triggered
	 *  2) Peer delete is already triggered
	 *  3) We did not get the HTT map for create event
	 */
	if (ast_entry->delete_in_progress ||
	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
	    !ast_entry->is_mapped)
		return ret;

	/* locally terminated entry types never roam */
	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	/*
	 * Avoids flood of WMI update messages sent to FW for same peer.
	 */
	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
	    (ast_entry->is_active))
		return 0;

	/* detach the entry from the peer it currently belongs to */
	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
					 DP_MOD_ID_AST);
	if (!old_peer)
		return 0;

	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);

	/* re-home the entry on the new peer and mark it a roamed WDS */
	ast_entry->peer_id = peer->peer_id;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = dp_update_wds_entry_wrapper(soc,
					  peer,
					  ast_entry->mac_addr.raw,
					  flags);

	return ret;
}
2093 
/* Return the pdev id the AST entry was created on */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}
2099 
/* Return the next_hop flag (non-zero for WDS/next-hop entries) */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}
2105 
/* Overwrite the AST entry's type classification */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
2112 
2113 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2114 			      struct dp_ast_entry *ast_entry,
2115 			      struct dp_peer *peer)
2116 {
2117 	bool delete_in_fw = false;
2118 
2119 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2120 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2121 		  __func__, ast_entry->type, ast_entry->pdev_id,
2122 		  ast_entry->vdev_id,
2123 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2124 		  ast_entry->next_hop, ast_entry->peer_id);
2125 
2126 	/*
2127 	 * If peer state is logical delete, the peer is about to get
2128 	 * teared down with a peer delete command to firmware,
2129 	 * which will cleanup all the wds ast entries.
2130 	 * So, no need to send explicit wds ast delete to firmware.
2131 	 */
2132 	if (ast_entry->next_hop) {
2133 		if (peer && dp_peer_state_cmp(peer,
2134 					      DP_PEER_STATE_LOGICAL_DELETE))
2135 			delete_in_fw = false;
2136 		else
2137 			delete_in_fw = true;
2138 
2139 		dp_del_wds_entry_wrapper(soc,
2140 					 ast_entry->vdev_id,
2141 					 ast_entry->mac_addr.raw,
2142 					 ast_entry->type,
2143 					 delete_in_fw);
2144 	}
2145 }
2146 #else
/* AST feature compiled out: no AST entries exist, freeing is a no-op */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
}
2151 
/* AST feature compiled out: nothing to unlink */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
}
2157 
/* AST feature compiled out: no AST hash table to remove from */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
}
2162 
/* AST feature compiled out: lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	return NULL;
}
2169 
/* AST feature compiled out: adding an entry always fails */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	return QDF_STATUS_E_FAILURE;
}
2178 
/* AST feature compiled out: nothing to delete */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}
2182 
/* AST feature compiled out: report "no update sent" (non-zero) to callers */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}
2188 
/* AST feature compiled out: soc-wide lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}
2194 
/* AST feature compiled out: typed soc-wide lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
					struct dp_soc *soc,
					uint8_t *ast_mac_addr,
					enum cdp_txrx_ast_entry_type type)
{
	return NULL;
}
2202 
/* AST feature compiled out: nothing to mirror in a host AST DB */
static inline
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	return QDF_STATUS_SUCCESS;
}
2211 
/* AST feature compiled out: per-pdev lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}
2218 
/* AST feature compiled out: no hash table to allocate, trivially succeed */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
2223 
/* AST feature compiled out: peer-map AST processing is a no-op */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	return QDF_STATUS_SUCCESS;
}
2234 
/* AST feature compiled out: no hash table to free */
void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}
2238 
/* AST feature compiled out: no entry state to update */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
}
2244 
/* AST feature compiled out: return 0xff as an invalid pdev id */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
2250 
/* AST feature compiled out: return 0xff as an invalid next_hop value */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
2256 
/* AST feature compiled out: no WDS delete to send to target */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
}
2262 
/* AST feature compiled out: no IPA unmap notification needed */
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
}
2268 #endif
2269 
2270 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2271 void dp_peer_ast_send_multi_wds_del(
2272 		struct dp_soc *soc, uint8_t vdev_id,
2273 		struct peer_del_multi_wds_entries *wds_list)
2274 {
2275 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2276 
2277 	if (cdp_soc && cdp_soc->ol_ops &&
2278 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2279 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2280 							  vdev_id, wds_list);
2281 }
2282 #endif
2283 
2284 #ifdef FEATURE_WDS
2285 /**
2286  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2287  * @soc: soc handle
2288  * @peer: peer handle
2289  *
2290  * Free all the wds ast entries associated with peer
2291  *
2292  * Return: Number of wds ast entries freed
2293  */
static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
					     struct dp_peer *peer)
{
	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
	struct dp_ast_entry *ast_entry, *temp_ast_entry;
	uint32_t num_ast = 0;

	TAILQ_INIT(&ast_local_list);
	qdf_spin_lock_bh(&soc->ast_lock);

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
		/* only next-hop (WDS) entries are counted for the
		 * comparison against the FW-reported delete count
		 */
		if (ast_entry->next_hop)
			num_ast++;

		if (ast_entry->is_mapped)
			soc->ast_table[ast_entry->ast_idx] = NULL;

		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
		DP_STATS_INC(soc, ast.deleted, 1);
		dp_peer_ast_hash_remove(soc, ast_entry);
		/* stash on a local list so delete callbacks and the
		 * frees run after ast_lock is dropped
		 */
		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
				  ase_list_elem);
		soc->num_ast_entries--;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* invoke registered delete callbacks outside ast_lock, then free */
	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
			   temp_ast_entry) {
		if (ast_entry->callback)
			ast_entry->callback(soc->ctrl_psoc,
					    dp_soc_to_cdp_soc(soc),
					    ast_entry->cookie,
					    CDP_TXRX_AST_DELETED);

		qdf_mem_free(ast_entry);
	}

	return num_ast;
}
2334 /**
2335  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2336  * @soc: soc handle
2337  * @peer: peer handle
2338  * @free_wds_count: number of wds entries freed by FW with peer delete
2339  *
2340  * Free all the wds ast entries associated with peer and compare with
2341  * the value received from firmware
2342  *
2343  * Return: Number of wds ast entries freed
2344  */
2345 static void
2346 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2347 			  uint32_t free_wds_count)
2348 {
2349 	uint32_t wds_deleted = 0;
2350 
2351 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2352 		return;
2353 
2354 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2355 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2356 	    (free_wds_count != wds_deleted)) {
2357 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2358 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT")number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d",
2359 			 peer, peer->mac_addr.raw, free_wds_count,
2360 			 wds_deleted);
2361 	}
2362 }
2363 
2364 #else
/**
 * dp_peer_clean_wds_entries() - free all AST entries attached to @peer
 * @soc: soc handle
 * @peer: peer handle
 * @free_wds_count: unused in this (non-FEATURE_WDS) variant
 *
 * Non-WDS build: no count comparison with FW, simply unlink and free
 * every AST entry under ast_lock.
 */
static void
dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
			  uint32_t free_wds_count)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

		/* drop the HW index reference before freeing the entry */
		if (ast_entry->is_mapped)
			soc->ast_table[ast_entry->ast_idx] = NULL;

		dp_peer_free_ast_entry(soc, ast_entry);
	}

	peer->self_ast_entry = NULL;
	qdf_spin_unlock_bh(&soc->ast_lock);
}
2385 #endif
2386 
2387 /**
2388  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2389  * @soc: soc handle
2390  * @peer: peer handle
2391  * @vdev_id: vdev_id
2392  * @mac_addr: mac address of the AST entry to searc and delete
2393  *
2394  * find the ast entry from the peer list using the mac address and free
2395  * the entry.
2396  *
2397  * Return: SUCCESS or NOENT
2398  */
static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t vdev_id,
					 uint8_t *mac_addr)
{
	struct dp_ast_entry *ast_entry;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_NOENT;
	} else if (ast_entry->is_mapped) {
		soc->ast_table[ast_entry->ast_idx] = NULL;
	}

	/* capture callback/cookie before the entry is freed so the
	 * notification can run after ast_lock is dropped
	 */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;


	dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* deliver the delete notification outside the lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}

	/* NOTE(review): declared int but returns QDF_STATUS values;
	 * callers test for zero/non-zero - confirm before changing.
	 */
	return QDF_STATUS_SUCCESS;
}
2442 
2443 void dp_peer_find_hash_erase(struct dp_soc *soc)
2444 {
2445 	int i;
2446 
2447 	/*
2448 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2449 	 * it's known that the soc is no longer in use.
2450 	 */
2451 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2452 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2453 			struct dp_peer *peer, *peer_next;
2454 
2455 			/*
2456 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2457 			 * memory access violation after peer is freed
2458 			 */
2459 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2460 				hash_list_elem, peer_next) {
2461 				/*
2462 				 * Don't remove the peer from the hash table -
2463 				 * that would modify the list we are currently
2464 				 * traversing, and it's not necessary anyway.
2465 				 */
2466 				/*
2467 				 * Artificially adjust the peer's ref count to
2468 				 * 1, so it will get deleted by
2469 				 * dp_peer_unref_delete.
2470 				 */
2471 				/* set to zero */
2472 				qdf_atomic_init(&peer->ref_cnt);
2473 				for (i = 0; i < DP_MOD_ID_MAX; i++)
2474 					qdf_atomic_init(&peer->mod_refs[i]);
2475 				/* incr to one */
2476 				qdf_atomic_inc(&peer->ref_cnt);
2477 				qdf_atomic_inc(&peer->mod_refs
2478 						[DP_MOD_ID_CONFIG]);
2479 				dp_peer_unref_delete(peer,
2480 						     DP_MOD_ID_CONFIG);
2481 			}
2482 		}
2483 	}
2484 }
2485 
2486 void dp_peer_ast_table_detach(struct dp_soc *soc)
2487 {
2488 	if (soc->ast_table) {
2489 		qdf_mem_free(soc->ast_table);
2490 		soc->ast_table = NULL;
2491 	}
2492 }
2493 
2494 void dp_peer_find_map_detach(struct dp_soc *soc)
2495 {
2496 	struct dp_peer *peer = NULL;
2497 	uint32_t i = 0;
2498 
2499 	if (soc->peer_id_to_obj_map) {
2500 		for (i = 0; i < soc->max_peer_id; i++) {
2501 			peer = soc->peer_id_to_obj_map[i];
2502 			if (peer)
2503 				dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2504 		}
2505 		qdf_mem_free(soc->peer_id_to_obj_map);
2506 		soc->peer_id_to_obj_map = NULL;
2507 		qdf_spinlock_destroy(&soc->peer_map_lock);
2508 	}
2509 }
2510 
2511 #ifndef AST_OFFLOAD_ENABLE
/**
 * dp_peer_find_attach() - attach all peer lookup structures (host-AST build)
 * @soc: soc handle
 *
 * Attaches the peer-id map, peer hash, AST table, AST hash and MEC hash
 * in order; on any failure the already-attached structures are torn down
 * in reverse via the goto chain below.
 *
 * Return: QDF_STATUS_SUCCESS or the first failing attach status
 */
QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
{
	QDF_STATUS status;

	status = dp_peer_find_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_find_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto map_detach;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		dp_soc_wds_attach(soc);
		return status;
	}

	/* unwind in reverse attach order */
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);
hash_detach:
	dp_peer_find_hash_detach(soc);
map_detach:
	dp_peer_find_map_detach(soc);

	return status;
}
2548 #else
2549 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2550 {
2551 	QDF_STATUS status;
2552 
2553 	status = dp_peer_find_map_attach(soc);
2554 	if (!QDF_IS_STATUS_SUCCESS(status))
2555 		return status;
2556 
2557 	status = dp_peer_find_hash_attach(soc);
2558 	if (!QDF_IS_STATUS_SUCCESS(status))
2559 		goto map_detach;
2560 
2561 	return status;
2562 map_detach:
2563 	dp_peer_find_map_detach(soc);
2564 
2565 	return status;
2566 }
2567 #endif
2568 
2569 #ifdef REO_SHARED_QREF_TABLE_EN
/*
 * dp_peer_rx_reo_shared_qaddr_delete() - clear a peer's entries in the
 * REO shared queue-address LUT and record the deletion in a debug ring.
 * Skipped for MLO link peers (the MLD peer owns the LUT rows).
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer)
{
	uint8_t tid;
	uint16_t peer_id;
	uint32_t max_list_size;

	max_list_size = soc->wlan_cfg_ctx->qref_control_size;

	peer_id = peer->peer_id;

	/* NOTE(review): boundary uses '>' - assumes max_peer_id is a
	 * valid (inclusive) id; confirm against LUT sizing.
	 */
	if (peer_id > soc->max_peer_id)
		return;
	if (IS_MLO_DP_LINK_PEER(peer))
		return;

	/* record the deletion in a circular debug history when enabled */
	if (max_list_size) {
		unsigned long curr_ts = qdf_get_system_timestamp();
		struct dp_peer *primary_peer = peer;
		uint16_t chip_id = 0xFFFF;	/* placeholder chip id */
		uint32_t qref_index;

		qref_index = soc->shared_qaddr_del_idx;

		soc->list_shared_qaddr_del[qref_index].peer_id =
							  primary_peer->peer_id;
		soc->list_shared_qaddr_del[qref_index].ts_qaddr_del = curr_ts;
		soc->list_shared_qaddr_del[qref_index].chip_id = chip_id;
		soc->shared_qaddr_del_idx++;

		/* wrap the ring index */
		if (soc->shared_qaddr_del_idx == max_list_size)
			soc->shared_qaddr_del_idx = 0;
	}

	/* zero the LUT rows for every TID of this peer */
	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
			hal_reo_shared_qaddr_write(soc->hal_soc,
						   peer_id, tid, 0);
		}
	}
}
2611 #endif
2612 
2613 /**
2614  * dp_peer_find_add_id() - map peer_id with peer
2615  * @soc: soc handle
2616  * @peer_mac_addr: peer mac address
2617  * @peer_id: peer id to be mapped
2618  * @hw_peer_id: HW ast index
2619  * @vdev_id: vdev_id
2620  * @peer_type: peer type (link or MLD)
2621  *
2622  * return: peer in success
2623  *         NULL in failure
2624  */
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct cdp_peer_info peer_info = { 0 };

	QDF_ASSERT(peer_id <= soc->max_peer_id);
	/* check if there's already a peer object with this MAC address */
	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
				 false, peer_type);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
	dp_peer_debug("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		      soc, peer, peer_id, vdev_id,
		      QDF_MAC_ADDR_REF(peer_mac_addr));

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		dp_peer_info("%pK: ref_cnt: %d", soc,
			     qdf_atomic_read(&peer->ref_cnt));

		/*
		 * if peer is in logical delete CP triggered delete before map
		 * is received ignore this event
		 */
		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
				 vdev_id);
			return NULL;
		}

		/* a valid peer_id here means a duplicate map - bail out */
		if (peer->peer_id == HTT_INVALID_PEER) {
			if (!IS_MLO_DP_MLD_PEER(peer))
				dp_monitor_peer_tid_peer_id_update(soc, peer,
								   peer_id);
		} else {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			QDF_ASSERT(0);
			return NULL;
		}
		/* the CONFIG reference taken above is handed to the id->obj
		 * map; it is released later on peer unmap
		 */
		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
		if (soc->arch_ops.dp_partner_chips_map)
			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);

		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
		return peer;
	}

	return NULL;
}
2679 
2680 #ifdef WLAN_FEATURE_11BE_MLO
2681 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/* Derive the MLD peer id: mask the link peer id and set the soc-specific
 * ML-valid bit (reduced peer-id field-width build).
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
{
	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
}
2686 #else
/* Derive the MLD peer id by setting the HTT ML-peer-valid bit */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
{
	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
}
2691 #endif
2692 
/*
 * dp_rx_mlo_peer_map_handler() - handle the FW MLO (MLD) peer map event:
 * bind the generated ml_peer_id to the MLD peer object, fix up bss-peer
 * state, restore a missing self AST entry and perform deferred REO qref
 * LUT writes on the primary link soc.
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info,
			   struct dp_mlo_link_info *mlo_link_info)
{
	struct dp_peer *peer = NULL;
	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
	uint8_t vdev_id = 0;
	uint8_t is_wds = 0;
	int i;
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
	QDF_STATUS err = QDF_STATUS_SUCCESS;
	struct dp_soc *primary_soc = NULL;

	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
					       NULL, peer_mac_addr,
					       1, peer_id, ml_peer_id, 0,
					       vdev_id);

	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
		soc, peer_id, ml_peer_id,
		QDF_MAC_ADDR_REF(peer_mac_addr));

	/* Get corresponding vdev ID for the peer based
	 * on chip ID obtained from mlo peer_map event
	 */
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		if (mlo_link_info[i].peer_chip_id == dp_get_chip_id(soc)) {
			vdev_id = mlo_link_info[i].vdev_id;
			break;
		}
	}

	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
	if (peer) {
		/* STA vdev whose MLD MAC differs from the peer MAC:
		 * this MLD peer represents the BSS
		 */
		if (wlan_op_mode_sta == peer->vdev->opmode &&
		    qdf_mem_cmp(peer->mac_addr.raw,
				peer->vdev->mld_mac_addr.raw,
				QDF_MAC_ADDR_SIZE) != 0) {
			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
			peer->bss_peer = 1;
			if (peer->txrx_peer)
				peer->txrx_peer->bss_peer = 1;
		}

		if (peer->vdev->opmode == wlan_op_mode_sta) {
			peer->vdev->bss_ast_hash = ast_hash;
			peer->vdev->bss_ast_idx = hw_peer_id;
		}

		/* Add ast entry incase self ast entry is
		 * deleted due to DP CP sync issue
		 *
		 * self_ast_entry is modified in peer create
		 * and peer unmap path which cannot run in
		 * parllel with peer map, no lock need before
		 * referring it
		 */
		if (!peer->self_ast_entry) {
			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
				QDF_MAC_ADDR_REF(peer_mac_addr));
			dp_peer_add_ast(soc, peer,
					peer_mac_addr,
					type, 0);
		}
		/* If peer setup and hence rx_tid setup got called
		 * before htt peer map then Qref write to LUT did not
		 * happen in rx_tid setup as peer_id was invalid.
		 * So defer Qref write to peer map handler. Check if
		 * rx_tid qdesc for tid 0 is already setup and perform
		 * qref write to LUT for Tid 0 and 16.
		 *
		 * Peer map could be obtained on assoc link, hence
		 * change to primary link's soc.
		 */
		primary_soc = peer->vdev->pdev->soc;
		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   0,
						   peer->rx_tid[0].hw_qdesc_paddr);
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   DP_NON_QOS_TID,
						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
		}
	}

	/* no peer found: fall back to the event soc for the host AST DB */
	if (!primary_soc)
		primary_soc = soc;

	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
			      vdev_id, ast_hash, is_wds);

	/*
	 * If AST offload and host AST DB is enabled, populate AST entries on
	 * host based on mlo peer map event from FW
	 */
	if (peer && soc->ast_offload_support && soc->host_ast_db_enable) {
		dp_peer_host_add_map_ast(primary_soc, ml_peer_id, peer_mac_addr,
					 hw_peer_id, vdev_id,
					 ast_hash, is_wds);
	}

	return err;
}
2804 #endif
2805 
2806 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2807 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2808 			      uint8_t *peer_mac_addr)
2809 {
2810 	struct dp_vdev *vdev = NULL;
2811 
2812 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2813 	if (vdev) {
2814 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2815 				QDF_MAC_ADDR_SIZE) == 0) {
2816 			vdev->roaming_peer_status =
2817 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2818 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
2819 				     QDF_MAC_ADDR_SIZE);
2820 		}
2821 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2822 	}
2823 }
2824 #endif
2825 
2826 #ifdef WLAN_SUPPORT_PPEDS
/* Forward the PPE-DS AST-index cache map/unmap request to the arch layer */
static void
dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
				     bool peer_map)
{
	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
								   peer_map);
}
2835 #else
/* PPE-DS support compiled out: no-op stub */
static void
dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
				     bool peer_map)
{
}
2841 #endif
2842 
2843 QDF_STATUS
2844 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2845 		       uint16_t hw_peer_id, uint8_t vdev_id,
2846 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2847 		       uint8_t is_wds)
2848 {
2849 	struct dp_peer *peer = NULL;
2850 	struct dp_vdev *vdev = NULL;
2851 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2852 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2853 
2854 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2855 					       NULL, peer_mac_addr, 1, peer_id,
2856 					       0, 0, vdev_id);
2857 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2858 		soc, peer_id, hw_peer_id,
2859 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2860 
2861 	/* Peer map event for WDS ast entry get the peer from
2862 	 * obj map
2863 	 */
2864 	if (is_wds) {
2865 		if (!soc->ast_offload_support) {
2866 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2867 						     DP_MOD_ID_HTT);
2868 
2869 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2870 					      hw_peer_id,
2871 					      vdev_id, ast_hash, is_wds);
2872 			if (peer)
2873 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2874 		}
2875 	} else {
2876 		/*
2877 		 * It's the responsibility of the CP and FW to ensure
2878 		 * that peer is created successfully. Ideally DP should
2879 		 * not hit the below condition for directly associated
2880 		 * peers.
2881 		 */
2882 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
2883 		    (hw_peer_id >=
2884 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
2885 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2886 			qdf_assert_always(0);
2887 		}
2888 
2889 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2890 					   hw_peer_id, vdev_id,
2891 					   CDP_LINK_PEER_TYPE);
2892 
2893 		if (peer) {
2894 			bool peer_map = true;
2895 
2896 			/* Updating ast_hash and ast_idx in peer level */
2897 			peer->ast_hash = ast_hash;
2898 			peer->ast_idx = hw_peer_id;
2899 			vdev = peer->vdev;
2900 			/* Only check for STA Vdev and peer is not for TDLS */
2901 			if (wlan_op_mode_sta == vdev->opmode &&
2902 			    !peer->is_tdls_peer) {
2903 				if (qdf_mem_cmp(peer->mac_addr.raw,
2904 						vdev->mac_addr.raw,
2905 						QDF_MAC_ADDR_SIZE) != 0) {
2906 					dp_info("%pK: STA vdev bss_peer", soc);
2907 					peer->bss_peer = 1;
2908 					if (peer->txrx_peer)
2909 						peer->txrx_peer->bss_peer = 1;
2910 				}
2911 
2912 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2913 					ast_hash, hw_peer_id);
2914 				vdev->bss_ast_hash = ast_hash;
2915 				vdev->bss_ast_idx = hw_peer_id;
2916 
2917 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2918 								     peer_map);
2919 			}
2920 
2921 			/* Add ast entry incase self ast entry is
2922 			 * deleted due to DP CP sync issue
2923 			 *
2924 			 * self_ast_entry is modified in peer create
2925 			 * and peer unmap path which cannot run in
2926 			 * parllel with peer map, no lock need before
2927 			 * referring it
2928 			 */
2929 			if (!soc->ast_offload_support &&
2930 				!peer->self_ast_entry) {
2931 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2932 					QDF_MAC_ADDR_REF(peer_mac_addr));
2933 				dp_peer_add_ast(soc, peer,
2934 						peer_mac_addr,
2935 						type, 0);
2936 			}
2937 
2938 			/* If peer setup and hence rx_tid setup got called
2939 			 * before htt peer map then Qref write to LUT did
2940 			 * not happen in rx_tid setup as peer_id was invalid.
2941 			 * So defer Qref write to peer map handler. Check if
2942 			 * rx_tid qdesc for tid 0 is already setup perform qref
2943 			 * write to LUT for Tid 0 and 16.
2944 			 */
2945 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2946 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2947 			    !IS_MLO_DP_LINK_PEER(peer)) {
2948 				add_entry_write_list(soc, peer, 0);
2949 				hal_reo_shared_qaddr_write(soc->hal_soc,
2950 							   peer_id,
2951 							   0,
2952 							   peer->rx_tid[0].hw_qdesc_paddr);
2953 				add_entry_write_list(soc, peer, DP_NON_QOS_TID);
2954 				hal_reo_shared_qaddr_write(soc->hal_soc,
2955 							   peer_id,
2956 							   DP_NON_QOS_TID,
2957 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2958 			}
2959 		}
2960 
2961 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2962 				      vdev_id, ast_hash, is_wds);
2963 	}
2964 
2965 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2966 
2967 	/*
2968 	 * If AST offload and host AST DB is enabled, populate AST entries on
2969 	 * host based on peer map event from FW
2970 	 */
2971 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2972 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
2973 					 hw_peer_id, vdev_id,
2974 					 ast_hash, is_wds);
2975 	}
2976 
2977 	return err;
2978 }
2979 
/*
 * dp_rx_peer_unmap_handler() - handle the HTT peer unmap event: free the
 * peer's AST state, clear its REO qref LUT rows, remove it from the
 * peer-id map and drop the references keeping the peer object alive.
 */
void
dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			 uint8_t vdev_id, uint8_t *mac_addr,
			 uint8_t is_wds, uint32_t free_wds_count)
{
	struct dp_peer *peer;
	struct dp_vdev *vdev = NULL;

	/*
	 * If FW AST offload is enabled and host AST DB is enabled,
	 * the AST entries are created during peer map from FW.
	 */
	if (soc->ast_offload_support && is_wds) {
		if (!soc->host_ast_db_enable)
			return;
	}

	/* __ variant: take the reference regardless of peer state */
	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		dp_err("Received unmap event for invalid peer_id %u",
		       peer_id);
		return;
	}

	vdev = peer->vdev;

	if (peer->txrx_peer) {
		struct cdp_txrx_peer_params_update params = {0};

		params.vdev_id = vdev->vdev_id;
		params.peer_mac = peer->mac_addr.raw;
		params.chip_id = dp_get_chip_id(soc);
		params.pdev_id = vdev->pdev->pdev_id;

		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
				     (void *)&params, peer_id,
				     WDI_NO_VAL, vdev->pdev->pdev_id);
	}

	/*
	 * In scenario where assoc peer soc id is different from
	 * primary soc id, reset the soc to point to primary psoc.
	 * Since map is received on primary soc, the unmap should
	 * also delete ast on primary soc.
	 */
	soc = peer->vdev->pdev->soc;

	/* If V2 Peer map messages are enabled AST entry has to be
	 * freed here
	 */
	if (is_wds) {
		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
						   mac_addr)) {
			/* WDS-only unmap: release the HTT ref and return
			 * without tearing down the peer itself
			 */
			dp_peer_unmap_ipa_evt(soc, peer_id, vdev_id, mac_addr);
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
			return;
		}

		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
			  peer, peer->peer_id,
			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
			  is_wds);

		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
		return;
	}

	dp_peer_clean_wds_entries(soc, peer, free_wds_count);

	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
					       peer, mac_addr, 0, peer_id,
					       0, 0, vdev_id);
	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	/* Clear entries in Qref LUT */
	/* TODO: Check if this is to be called from
	 * dp_peer_delete for MLO case if there is race between
	 * new peer id assignment and still not having received
	 * peer unmap for MLD peer with same peer id.
	 */
	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);

	vdev = peer->vdev;

	/* only if peer is in STA mode and not tdls peer */
	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
		bool peer_map = false;

		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
	}

	dp_peer_find_id_to_obj_remove(soc, peer_id);

	if (soc->arch_ops.dp_partner_chips_unmap)
		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);

	peer->peer_id = HTT_INVALID_PEER;

	/*
	 *	 Reset ast flow mapping table
	 */
	if (!soc->ast_offload_support)
		dp_peer_reset_flowq_map(peer);

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
				peer_id, vdev_id, mac_addr);
	}

	dp_update_vdev_stats_on_peer_unmap(vdev, peer);

	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
	/* drop the reference taken at the top of this handler */
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
}
3107 
3108 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
3109 enum dp_bands dp_freq_to_band(qdf_freq_t freq)
3110 {
3111 	if (REG_IS_24GHZ_CH_FREQ(freq))
3112 		return DP_BAND_2GHZ;
3113 	else if (REG_IS_5GHZ_FREQ(freq) || REG_IS_49GHZ_FREQ(freq))
3114 		return DP_BAND_5GHZ;
3115 	else if (REG_IS_6GHZ_FREQ(freq))
3116 		return DP_BAND_6GHZ;
3117 	return DP_BAND_INVALID;
3118 }
3119 
3120 void dp_map_link_id_band(struct dp_peer *peer)
3121 {
3122 	struct dp_txrx_peer *txrx_peer = NULL;
3123 	enum dp_bands band;
3124 
3125 	txrx_peer = dp_get_txrx_peer(peer);
3126 	if (txrx_peer) {
3127 		band = dp_freq_to_band(peer->freq);
3128 		txrx_peer->band[peer->link_id + 1] = band;
3129 		dp_info("Band(Freq: %u): %u mapped to Link ID: %u",
3130 			peer->freq, band, peer->link_id);
3131 	} else {
3132 		dp_info("txrx_peer NULL for peer: " QDF_MAC_ADDR_FMT,
3133 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3134 	}
3135 }
3136 
3137 QDF_STATUS
3138 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info)
3139 {
3140 	struct dp_peer *peer = NULL;
3141 	struct cdp_peer_info peer_info = { 0 };
3142 
3143 	QDF_ASSERT(info->peer_id <= soc->max_peer_id);
3144 
3145 	DP_PEER_INFO_PARAMS_INIT(&peer_info, info->vdev_id, info->peer_mac_addr,
3146 				 false, CDP_LINK_PEER_TYPE);
3147 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
3148 
3149 	if (!peer) {
3150 		dp_err("peer NULL, id %u, MAC " QDF_MAC_ADDR_FMT ", vdev_id %u",
3151 		       info->peer_id, QDF_MAC_ADDR_REF(info->peer_mac_addr),
3152 		       info->vdev_id);
3153 
3154 		return QDF_STATUS_E_FAILURE;
3155 	}
3156 
3157 	peer->link_id = info->link_id;
3158 	peer->link_id_valid = info->link_id_valid;
3159 
3160 	if (peer->freq)
3161 		dp_map_link_id_band(peer);
3162 
3163 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3164 
3165 	return QDF_STATUS_SUCCESS;
3166 }
3167 #endif
3168 #ifdef WLAN_FEATURE_11BE_MLO
3169 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3170 {
3171 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3172 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3173 	uint8_t vdev_id = DP_VDEV_ALL;
3174 	uint8_t is_wds = 0;
3175 
3176 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3177 					       NULL, mac_addr, 0, peer_id,
3178 					       0, 0, vdev_id);
3179 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3180 		soc, peer_id);
3181 
3182 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3183 				 mac_addr, is_wds,
3184 				 DP_PEER_WDS_COUNT_INVALID);
3185 }
3186 #endif
3187 
#ifndef AST_OFFLOAD_ENABLE
/*
 * dp_peer_find_detach() - tear down all peer lookup infrastructure
 * @soc: DP soc handle
 *
 * Host-managed AST build: releases WDS state, the peer id map, the peer
 * hash table, AST hash/table and the MEC hash, in that order.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_mec_hash_detach(soc);
}
#else
/*
 * dp_peer_find_detach() - tear down peer lookup infrastructure
 * @soc: DP soc handle
 *
 * AST-offload build: only the peer id map and peer hash table exist on
 * the host, so only those are detached.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
}
#endif
3207 
3208 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3209 {
3210 	dp_peer_rx_tid_setup(peer);
3211 
3212 	peer->active_ba_session_cnt = 0;
3213 	peer->hw_buffer_size = 0;
3214 	peer->kill_256_sessions = 0;
3215 
3216 	/*
3217 	 * Set security defaults: no PN check, no security. The target may
3218 	 * send a HTT SEC_IND message to overwrite these defaults.
3219 	 */
3220 	if (peer->txrx_peer)
3221 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
3222 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
3223 				cdp_sec_type_none;
3224 }
3225 
/*
 * dp_peer_cleanup() - release a peer's rx/monitor state and notify the
 * control path that the DP reference is being dropped.
 * @vdev: parent vdev of the peer
 * @peer: peer being cleaned up
 *
 * Order matters: monitor tx state, rx reorder queues and rx tids are
 * torn down before MLO linkage is removed and the ol_ops callback runs.
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	enum wlan_op_mode vdev_opmode;
	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* save vdev related member in case vdev freed */
	vdev_opmode = vdev->opmode;

	/* Monitor tx state exists on link peers only, not the MLD peer */
	if (!IS_MLO_DP_MLD_PEER(peer))
		dp_monitor_peer_tx_cleanup(vdev, peer);

	if (vdev_opmode != wlan_op_mode_monitor)
	/* cleanup the Rx reorder queues for this peer */
		dp_peer_rx_cleanup(vdev, peer);

	dp_peer_rx_tids_destroy(peer);

	/* Break MLO link <-> MLD association before the control-path call */
	if (IS_MLO_DP_LINK_PEER(peer))
		dp_link_peer_del_mld_peer(peer);
	if (IS_MLO_DP_MLD_PEER(peer))
		dp_mld_peer_deinit_link_peers_info(peer);

	/* copy the vdev MAC now: the callback below may outlive the vdev */
	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	if (soc->cdp_soc.ol_ops->peer_unref_delete)
		soc->cdp_soc.ol_ops->peer_unref_delete(
				soc->ctrl_psoc,
				vdev->pdev->pdev_id,
				peer->mac_addr.raw, vdev_mac_addr,
				vdev_opmode);
}
3260 
3261 QDF_STATUS
3262 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3263 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3264 			  bool is_unicast)
3265 {
3266 	struct dp_peer *peer =
3267 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3268 						       peer_mac, 0, vdev_id,
3269 						       DP_MOD_ID_CDP);
3270 	int sec_index;
3271 
3272 	if (!peer) {
3273 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3274 		return QDF_STATUS_E_FAILURE;
3275 	}
3276 
3277 	if (!peer->txrx_peer) {
3278 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3279 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
3280 		return QDF_STATUS_E_FAILURE;
3281 	}
3282 
3283 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3284 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3285 		     is_unicast ? "ucast" : "mcast", sec_type);
3286 
3287 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3288 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3289 
3290 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3291 
3292 	return QDF_STATUS_SUCCESS;
3293 }
3294 
3295 void
3296 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3297 		      enum cdp_sec_type sec_type, int is_unicast,
3298 		      u_int32_t *michael_key,
3299 		      u_int32_t *rx_pn)
3300 {
3301 	struct dp_peer *peer;
3302 	struct dp_txrx_peer *txrx_peer;
3303 	int sec_index;
3304 
3305 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3306 	if (!peer) {
3307 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3308 			    peer_id);
3309 		return;
3310 	}
3311 	txrx_peer = dp_get_txrx_peer(peer);
3312 	if (!txrx_peer) {
3313 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
3314 			    peer_id);
3315 		return;
3316 	}
3317 
3318 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3319 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3320 			  is_unicast ? "ucast" : "mcast", sec_type);
3321 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3322 
3323 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3324 #ifdef notyet /* TODO: See if this is required for defrag support */
3325 	/* michael key only valid for TKIP, but for simplicity,
3326 	 * copy it anyway
3327 	 */
3328 	qdf_mem_copy(
3329 		&peer->txrx_peer->security[sec_index].michael_key[0],
3330 		michael_key,
3331 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
3332 #ifdef BIG_ENDIAN_HOST
3333 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
3334 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
3335 #endif /* BIG_ENDIAN_HOST */
3336 #endif
3337 
3338 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3339 	if (sec_type != cdp_sec_type_wapi) {
3340 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3341 	} else {
3342 		for (i = 0; i < DP_MAX_TIDS; i++) {
3343 			/*
3344 			 * Setting PN valid bit for WAPI sec_type,
3345 			 * since WAPI PN has to be started with predefined value
3346 			 */
3347 			peer->tids_last_pn_valid[i] = 1;
3348 			qdf_mem_copy(
3349 				(u_int8_t *) &peer->tids_last_pn[i],
3350 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3351 			peer->tids_last_pn[i].pn128[1] =
3352 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3353 			peer->tids_last_pn[i].pn128[0] =
3354 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3355 		}
3356 	}
3357 #endif
3358 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3359 	 * all security types and last pn for WAPI) once REO command API
3360 	 * is available
3361 	 */
3362 
3363 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3364 }
3365 
3366 #ifdef QCA_PEER_EXT_STATS
3367 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
3368 					 struct dp_txrx_peer *txrx_peer)
3369 {
3370 	uint8_t tid, ctx_id;
3371 
3372 	if (!soc || !txrx_peer) {
3373 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
3374 		return QDF_STATUS_E_INVAL;
3375 	}
3376 
3377 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3378 		return QDF_STATUS_SUCCESS;
3379 
3380 	/*
3381 	 * Allocate memory for peer extended stats.
3382 	 */
3383 	txrx_peer->delay_stats =
3384 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
3385 	if (!txrx_peer->delay_stats) {
3386 		dp_err("Peer extended stats obj alloc failed!!");
3387 		return QDF_STATUS_E_NOMEM;
3388 	}
3389 
3390 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3391 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3392 			struct cdp_delay_tx_stats *tx_delay =
3393 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
3394 			struct cdp_delay_rx_stats *rx_delay =
3395 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
3396 
3397 			dp_hist_init(&tx_delay->tx_swq_delay,
3398 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3399 			dp_hist_init(&tx_delay->hwtx_delay,
3400 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3401 			dp_hist_init(&rx_delay->to_stack_delay,
3402 				     CDP_HIST_TYPE_REAP_STACK);
3403 		}
3404 	}
3405 
3406 	return QDF_STATUS_SUCCESS;
3407 }
3408 
3409 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
3410 				     struct dp_txrx_peer *txrx_peer)
3411 {
3412 	if (!txrx_peer) {
3413 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3414 		return;
3415 	}
3416 
3417 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3418 		return;
3419 
3420 	if (!txrx_peer->delay_stats)
3421 		return;
3422 
3423 	qdf_mem_free(txrx_peer->delay_stats);
3424 	txrx_peer->delay_stats = NULL;
3425 }
3426 
3427 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3428 {
3429 	if (txrx_peer->delay_stats)
3430 		qdf_mem_zero(txrx_peer->delay_stats,
3431 			     sizeof(struct dp_peer_delay_stats));
3432 }
3433 #endif
3434 
3435 #ifdef WLAN_PEER_JITTER
3436 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
3437 					  struct dp_txrx_peer *txrx_peer)
3438 {
3439 	if (!pdev || !txrx_peer) {
3440 		dp_warn("Null pdev or peer");
3441 		return QDF_STATUS_E_INVAL;
3442 	}
3443 
3444 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3445 		return QDF_STATUS_SUCCESS;
3446 
3447 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3448 		/*
3449 		 * Allocate memory on per tid basis when nss is enabled
3450 		 */
3451 		txrx_peer->jitter_stats =
3452 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3453 					* DP_MAX_TIDS);
3454 	} else {
3455 		/*
3456 		 * Allocate memory on per tid per ring basis
3457 		 */
3458 		txrx_peer->jitter_stats =
3459 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3460 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3461 	}
3462 
3463 	if (!txrx_peer->jitter_stats) {
3464 		dp_warn("Jitter stats obj alloc failed!!");
3465 		return QDF_STATUS_E_NOMEM;
3466 	}
3467 
3468 	return QDF_STATUS_SUCCESS;
3469 }
3470 
3471 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
3472 				      struct dp_txrx_peer *txrx_peer)
3473 {
3474 	if (!pdev || !txrx_peer) {
3475 		dp_warn("Null pdev or peer");
3476 		return;
3477 	}
3478 
3479 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3480 		return;
3481 
3482 	if (txrx_peer->jitter_stats) {
3483 		qdf_mem_free(txrx_peer->jitter_stats);
3484 		txrx_peer->jitter_stats = NULL;
3485 	}
3486 }
3487 
3488 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3489 {
3490 	struct cdp_peer_tid_stats *jitter_stats = NULL;
3491 
3492 	if (!txrx_peer) {
3493 		dp_warn("Null peer");
3494 		return;
3495 	}
3496 
3497 	if (!wlan_cfg_is_peer_jitter_stats_enabled(txrx_peer->
3498 						   vdev->
3499 						   pdev->soc->wlan_cfg_ctx))
3500 		return;
3501 
3502 	jitter_stats = txrx_peer->jitter_stats;
3503 	if (!jitter_stats)
3504 		return;
3505 
3506 	if (wlan_cfg_get_dp_pdev_nss_enabled(txrx_peer->
3507 					     vdev->pdev->wlan_cfg_ctx))
3508 		qdf_mem_zero(jitter_stats,
3509 			     sizeof(struct cdp_peer_tid_stats) *
3510 			     DP_MAX_TIDS);
3511 
3512 	else
3513 		qdf_mem_zero(jitter_stats,
3514 			     sizeof(struct cdp_peer_tid_stats) *
3515 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3516 
3517 }
3518 #endif
3519 
3520 #ifdef DP_PEER_EXTENDED_API
3521 /**
3522  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
3523  * @soc: DP soc handle
3524  * @txrx_peer: Core txrx_peer handle
3525  * @set_bw: enum of bandwidth to be set for this peer connection
3526  *
3527  * Return: None
3528  */
3529 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
3530 			   enum cdp_peer_bw set_bw)
3531 {
3532 	if (!txrx_peer)
3533 		return;
3534 
3535 	txrx_peer->bw = set_bw;
3536 
3537 	switch (set_bw) {
3538 	case CDP_160_MHZ:
3539 	case CDP_320_MHZ:
3540 		txrx_peer->mpdu_retry_threshold =
3541 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
3542 		break;
3543 	case CDP_20_MHZ:
3544 	case CDP_40_MHZ:
3545 	case CDP_80_MHZ:
3546 	default:
3547 		txrx_peer->mpdu_retry_threshold =
3548 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
3549 		break;
3550 	}
3551 
3552 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
3553 		txrx_peer->peer_id, txrx_peer->bw,
3554 		txrx_peer->mpdu_retry_threshold);
3555 }
3556 
3557 #ifdef WLAN_FEATURE_11BE_MLO
3558 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3559 			    struct ol_txrx_desc_type *sta_desc)
3560 {
3561 	struct dp_peer *peer;
3562 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3563 
3564 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3565 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3566 
3567 	if (!peer)
3568 		return QDF_STATUS_E_FAULT;
3569 
3570 	qdf_spin_lock_bh(&peer->peer_info_lock);
3571 	peer->state = OL_TXRX_PEER_STATE_CONN;
3572 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3573 
3574 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3575 
3576 	dp_rx_flush_rx_cached(peer, false);
3577 
3578 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3579 		dp_peer_info("register for mld peer" QDF_MAC_ADDR_FMT,
3580 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
3581 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
3582 		peer->mld_peer->state = peer->state;
3583 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
3584 		dp_rx_flush_rx_cached(peer->mld_peer, false);
3585 	}
3586 
3587 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3588 
3589 	return QDF_STATUS_SUCCESS;
3590 }
3591 
3592 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3593 				enum ol_txrx_peer_state state)
3594 {
3595 	struct dp_peer *peer;
3596 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3597 
3598 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3599 				       DP_MOD_ID_CDP);
3600 	if (!peer) {
3601 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
3602 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3603 		return QDF_STATUS_E_FAILURE;
3604 	}
3605 	peer->state = state;
3606 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3607 
3608 	if (peer->txrx_peer)
3609 		peer->txrx_peer->authorize = peer->authorize;
3610 
3611 	dp_peer_info("peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
3612 		     peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3613 		     peer->state);
3614 
3615 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3616 		peer->mld_peer->state = peer->state;
3617 		peer->mld_peer->txrx_peer->authorize = peer->authorize;
3618 		dp_peer_info("mld peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
3619 			     peer->mld_peer,
3620 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
3621 			     peer->mld_peer->state);
3622 	}
3623 
3624 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3625 	 * Decrement it here.
3626 	 */
3627 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3628 
3629 	return QDF_STATUS_SUCCESS;
3630 }
3631 #else
3632 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3633 			    struct ol_txrx_desc_type *sta_desc)
3634 {
3635 	struct dp_peer *peer;
3636 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3637 
3638 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3639 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3640 
3641 	if (!peer)
3642 		return QDF_STATUS_E_FAULT;
3643 
3644 	qdf_spin_lock_bh(&peer->peer_info_lock);
3645 	peer->state = OL_TXRX_PEER_STATE_CONN;
3646 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3647 
3648 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3649 
3650 	dp_rx_flush_rx_cached(peer, false);
3651 
3652 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3653 
3654 	return QDF_STATUS_SUCCESS;
3655 }
3656 
3657 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3658 				enum ol_txrx_peer_state state)
3659 {
3660 	struct dp_peer *peer;
3661 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3662 
3663 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3664 				       DP_MOD_ID_CDP);
3665 	if (!peer) {
3666 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
3667 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3668 		return QDF_STATUS_E_FAILURE;
3669 	}
3670 	peer->state = state;
3671 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3672 
3673 	if (peer->txrx_peer)
3674 		peer->txrx_peer->authorize = peer->authorize;
3675 
3676 	dp_info("peer %pK state %d", peer, peer->state);
3677 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3678 	 * Decrement it here.
3679 	 */
3680 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3681 
3682 	return QDF_STATUS_SUCCESS;
3683 }
3684 #endif
3685 
3686 QDF_STATUS
3687 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3688 	      struct qdf_mac_addr peer_addr)
3689 {
3690 	struct dp_peer *peer;
3691 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3692 
3693 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
3694 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3695 
3696 	if (!peer)
3697 		return QDF_STATUS_E_FAULT;
3698 	if (!peer->valid) {
3699 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3700 		return QDF_STATUS_E_FAULT;
3701 	}
3702 
3703 	dp_clear_peer_internal(soc, peer);
3704 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3705 	return QDF_STATUS_SUCCESS;
3706 }
3707 
3708 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3709 			 uint8_t *vdev_id)
3710 {
3711 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3712 	struct dp_peer *peer =
3713 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3714 				       DP_MOD_ID_CDP);
3715 
3716 	if (!peer)
3717 		return QDF_STATUS_E_FAILURE;
3718 
3719 	dp_info("peer %pK vdev %pK vdev id %d",
3720 		peer, peer->vdev, peer->vdev->vdev_id);
3721 	*vdev_id = peer->vdev->vdev_id;
3722 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3723 	 * Decrement it here.
3724 	 */
3725 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3726 
3727 	return QDF_STATUS_SUCCESS;
3728 }
3729 
3730 struct cdp_vdev *
3731 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3732 			 struct qdf_mac_addr peer_addr)
3733 {
3734 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3735 	struct dp_peer *peer = NULL;
3736 	struct cdp_vdev *vdev = NULL;
3737 
3738 	if (!pdev) {
3739 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
3740 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
3741 		return NULL;
3742 	}
3743 
3744 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
3745 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
3746 	if (!peer) {
3747 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3748 			  "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT,
3749 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
3750 		return NULL;
3751 	}
3752 
3753 	vdev = (struct cdp_vdev *)peer->vdev;
3754 
3755 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3756 	return vdev;
3757 }
3758 
3759 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3760 {
3761 	struct dp_peer *peer = peer_handle;
3762 
3763 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3764 	return (struct cdp_vdev *)peer->vdev;
3765 }
3766 
3767 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3768 {
3769 	struct dp_peer *peer = peer_handle;
3770 	uint8_t *mac;
3771 
3772 	mac = peer->mac_addr.raw;
3773 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3774 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3775 	return peer->mac_addr.raw;
3776 }
3777 
3778 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3779 		      uint8_t *peer_mac)
3780 {
3781 	enum ol_txrx_peer_state peer_state;
3782 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3783 	struct cdp_peer_info peer_info = { 0 };
3784 	struct dp_peer *peer;
3785 	struct dp_peer *tgt_peer;
3786 
3787 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
3788 				 false, CDP_WILD_PEER_TYPE);
3789 
3790 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
3791 
3792 	if (!peer)
3793 		return OL_TXRX_PEER_STATE_INVALID;
3794 
3795 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
3796 	peer_state = tgt_peer->state;
3797 
3798 	dp_peer_debug("peer %pK tgt_peer: %pK peer MAC "
3799 		     QDF_MAC_ADDR_FMT " tgt peer MAC "
3800 		     QDF_MAC_ADDR_FMT " tgt peer state %d",
3801 		     peer, tgt_peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3802 		     QDF_MAC_ADDR_REF(tgt_peer->mac_addr.raw),
3803 		     tgt_peer->state);
3804 
3805 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3806 
3807 	return peer_state;
3808 }
3809 
/*
 * dp_local_peer_id_pool_init() - initialize the pdev-local peer id pool
 * @pdev: DP pdev owning the pool
 *
 * Builds a singly linked freelist inside pool[]: pool[i] holds the next
 * free id. The pool array has OL_TXRX_NUM_LOCAL_PEER_IDS + 1 slots; the
 * extra slot at index OL_TXRX_NUM_LOCAL_PEER_IDS links to itself and
 * acts as the end-of-list sentinel (see dp_local_peer_id_alloc).
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	dp_info("Peer pool init");
}
3830 
/*
 * dp_local_peer_id_alloc() - assign a pdev-local peer id to a peer
 * @pdev: DP pdev owning the id pool
 * @peer: peer to receive the id
 *
 * Pops the head of the freelist under the pool lock and records the
 * peer in the id-to-peer map. When only the self-linked sentinel
 * remains (pool exhausted) the peer gets
 * OL_TXRX_INVALID_LOCAL_PEER_ID.
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	dp_info("peer %pK, local id %d", peer, peer->local_id);
}
3849 
3850 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3851 {
3852 	int i = peer->local_id;
3853 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3854 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3855 		return;
3856 	}
3857 
3858 	/* put this ID on the head of the freelist */
3859 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3860 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3861 	pdev->local_peer_ids.freelist = i;
3862 	pdev->local_peer_ids.map[i] = NULL;
3863 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3864 }
3865 
3866 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3867 				uint8_t vdev_id, uint8_t *peer_addr)
3868 {
3869 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3870 	struct dp_peer *peer = NULL;
3871 
3872 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
3873 				      DP_MOD_ID_CDP);
3874 	if (!peer)
3875 		return false;
3876 
3877 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3878 
3879 	return true;
3880 }
3881 
3882 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3883 				      uint8_t vdev_id, uint8_t *peer_addr,
3884 				      uint16_t max_bssid)
3885 {
3886 	int i;
3887 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3888 	struct dp_peer *peer = NULL;
3889 
3890 	for (i = 0; i < max_bssid; i++) {
3891 		/* Need to check vdevs other than the vdev_id */
3892 		if (vdev_id == i)
3893 			continue;
3894 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
3895 					      DP_MOD_ID_CDP);
3896 		if (peer) {
3897 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
3898 			       QDF_MAC_ADDR_REF(peer_addr), i);
3899 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3900 			return true;
3901 		}
3902 	}
3903 
3904 	return false;
3905 }
3906 
3907 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3908 			      uint8_t *peer_mac, bool val)
3909 {
3910 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3911 	struct dp_peer *peer = NULL;
3912 
3913 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3914 				      DP_MOD_ID_CDP);
3915 	if (!peer) {
3916 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
3917 		       QDF_MAC_ADDR_REF(peer_mac));
3918 		return;
3919 	}
3920 
3921 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
3922 		val, QDF_MAC_ADDR_REF(peer_mac));
3923 	peer->is_tdls_peer = val;
3924 
3925 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3926 }
3927 #endif
3928 
3929 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3930 			uint8_t *peer_addr)
3931 {
3932 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3933 	struct dp_peer *peer = NULL;
3934 
3935 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
3936 				      DP_MOD_ID_CDP);
3937 	if (peer) {
3938 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3939 		return true;
3940 	}
3941 
3942 	return false;
3943 }
3944 
3945 QDF_STATUS
3946 dp_set_michael_key(struct cdp_soc_t *soc,
3947 		   uint8_t vdev_id,
3948 		   uint8_t *peer_mac,
3949 		   bool is_unicast, uint32_t *key)
3950 {
3951 	uint8_t sec_index = is_unicast ? 1 : 0;
3952 	struct dp_peer *peer =
3953 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3954 						       peer_mac, 0, vdev_id,
3955 						       DP_MOD_ID_CDP);
3956 
3957 	if (!peer) {
3958 		dp_peer_err("%pK: peer not found ", soc);
3959 		return QDF_STATUS_E_FAILURE;
3960 	}
3961 
3962 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
3963 		     key, IEEE80211_WEP_MICLEN);
3964 
3965 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3966 
3967 	return QDF_STATUS_SUCCESS;
3968 }
3969 
3970 
3971 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
3972 					   struct dp_vdev *vdev,
3973 					   enum dp_mod_id mod_id)
3974 {
3975 	struct dp_peer *peer = NULL;
3976 
3977 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3978 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3979 		if (peer->bss_peer)
3980 			break;
3981 	}
3982 
3983 	if (!peer) {
3984 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3985 		return NULL;
3986 	}
3987 
3988 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
3989 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3990 		return peer;
3991 	}
3992 
3993 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3994 	return peer;
3995 }
3996 
3997 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
3998 						struct dp_vdev *vdev,
3999 						enum dp_mod_id mod_id)
4000 {
4001 	struct dp_peer *peer;
4002 
4003 	if (vdev->opmode != wlan_op_mode_sta)
4004 		return NULL;
4005 
4006 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4007 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4008 		if (peer->sta_self_peer)
4009 			break;
4010 	}
4011 
4012 	if (!peer) {
4013 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4014 		return NULL;
4015 	}
4016 
4017 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4018 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4019 		return peer;
4020 	}
4021 
4022 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4023 	return peer;
4024 }
4025 
/*
 * dp_peer_flush_frags() - drop all pending rx defrag state for a peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id of the peer
 * @peer_mac: MAC address of the peer
 *
 * For each TID, removes the peer from the defrag waitlist and flushes
 * any partially reassembled fragments, all under that TID's defrag
 * lock.
 */
void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
							      vdev_id,
							      DP_MOD_ID_CDP);
	struct dp_txrx_peer *txrx_peer;
	uint8_t tid;
	struct dp_rx_tid_defrag *defrag_rx_tid;

	if (!peer)
		return;

	/* Lookup reference must still be dropped when txrx_peer is absent */
	if (!peer->txrx_peer)
		goto fail;

	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	txrx_peer = peer->txrx_peer;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		defrag_rx_tid = &txrx_peer->rx_tid[tid];

		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
		dp_rx_reorder_flush_frag(txrx_peer, tid);
		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
	}
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
4059 
4060 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
4061 {
4062 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
4063 						     DP_MOD_ID_HTT);
4064 
4065 	if (peer) {
4066 		/*
4067 		 * Decrement the peer ref which is taken as part of
4068 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
4069 		 */
4070 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4071 
4072 		return true;
4073 	}
4074 
4075 	return false;
4076 }
4077 
4078 qdf_export_symbol(dp_peer_find_by_id_valid);
4079 
4080 #ifdef QCA_MULTIPASS_SUPPORT
4081 void dp_peer_multipass_list_remove(struct dp_peer *peer)
4082 {
4083 	struct dp_vdev *vdev = peer->vdev;
4084 	struct dp_txrx_peer *tpeer = NULL;
4085 	bool found = 0;
4086 
4087 	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
4088 	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
4089 		if (tpeer == peer->txrx_peer) {
4090 			found = 1;
4091 			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
4092 				     mpass_peer_list_elem);
4093 			break;
4094 		}
4095 	}
4096 
4097 	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
4098 
4099 	if (found)
4100 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4101 }
4102 
4103 /**
4104  * dp_peer_multipass_list_add() - add to new multipass list
4105  * @soc: soc handle
4106  * @peer_mac: mac address
4107  * @vdev_id: vdev id for peer
4108  * @vlan_id: vlan_id
4109  *
4110  * return: void
4111  */
4112 static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
4113 				       uint8_t vdev_id, uint16_t vlan_id)
4114 {
4115 	struct dp_peer *peer =
4116 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4117 						       vdev_id,
4118 						       DP_MOD_ID_TX_MULTIPASS);
4119 
4120 	if (qdf_unlikely(!peer)) {
4121 		qdf_err("NULL peer");
4122 		return;
4123 	}
4124 
4125 	if (qdf_unlikely(!peer->txrx_peer))
4126 		goto fail;
4127 
4128 	/* If peer already exists in vdev multipass list, do not add it.
4129 	 * This may happen if key install comes twice or re-key
4130 	 * happens for a peer.
4131 	 */
4132 	if (peer->txrx_peer->vlan_id) {
4133 		dp_debug("peer already added to vdev multipass list"
4134 			 "MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
4135 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4136 			 peer->txrx_peer->vlan_id);
4137 		goto fail;
4138 	}
4139 
4140 	/*
4141 	 * Ref_cnt is incremented inside dp_peer_find_hash_find().
4142 	 * Decrement it when element is deleted from the list.
4143 	 */
4144 	peer->txrx_peer->vlan_id = vlan_id;
4145 	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4146 	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
4147 			  peer->txrx_peer,
4148 			  mpass_peer_list_elem);
4149 	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4150 	return;
4151 
4152 fail:
4153 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4154 }
4155 
4156 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
4157 			 uint8_t vdev_id, uint8_t *peer_mac,
4158 			 uint16_t vlan_id)
4159 {
4160 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4161 	struct dp_vdev *vdev =
4162 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
4163 				      DP_MOD_ID_TX_MULTIPASS);
4164 
4165 	dp_info("vdev_id %d, vdev %pK, multipass_en %d, peer_mac " QDF_MAC_ADDR_FMT " vlan %d",
4166 		vdev_id, vdev, vdev ? vdev->multipass_en : 0,
4167 		QDF_MAC_ADDR_REF(peer_mac), vlan_id);
4168 	if (vdev && vdev->multipass_en) {
4169 		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
4170 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
4171 	}
4172 }
4173 #endif /* QCA_MULTIPASS_SUPPORT */
4174