xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 37f00a180ceecf8017c1edaf65f03c8ada24eb2a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
47 #include "reg_services_common.h"
48 #endif
49 #ifdef FEATURE_AST
50 #ifdef BYPASS_OL_OPS
51 /**
52  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
53  * @soc: DP soc structure pointer
54  * @peer: dp peer structure
55  * @dest_macaddr: MAC address of ast node
56  * @flags: wds or hmwds
57  * @type: type from enum cdp_txrx_ast_entry_type
58  *
59  * This API is used by the WDS source port learning function to
60  * add a new AST entry in the fw.
61  *
62  * Return: 0 on success, error code otherwise.
63  */
64 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
65 				    struct dp_peer *peer,
66 				    const uint8_t *dest_macaddr,
67 				    uint32_t flags,
68 				    uint8_t type)
69 {
70 	QDF_STATUS status;
71 
72 	status = target_if_add_wds_entry(soc->ctrl_psoc,
73 					 peer->vdev->vdev_id,
74 					 peer->mac_addr.raw,
75 					 dest_macaddr,
76 					 WMI_HOST_WDS_FLAG_STATIC,
77 					 type);
78 
79 	return qdf_status_to_os_return(status);
80 }
81 
82 /**
83  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
84  * @soc: DP soc structure pointer
85  * @peer: dp peer structure
86  * @dest_macaddr: MAC address of ast node
87  * @flags: wds or hmwds
88  *
89  * This API is used to update the peer mac address for the ast
90  * entry in the fw.
91  *
92  * Return: 0 on success, error code otherwise.
93  */
94 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
95 				       struct dp_peer *peer,
96 				       uint8_t *dest_macaddr,
97 				       uint32_t flags)
98 {
99 	QDF_STATUS status;
100 
101 	status = target_if_update_wds_entry(soc->ctrl_psoc,
102 					    peer->vdev->vdev_id,
103 					    dest_macaddr,
104 					    peer->mac_addr.raw,
105 					    WMI_HOST_WDS_FLAG_STATIC);
106 
107 	return qdf_status_to_os_return(status);
108 }
109 
110 /**
111  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
112  * @soc: DP soc structure pointer
113  * @vdev_id: vdev_id
114  * @wds_macaddr: MAC address of ast node
115  * @type: type from enum cdp_txrx_ast_entry_type
116  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
117  *
118  * This API is used to delete an AST entry from the fw
119  *
120  * Return: None
121  */
122 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
123 			      uint8_t vdev_id,
124 			      uint8_t *wds_macaddr,
125 			      uint8_t type,
126 			      uint8_t delete_in_fw)
127 {
128 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
129 				wds_macaddr, type, delete_in_fw);
130 }
131 #else
132 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
133 				    struct dp_peer *peer,
134 				    const uint8_t *dest_macaddr,
135 				    uint32_t flags,
136 				    uint8_t type)
137 {
138 	int status;
139 
140 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
141 					soc->ctrl_psoc,
142 					peer->vdev->vdev_id,
143 					peer->mac_addr.raw,
144 					peer->peer_id,
145 					dest_macaddr,
146 					peer->mac_addr.raw,
147 					flags,
148 					type);
149 
150 	return status;
151 }
152 
153 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
154 				       struct dp_peer *peer,
155 				       uint8_t *dest_macaddr,
156 				       uint32_t flags)
157 {
158 	int status;
159 
160 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
161 				soc->ctrl_psoc,
162 				peer->vdev->vdev_id,
163 				dest_macaddr,
164 				peer->mac_addr.raw,
165 				flags);
166 
167 	return status;
168 }
169 
170 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
171 			      uint8_t vdev_id,
172 			      uint8_t *wds_macaddr,
173 			      uint8_t type,
174 			      uint8_t delete_in_fw)
175 {
176 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
177 						vdev_id,
178 						wds_macaddr,
179 						type,
180 						delete_in_fw);
181 }
182 #endif /* BYPASS_OL_OPS */
183 #else
184 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
185 			      uint8_t vdev_id,
186 			      uint8_t *wds_macaddr,
187 			      uint8_t type,
188 			      uint8_t delete_in_fw)
189 {
190 }
191 #endif /* FEATURE_AST */
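/*
 * Illustrative sketch (not part of the driver flow): one way a WDS
 * source-port-learning caller could use the wrappers above to mirror a
 * learned address into the AST table and later tear it down. The helper
 * name and its flags argument are hypothetical; only the wrapper
 * signatures come from this file.
 */
#ifdef FEATURE_AST
static inline void
dp_wds_entry_usage_sketch(struct dp_soc *soc, struct dp_peer *peer,
			  uint8_t *learned_mac, uint32_t flags)
{
	/* add: map learned_mac to this peer as a WDS (next-hop) entry */
	if (dp_add_wds_entry_wrapper(soc, peer, learned_mac, flags,
				     CDP_TXRX_AST_TYPE_WDS))
		return;

	/* delete: remove the entry from host and fw (delete_in_fw = 1) */
	dp_del_wds_entry_wrapper(soc, peer->vdev->vdev_id, learned_mac,
				 CDP_TXRX_AST_TYPE_WDS, 1);
}
#endif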
192 
193 #ifdef FEATURE_WDS
194 static inline bool
195 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
196 				    struct dp_ast_entry *ast_entry)
197 {
198 	/* if peer map v2 is enabled we do not free the ast entry
199 	 * here; it is supposed to be freed in the unmap event (after
200 	 * we receive the delete confirmation from the target)
201 	 *
202 	 * if peer_id is invalid we did not get the peer map event
203 	 * for the peer, so free the ast entry from here only in this case
204 	 */
205 
206 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
207 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
208 		return true;
209 
210 	return false;
211 }
212 #else
213 static inline bool
214 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
215 				    struct dp_ast_entry *ast_entry)
216 {
217 	return false;
218 }
219 
220 void dp_soc_wds_attach(struct dp_soc *soc)
221 {
222 }
223 
224 void dp_soc_wds_detach(struct dp_soc *soc)
225 {
226 }
227 #endif
228 
229 #ifdef QCA_SUPPORT_WDS_EXTENDED
230 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
231 {
232 	struct dp_vdev *vdev = peer->vdev;
233 	struct dp_txrx_peer *txrx_peer;
234 
235 	if (!vdev->wds_ext_enabled)
236 		return false;
237 
238 	txrx_peer = dp_get_txrx_peer(peer);
239 	if (!txrx_peer)
240 		return false;
241 
242 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
243 				&txrx_peer->wds_ext.init))
244 		return true;
245 
246 	return false;
247 }
248 #else
249 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
250 {
251 	return false;
252 }
253 #endif
254 
255 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
256 {
257 	uint32_t max_ast_index;
258 
259 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
260 	/* allocate ast_table for ast entry to ast_index map */
261 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
262 	soc->ast_table = qdf_mem_malloc(max_ast_index *
263 					sizeof(struct dp_ast_entry *));
264 	if (!soc->ast_table) {
265 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
266 		return QDF_STATUS_E_NOMEM;
267 	}
268 	return QDF_STATUS_SUCCESS; /* success */
269 }
270 
271 /**
272  * dp_find_peer_by_macaddr() - Find the peer from the MAC address provided
273  * @soc: soc handle
274  * @mac_addr: MAC address to be used to find peer
275  * @vdev_id: VDEV id
276  * @mod_id: MODULE ID
277  *
278  * Return: struct dp_peer pointer if found, NULL otherwise
279  */
280 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
281 					uint8_t vdev_id, enum dp_mod_id mod_id)
282 {
283 	bool ast_ind_disable = wlan_cfg_get_ast_indication_disable(
284 							    soc->wlan_cfg_ctx);
285 	struct cdp_peer_info peer_info = {0};
286 
287 	if ((!soc->ast_offload_support) || (!ast_ind_disable)) {
288 		struct dp_ast_entry *ast_entry = NULL;
289 		uint16_t peer_id;
290 
291 		qdf_spin_lock_bh(&soc->ast_lock);
292 
293 		if (vdev_id == DP_VDEV_ALL)
294 			ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
295 		else
296 			ast_entry = dp_peer_ast_hash_find_by_vdevid
297 						(soc, mac_addr, vdev_id);
298 
299 		if (!ast_entry) {
300 			qdf_spin_unlock_bh(&soc->ast_lock);
301 			dp_err("NULL ast entry");
302 			return NULL;
303 		}
304 
305 		peer_id = ast_entry->peer_id;
306 		qdf_spin_unlock_bh(&soc->ast_lock);
307 
308 		if (peer_id == HTT_INVALID_PEER)
309 			return NULL;
310 
311 		return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
312 	}
313 
314 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac_addr, false,
315 				 CDP_WILD_PEER_TYPE);
316 	return dp_peer_hash_find_wrapper(soc, &peer_info, mod_id);
317 }
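/*
 * Illustrative sketch (hypothetical caller): dp_find_peer_by_macaddr()
 * returns a reference-counted peer, so every successful lookup must be
 * paired with dp_peer_unref_delete() under the same module id.
 */
static inline bool
dp_peer_lookup_sketch(struct dp_soc *soc, uint8_t *mac_addr,
		      uint8_t vdev_id)
{
	struct dp_peer *peer;

	peer = dp_find_peer_by_macaddr(soc, mac_addr, vdev_id,
				       DP_MOD_ID_CDP);
	if (!peer)
		return false;

	/* ... use the peer while the reference is held ... */

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return true;
}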
318 
319 /**
320  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
321  * @soc: soc handle
322  *
323  * Return: QDF_STATUS
324  */
325 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
326 {
327 	uint32_t max_peers, peer_map_size;
328 
329 	max_peers = soc->max_peer_id;
330 	/* allocate the peer ID -> peer object map */
331 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
332 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
333 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
334 	if (!soc->peer_id_to_obj_map) {
335 		dp_peer_err("%pK: peer map memory allocation failed", soc);
336 		return QDF_STATUS_E_NOMEM;
337 	}
338 
339 	/*
340 	 * The peer_id_to_obj_map doesn't really need to be initialized,
341 	 * since elements are only used after they have been individually
342 	 * initialized.
343 	 * However, it is convenient for debugging to have all elements
344 	 * that are not in use set to 0.
345 	 */
346 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
347 
348 	qdf_spinlock_create(&soc->peer_map_lock);
349 	return QDF_STATUS_SUCCESS; /* success */
350 }
351 
352 #define DP_AST_HASH_LOAD_MULT  2
353 #define DP_AST_HASH_LOAD_SHIFT 0
354 
355 static inline uint32_t
356 dp_peer_find_hash_index(struct dp_soc *soc,
357 			union dp_align_mac_addr *mac_addr)
358 {
359 	uint32_t index;
360 
361 	index =
362 		mac_addr->align2.bytes_ab ^
363 		mac_addr->align2.bytes_cd ^
364 		mac_addr->align2.bytes_ef;
365 
366 	index ^= index >> soc->peer_hash.idx_bits;
367 	index &= soc->peer_hash.mask;
368 	return index;
369 }
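/*
 * Worked example (illustrative, assuming a little-endian host and an
 * 8-bit hash, i.e. peer_hash.idx_bits = 8, mask = 0xff): for MAC
 * 00:11:22:33:44:55,
 *   bytes_ab ^ bytes_cd ^ bytes_ef = 0x1100 ^ 0x3322 ^ 0x5544 = 0x7766
 *   0x7766 ^ (0x7766 >> 8)         = 0x7711
 *   0x7711 & 0xff                  = 0x11   -> hash bin 0x11
 */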
370 
371 struct dp_peer *dp_peer_find_hash_find(
372 				struct dp_soc *soc, uint8_t *peer_mac_addr,
373 				int mac_addr_is_aligned, uint8_t vdev_id,
374 				enum dp_mod_id mod_id)
375 {
376 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
377 	uint32_t index;
378 	struct dp_peer *peer;
379 
380 	if (!soc->peer_hash.bins)
381 		return NULL;
382 
383 	if (mac_addr_is_aligned) {
384 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
385 	} else {
386 		qdf_mem_copy(
387 			&local_mac_addr_aligned.raw[0],
388 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
389 		mac_addr = &local_mac_addr_aligned;
390 	}
391 	index = dp_peer_find_hash_index(soc, mac_addr);
392 	qdf_spin_lock_bh(&soc->peer_hash_lock);
393 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
394 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
395 		    ((peer->vdev->vdev_id == vdev_id) ||
396 		     (vdev_id == DP_VDEV_ALL))) {
397 			/* take peer reference before returning */
398 			if (dp_peer_get_ref(soc, peer, mod_id) !=
399 						QDF_STATUS_SUCCESS)
400 				peer = NULL;
401 
402 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
403 			return peer;
404 		}
405 	}
406 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
407 	return NULL; /* failure */
408 }
409 
410 qdf_export_symbol(dp_peer_find_hash_find);
411 
412 #ifdef WLAN_FEATURE_11BE_MLO
413 /**
414  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
415  * @soc: soc handle
416  *
417  * Return: None
418  */
419 static void dp_peer_find_hash_detach(struct dp_soc *soc)
420 {
421 	if (soc->peer_hash.bins) {
422 		qdf_mem_free(soc->peer_hash.bins);
423 		soc->peer_hash.bins = NULL;
424 		qdf_spinlock_destroy(&soc->peer_hash_lock);
425 	}
426 
427 	if (soc->arch_ops.mlo_peer_find_hash_detach)
428 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
429 }
430 
431 /**
432  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
433  * @soc: soc handle
434  *
435  * Return: QDF_STATUS
436  */
437 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
438 {
439 	int i, hash_elems, log2;
440 
441 	/* allocate the peer MAC address -> peer object hash table */
442 	hash_elems = soc->max_peers;
443 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
444 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
445 	log2 = dp_log2_ceil(hash_elems);
446 	hash_elems = 1 << log2;
447 
448 	soc->peer_hash.mask = hash_elems - 1;
449 	soc->peer_hash.idx_bits = log2;
450 	/* allocate an array of TAILQ peer object lists */
451 	soc->peer_hash.bins = qdf_mem_malloc(
452 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
453 	if (!soc->peer_hash.bins)
454 		return QDF_STATUS_E_NOMEM;
455 
456 	for (i = 0; i < hash_elems; i++)
457 		TAILQ_INIT(&soc->peer_hash.bins[i]);
458 
459 	qdf_spinlock_create(&soc->peer_hash_lock);
460 
461 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
462 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
463 			QDF_STATUS_SUCCESS)) {
464 		dp_peer_find_hash_detach(soc);
465 		return QDF_STATUS_E_NOMEM;
466 	}
467 	return QDF_STATUS_SUCCESS;
468 }
469 
470 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
471 {
472 	unsigned index;
473 
474 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
475 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
476 		qdf_spin_lock_bh(&soc->peer_hash_lock);
477 
478 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
479 							DP_MOD_ID_CONFIG))) {
480 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
481 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
482 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
483 			return;
484 		}
485 
486 		/*
487 		 * It is important to add the new peer at the tail of
488 		 * peer list with the bin index. Together with having
489 		 * the hash_find function search from head to tail,
490 		 * this ensures that if two entries with the same MAC address
491 		 * are stored, the one added first will be found first.
492 		 */
493 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
494 				  hash_list_elem);
495 
496 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
497 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
498 		if (soc->arch_ops.mlo_peer_find_hash_add)
499 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
500 	} else {
501 		dp_err("unknown peer type %d", peer->peer_type);
502 	}
503 }
504 
505 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
506 {
507 	unsigned index;
508 	struct dp_peer *tmppeer = NULL;
509 	int found = 0;
510 
511 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
512 
513 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
514 		/* Check if tail is not empty before delete */
515 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
516 
517 		qdf_spin_lock_bh(&soc->peer_hash_lock);
518 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
519 			      hash_list_elem) {
520 			if (tmppeer == peer) {
521 				found = 1;
522 				break;
523 			}
524 		}
525 		QDF_ASSERT(found);
526 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
527 			     hash_list_elem);
528 
529 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
530 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
531 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
532 		if (soc->arch_ops.mlo_peer_find_hash_remove)
533 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
534 	} else {
535 		dp_err("unknown peer type %d", peer->peer_type);
536 	}
537 }
538 
539 uint8_t dp_get_peer_link_id(struct dp_peer *peer)
540 {
541 	uint8_t link_id;
542 
543 	link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
544 	if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
545 		link_id = 0;
546 
547 	return link_id;
548 }
549 #else
550 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
551 {
552 	int i, hash_elems, log2;
553 
554 	/* allocate the peer MAC address -> peer object hash table */
555 	hash_elems = soc->max_peers;
556 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
557 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
558 	log2 = dp_log2_ceil(hash_elems);
559 	hash_elems = 1 << log2;
560 
561 	soc->peer_hash.mask = hash_elems - 1;
562 	soc->peer_hash.idx_bits = log2;
563 	/* allocate an array of TAILQ peer object lists */
564 	soc->peer_hash.bins = qdf_mem_malloc(
565 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
566 	if (!soc->peer_hash.bins)
567 		return QDF_STATUS_E_NOMEM;
568 
569 	for (i = 0; i < hash_elems; i++)
570 		TAILQ_INIT(&soc->peer_hash.bins[i]);
571 
572 	qdf_spinlock_create(&soc->peer_hash_lock);
573 	return QDF_STATUS_SUCCESS;
574 }
575 
576 static void dp_peer_find_hash_detach(struct dp_soc *soc)
577 {
578 	if (soc->peer_hash.bins) {
579 		qdf_mem_free(soc->peer_hash.bins);
580 		soc->peer_hash.bins = NULL;
581 		qdf_spinlock_destroy(&soc->peer_hash_lock);
582 	}
583 }
584 
585 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
586 {
587 	unsigned index;
588 
589 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
590 	qdf_spin_lock_bh(&soc->peer_hash_lock);
591 
592 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
593 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
594 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
595 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
596 		return;
597 	}
598 
599 	/*
600 	 * It is important to add the new peer at the tail of the peer list
601 	 * with the bin index.  Together with having the hash_find function
602 	 * search from head to tail, this ensures that if two entries with
603 	 * the same MAC address are stored, the one added first will be
604 	 * found first.
605 	 */
606 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
607 
608 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
609 }
610 
611 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
612 {
613 	unsigned index;
614 	struct dp_peer *tmppeer = NULL;
615 	int found = 0;
616 
617 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
618 	/* Check if tail is not empty before delete */
619 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
620 
621 	qdf_spin_lock_bh(&soc->peer_hash_lock);
622 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
623 		if (tmppeer == peer) {
624 			found = 1;
625 			break;
626 		}
627 	}
628 	QDF_ASSERT(found);
629 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
630 
631 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
632 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
633 }
634 
635 
636 #endif /* WLAN_FEATURE_11BE_MLO */
637 
638 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
639 			   struct dp_peer *peer)
640 {
641 	/* only link peer will be added to vdev peer list */
642 	if (IS_MLO_DP_MLD_PEER(peer))
643 		return;
644 
645 	qdf_spin_lock_bh(&vdev->peer_list_lock);
646 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
647 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
648 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
649 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
650 		return;
651 	}
652 
653 	/* add this peer into the vdev's list */
654 	if (wlan_op_mode_sta == vdev->opmode)
655 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
656 	else
657 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
658 
659 	vdev->num_peers++;
660 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
661 }
662 
663 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
664 			      struct dp_peer *peer)
665 {
666 	uint8_t found = 0;
667 	struct dp_peer *tmppeer = NULL;
668 
669 	/* only link peer will be added to vdev peer list */
670 	if (IS_MLO_DP_MLD_PEER(peer))
671 		return;
672 
673 	qdf_spin_lock_bh(&vdev->peer_list_lock);
674 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
675 		if (tmppeer == peer) {
676 			found = 1;
677 			break;
678 		}
679 	}
680 
681 	if (found) {
682 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
683 			     peer_list_elem);
684 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
685 		vdev->num_peers--;
686 	} else {
687 		/* Ignoring the remove operation as peer not found */
688 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
689 			      , soc, peer, vdev, &peer->vdev->peer_list);
690 	}
691 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
692 }
693 
694 void dp_txrx_peer_attach_add(struct dp_soc *soc,
695 			     struct dp_peer *peer,
696 			     struct dp_txrx_peer *txrx_peer)
697 {
698 	qdf_spin_lock_bh(&soc->peer_map_lock);
699 
700 	peer->txrx_peer = txrx_peer;
701 	txrx_peer->bss_peer = peer->bss_peer;
702 
703 	if (peer->peer_id == HTT_INVALID_PEER) {
704 		qdf_spin_unlock_bh(&soc->peer_map_lock);
705 		return;
706 	}
707 
708 	txrx_peer->peer_id = peer->peer_id;
709 
710 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
711 
712 	qdf_spin_unlock_bh(&soc->peer_map_lock);
713 }
714 
715 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
716 				struct dp_peer *peer,
717 				uint16_t peer_id)
718 {
719 	QDF_ASSERT(peer_id <= soc->max_peer_id);
720 
721 	qdf_spin_lock_bh(&soc->peer_map_lock);
722 
723 	peer->peer_id = peer_id;
724 
725 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
726 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
727 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
728 		qdf_spin_unlock_bh(&soc->peer_map_lock);
729 		return;
730 	}
731 
732 	if (!soc->peer_id_to_obj_map[peer_id]) {
733 		soc->peer_id_to_obj_map[peer_id] = peer;
734 		if (peer->txrx_peer)
735 			peer->txrx_peer->peer_id = peer_id;
736 	} else {
737 		/* Peer map event came for peer_id which
738 		 * is already mapped, this is not expected
739 		 */
740 		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
741 		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
742 		       soc->peer_id_to_obj_map[peer_id]);
743 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
744 		qdf_assert_always(0);
745 	}
746 	qdf_spin_unlock_bh(&soc->peer_map_lock);
747 }
748 
749 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
750 				   uint16_t peer_id)
751 {
752 	struct dp_peer *peer = NULL;
753 	QDF_ASSERT(peer_id <= soc->max_peer_id);
754 
755 	qdf_spin_lock_bh(&soc->peer_map_lock);
756 	peer = soc->peer_id_to_obj_map[peer_id];
757 	if (!peer) {
758 		dp_err("unable to get peer during peer id obj map remove");
759 		qdf_spin_unlock_bh(&soc->peer_map_lock);
760 		return;
761 	}
762 	peer->peer_id = HTT_INVALID_PEER;
763 	if (peer->txrx_peer)
764 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
765 	soc->peer_id_to_obj_map[peer_id] = NULL;
766 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
767 	qdf_spin_unlock_bh(&soc->peer_map_lock);
768 }
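/*
 * Illustrative sketch (hypothetical event handler): the add/remove pair
 * above is driven by the target's peer map/unmap events. A map event
 * binds the peer id to the peer object (taking a DP_MOD_ID_CONFIG
 * reference); the matching unmap event releases both.
 */
static inline void
dp_peer_id_map_flow_sketch(struct dp_soc *soc, struct dp_peer *peer,
			   uint16_t peer_id)
{
	/* on peer map event from the target */
	dp_peer_find_id_to_obj_add(soc, peer, peer_id);

	/* ... peer is reachable via soc->peer_id_to_obj_map[peer_id] ... */

	/* on peer unmap event from the target */
	dp_peer_find_id_to_obj_remove(soc, peer_id);
}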
769 
770 #ifdef FEATURE_MEC
771 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
772 {
773 	int log2, hash_elems, i;
774 
775 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
776 	hash_elems = 1 << log2;
777 
778 	soc->mec_hash.mask = hash_elems - 1;
779 	soc->mec_hash.idx_bits = log2;
780 
781 	dp_peer_info("%pK: max mec index: %d",
782 		     soc, DP_PEER_MAX_MEC_IDX);
783 
784 	/* allocate an array of TAILQ mec object lists */
785 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
786 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
787 							      dp_mec_entry)));
788 
789 	if (!soc->mec_hash.bins)
790 		return QDF_STATUS_E_NOMEM;
791 
792 	for (i = 0; i < hash_elems; i++)
793 		TAILQ_INIT(&soc->mec_hash.bins[i]);
794 
795 	return QDF_STATUS_SUCCESS;
796 }
797 
798 /**
799  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
800  * @soc: SoC handle
801  * @mac_addr: MAC address
802  *
803  * Return: MEC hash
804  */
805 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
806 					      union dp_align_mac_addr *mac_addr)
807 {
808 	uint32_t index;
809 
810 	index =
811 		mac_addr->align2.bytes_ab ^
812 		mac_addr->align2.bytes_cd ^
813 		mac_addr->align2.bytes_ef;
814 	index ^= index >> soc->mec_hash.idx_bits;
815 	index &= soc->mec_hash.mask;
816 	return index;
817 }
818 
819 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
820 						     uint8_t pdev_id,
821 						     uint8_t *mec_mac_addr)
822 {
823 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
824 	uint32_t index;
825 	struct dp_mec_entry *mecentry;
826 
827 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
828 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
829 	mac_addr = &local_mac_addr_aligned;
830 
831 	index = dp_peer_mec_hash_index(soc, mac_addr);
832 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
833 		if ((pdev_id == mecentry->pdev_id) &&
834 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
835 			return mecentry;
836 	}
837 
838 	return NULL;
839 }
840 
841 /**
842  * dp_peer_mec_hash_add() - Add MEC entry into hash table
843  * @soc: SoC handle
844  * @mecentry: MEC entry
845  *
846  * This function adds the MEC entry into SoC MEC hash table
847  *
848  * Return: None
849  */
850 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
851 					struct dp_mec_entry *mecentry)
852 {
853 	uint32_t index;
854 
855 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
856 	qdf_spin_lock_bh(&soc->mec_lock);
857 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
858 	qdf_spin_unlock_bh(&soc->mec_lock);
859 }
860 
861 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
862 				 struct dp_vdev *vdev,
863 				 uint8_t *mac_addr)
864 {
865 	struct dp_mec_entry *mecentry = NULL;
866 	struct dp_pdev *pdev = NULL;
867 
868 	if (!vdev) {
869 		dp_peer_err("%pK: Peers vdev is NULL", soc);
870 		return QDF_STATUS_E_INVAL;
871 	}
872 
873 	pdev = vdev->pdev;
874 
875 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
876 					 DP_PEER_MAX_MEC_ENTRY)) {
877 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
878 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
879 		return QDF_STATUS_E_NOMEM;
880 	}
881 
882 	qdf_spin_lock_bh(&soc->mec_lock);
883 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
884 						   mac_addr);
885 	if (qdf_likely(mecentry)) {
886 		mecentry->is_active = TRUE;
887 		qdf_spin_unlock_bh(&soc->mec_lock);
888 		return QDF_STATUS_E_ALREADY;
889 	}
890 
891 	qdf_spin_unlock_bh(&soc->mec_lock);
892 
893 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
894 		      QDF_MAC_ADDR_FMT,
895 		      soc, pdev->pdev_id, vdev->vdev_id,
896 		      QDF_MAC_ADDR_REF(mac_addr));
897 
898 	mecentry = (struct dp_mec_entry *)
899 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
900 
901 	if (qdf_unlikely(!mecentry)) {
902 		dp_peer_err("%pK: fail to allocate mecentry", soc);
903 		return QDF_STATUS_E_NOMEM;
904 	}
905 
906 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
907 			 (struct qdf_mac_addr *)mac_addr);
908 	mecentry->pdev_id = pdev->pdev_id;
909 	mecentry->vdev_id = vdev->vdev_id;
910 	mecentry->is_active = TRUE;
911 	dp_peer_mec_hash_add(soc, mecentry);
912 
913 	qdf_atomic_inc(&soc->mec_cnt);
914 	DP_STATS_INC(soc, mec.added, 1);
915 
916 	return QDF_STATUS_SUCCESS;
917 }
918 
919 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
920 			      void *ptr)
921 {
922 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
923 
924 	TAILQ_HEAD(, dp_mec_entry) *free_list = ptr;
925 
926 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
927 		     hash_list_elem);
928 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
929 }
930 
931 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
932 {
933 	struct dp_mec_entry *mecentry, *mecentry_next;
934 
935 	TAILQ_HEAD(, dp_mec_entry) *free_list = ptr;
936 
937 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
938 			   mecentry_next) {
939 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
940 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
941 		qdf_mem_free(mecentry);
942 		qdf_atomic_dec(&soc->mec_cnt);
943 		DP_STATS_INC(soc, mec.deleted, 1);
944 	}
945 }
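/*
 * Illustrative sketch of the two-phase teardown the two helpers above
 * are designed for: detach entries onto a stack-local free list while
 * holding mec_lock, then free them outside the lock. This mirrors the
 * shape of dp_peer_mec_flush_entries(); the function below is only a
 * sketch, not the driver's implementation.
 */
static inline void dp_peer_mec_flush_sketch(struct dp_soc *soc)
{
	uint32_t index;
	struct dp_mec_entry *mecentry, *mecentry_next;
	TAILQ_HEAD(, dp_mec_entry) free_list;

	TAILQ_INIT(&free_list);

	/* phase 1: unlink all entries under the lock */
	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
				   hash_list_elem, mecentry_next)
			dp_peer_mec_detach_entry(soc, mecentry, &free_list);
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	/* phase 2: free the detached entries outside the lock */
	dp_peer_mec_free_list(soc, &free_list);
}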
946 
947 void dp_peer_mec_hash_detach(struct dp_soc *soc)
948 {
949 	dp_peer_mec_flush_entries(soc);
950 	qdf_mem_free(soc->mec_hash.bins);
951 	soc->mec_hash.bins = NULL;
952 }
953 
954 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
955 {
956 	qdf_spinlock_destroy(&soc->mec_lock);
957 }
958 
959 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
960 {
961 	qdf_spinlock_create(&soc->mec_lock);
962 }
963 #else
964 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
965 {
966 	return QDF_STATUS_SUCCESS;
967 }
968 
969 void dp_peer_mec_hash_detach(struct dp_soc *soc)
970 {
971 }
972 #endif
973 
974 #ifdef FEATURE_AST
975 #ifdef WLAN_FEATURE_11BE_MLO
976 /**
977  * dp_peer_exist_on_pdev() - check if peer with mac address exists on pdev
978  *
979  * @soc: Datapath SOC handle
980  * @peer_mac_addr: peer mac address
981  * @mac_addr_is_aligned: is mac address aligned
982  * @pdev: Datapath PDEV handle
983  *
984  * Return: true if peer found else return false
985  */
986 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
987 				  uint8_t *peer_mac_addr,
988 				  int mac_addr_is_aligned,
989 				  struct dp_pdev *pdev)
990 {
991 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
992 	unsigned int index;
993 	struct dp_peer *peer;
994 	bool found = false;
995 
996 	if (mac_addr_is_aligned) {
997 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
998 	} else {
999 		qdf_mem_copy(
1000 			&local_mac_addr_aligned.raw[0],
1001 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1002 		mac_addr = &local_mac_addr_aligned;
1003 	}
1004 	index = dp_peer_find_hash_index(soc, mac_addr);
1005 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1006 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1007 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1008 		    (peer->vdev->pdev == pdev)) {
1009 			found = true;
1010 			break;
1011 		}
1012 	}
1013 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1014 
1015 	if (found)
1016 		return found;
1017 
1018 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1019 					  mac_addr_is_aligned, DP_VDEV_ALL,
1020 					  DP_MOD_ID_CDP);
1021 	if (peer) {
1022 		if (peer->vdev->pdev == pdev)
1023 			found = true;
1024 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1025 	}
1026 
1027 	return found;
1028 }
1029 #else
1030 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1031 				  uint8_t *peer_mac_addr,
1032 				  int mac_addr_is_aligned,
1033 				  struct dp_pdev *pdev)
1034 {
1035 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1036 	unsigned int index;
1037 	struct dp_peer *peer;
1038 	bool found = false;
1039 
1040 	if (mac_addr_is_aligned) {
1041 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1042 	} else {
1043 		qdf_mem_copy(
1044 			&local_mac_addr_aligned.raw[0],
1045 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1046 		mac_addr = &local_mac_addr_aligned;
1047 	}
1048 	index = dp_peer_find_hash_index(soc, mac_addr);
1049 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1050 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1051 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1052 		    (peer->vdev->pdev == pdev)) {
1053 			found = true;
1054 			break;
1055 		}
1056 	}
1057 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1058 	return found;
1059 }
1060 #endif /* WLAN_FEATURE_11BE_MLO */
1061 
1062 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1063 {
1064 	int i, hash_elems, log2;
1065 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1066 
1067 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1068 		DP_AST_HASH_LOAD_SHIFT);
1069 
1070 	log2 = dp_log2_ceil(hash_elems);
1071 	hash_elems = 1 << log2;
1072 
1073 	soc->ast_hash.mask = hash_elems - 1;
1074 	soc->ast_hash.idx_bits = log2;
1075 
1076 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1077 		     soc, hash_elems, max_ast_idx);
1078 
1079 	/* allocate an array of TAILQ peer object lists */
1080 	soc->ast_hash.bins = qdf_mem_malloc(
1081 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1082 				dp_ast_entry)));
1083 
1084 	if (!soc->ast_hash.bins)
1085 		return QDF_STATUS_E_NOMEM;
1086 
1087 	for (i = 0; i < hash_elems; i++)
1088 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1089 
1090 	return QDF_STATUS_SUCCESS;
1091 }
1092 
1093 /**
1094  * dp_peer_ast_cleanup() - cleanup the references
1095  * @soc: SoC handle
1096  * @ast: ast entry
1097  *
1098  * Return: None
1099  */
1100 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1101 				       struct dp_ast_entry *ast)
1102 {
1103 	txrx_ast_free_cb cb = ast->callback;
1104 	void *cookie = ast->cookie;
1105 
1106 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1107 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1108 
1109 	/* Call the callback to free up the cookie */
1110 	if (cb) {
1111 		ast->callback = NULL;
1112 		ast->cookie = NULL;
1113 		cb(soc->ctrl_psoc,
1114 		   dp_soc_to_cdp_soc(soc),
1115 		   cookie,
1116 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1117 	}
1118 }
1119 
1120 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1121 {
1122 	unsigned int index;
1123 	struct dp_ast_entry *ast, *ast_next;
1124 
1125 	if (!soc->ast_hash.mask)
1126 		return;
1127 
1128 	if (!soc->ast_hash.bins)
1129 		return;
1130 
1131 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1132 
1133 	qdf_spin_lock_bh(&soc->ast_lock);
1134 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1135 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1136 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1137 					   hash_list_elem, ast_next) {
1138 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1139 					     hash_list_elem);
1140 				dp_peer_ast_cleanup(soc, ast);
1141 				soc->num_ast_entries--;
1142 				qdf_mem_free(ast);
1143 			}
1144 		}
1145 	}
1146 	qdf_spin_unlock_bh(&soc->ast_lock);
1147 
1148 	qdf_mem_free(soc->ast_hash.bins);
1149 	soc->ast_hash.bins = NULL;
1150 }
1151 
1152 /**
1153  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1154  * @soc: SoC handle
1155  * @mac_addr: MAC address
1156  *
1157  * Return: AST hash
1158  */
1159 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1160 					      union dp_align_mac_addr *mac_addr)
1161 {
1162 	uint32_t index;
1163 
1164 	index =
1165 		mac_addr->align2.bytes_ab ^
1166 		mac_addr->align2.bytes_cd ^
1167 		mac_addr->align2.bytes_ef;
1168 	index ^= index >> soc->ast_hash.idx_bits;
1169 	index &= soc->ast_hash.mask;
1170 	return index;
1171 }
1172 
1173 /**
1174  * dp_peer_ast_hash_add() - Add AST entry into hash table
1175  * @soc: SoC handle
1176  * @ase: AST entry
1177  *
1178  * This function adds the AST entry into SoC AST hash table
1179  * It assumes caller has taken the ast lock to protect the access to this table
1180  *
1181  * Return: None
1182  */
1183 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1184 					struct dp_ast_entry *ase)
1185 {
1186 	uint32_t index;
1187 
1188 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1189 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1190 }
1191 
1192 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1193 			     struct dp_ast_entry *ase)
1194 {
1195 	unsigned index;
1196 	struct dp_ast_entry *tmpase;
1197 	int found = 0;
1198 
1199 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1200 		return;
1201 
1202 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1203 	/* Check if tail is not empty before delete */
1204 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1205 
1206 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1207 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1208 
1209 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1210 		if (tmpase == ase) {
1211 			found = 1;
1212 			break;
1213 		}
1214 	}
1215 
1216 	QDF_ASSERT(found);
1217 
1218 	if (found)
1219 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1220 }
1221 
1222 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1223 						     uint8_t *ast_mac_addr,
1224 						     uint8_t vdev_id)
1225 {
1226 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1227 	uint32_t index;
1228 	struct dp_ast_entry *ase;
1229 
1230 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1231 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1232 	mac_addr = &local_mac_addr_aligned;
1233 
1234 	index = dp_peer_ast_hash_index(soc, mac_addr);
1235 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1236 		if ((vdev_id == ase->vdev_id) &&
1237 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1238 			return ase;
1239 		}
1240 	}
1241 
1242 	return NULL;
1243 }
1244 
1245 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1246 						     uint8_t *ast_mac_addr,
1247 						     uint8_t pdev_id)
1248 {
1249 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1250 	uint32_t index;
1251 	struct dp_ast_entry *ase;
1252 
1253 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1254 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1255 	mac_addr = &local_mac_addr_aligned;
1256 
1257 	index = dp_peer_ast_hash_index(soc, mac_addr);
1258 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1259 		if ((pdev_id == ase->pdev_id) &&
1260 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1261 			return ase;
1262 		}
1263 	}
1264 
1265 	return NULL;
1266 }
1267 
1268 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1269 					       uint8_t *ast_mac_addr)
1270 {
1271 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1272 	unsigned index;
1273 	struct dp_ast_entry *ase;
1274 
1275 	if (!soc->ast_hash.bins)
1276 		return NULL;
1277 
1278 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1279 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1280 	mac_addr = &local_mac_addr_aligned;
1281 
1282 	index = dp_peer_ast_hash_index(soc, mac_addr);
1283 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1284 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1285 			return ase;
1286 		}
1287 	}
1288 
1289 	return NULL;
1290 }
1291 
1292 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
1293 					struct dp_soc *soc,
1294 					uint8_t *ast_mac_addr,
1295 					enum cdp_txrx_ast_entry_type type)
1296 {
1297 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1298 	unsigned index;
1299 	struct dp_ast_entry *ase;
1300 
1301 	if (!soc->ast_hash.bins)
1302 		return NULL;
1303 
1304 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1305 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1306 	mac_addr = &local_mac_addr_aligned;
1307 
1308 	index = dp_peer_ast_hash_index(soc, mac_addr);
1309 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1310 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0 &&
1311 		    ase->type == type) {
1312 			return ase;
1313 		}
1314 	}
1315 
1316 	return NULL;
1317 }
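/*
 * Illustrative sketch (hypothetical caller): the hash-find helpers above
 * return raw pointers with no reference held, so callers must hold
 * soc->ast_lock across the lookup and copy out what they need before
 * dropping the lock (dp_find_peer_by_macaddr() uses the same pattern).
 */
static inline uint16_t
dp_ast_peer_id_lookup_sketch(struct dp_soc *soc, uint8_t *mac_addr)
{
	struct dp_ast_entry *ase;
	uint16_t peer_id = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ase)
		peer_id = ase->peer_id;
	qdf_spin_unlock_bh(&soc->ast_lock);

	return peer_id;
}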
1318 
1319 /**
1320  * dp_peer_map_ipa_evt() - Send peer map event to IPA
1321  * @soc: SoC handle
1322  * @peer: peer to which ast node belongs
1323  * @ast_entry: AST entry
1324  * @mac_addr: MAC address of ast node
1325  *
1326  * Return: None
1327  */
1328 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
1329 static inline
1330 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1331 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1332 {
1333 	if (ast_entry) {
1334 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1335 			soc->cdp_soc.ol_ops->peer_map_event(
1336 			soc->ctrl_psoc, ast_entry->peer_id,
1337 			ast_entry->ast_idx, ast_entry->vdev_id,
1338 			mac_addr, ast_entry->type, ast_entry->ast_hash_value);
1339 		}
1340 	} else {
1341 		dp_peer_info("%pK: AST entry not found", soc);
1342 	}
1343 }
1344 
1345 /**
1346  * dp_peer_unmap_ipa_evt() - Send peer unmap event to IPA
1347  * @soc: SoC handle
1348  * @peer_id: Peer id
1349  * @vdev_id: Vdev id
1350  * @mac_addr: Peer mac address
1351  *
1352  * Return: None
1353  */
1354 static inline
1355 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1356 			   uint8_t vdev_id, uint8_t *mac_addr)
1357 {
1358 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1359 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1360 						      peer_id, vdev_id,
1361 						      mac_addr);
1362 	}
1363 }
1364 #else
1365 static inline
1366 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1367 			   uint8_t vdev_id, uint8_t *mac_addr)
1368 {
1369 }
1370 
1371 static inline
1372 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1373 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1374 {
1375 }
1376 #endif
1377 
1378 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1379 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1380 				    uint8_t vdev_id, uint16_t ast_hash,
1381 				    uint8_t is_wds)
1382 {
1383 	struct dp_vdev *vdev;
1384 	struct dp_ast_entry *ast_entry;
1385 	enum cdp_txrx_ast_entry_type type;
1386 	struct dp_peer *peer;
1387 	struct dp_peer *old_peer;
1388 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1389 
1390 	if (is_wds)
1391 		type = CDP_TXRX_AST_TYPE_WDS;
1392 	else
1393 		type = CDP_TXRX_AST_TYPE_STATIC;
1394 
1395 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1396 	if (!peer) {
1397 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1398 			     soc, peer_id,
1399 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1400 		return QDF_STATUS_E_INVAL;
1401 	}
1402 
1403 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1404 		type = CDP_TXRX_AST_TYPE_MLD;
1405 
1406 	vdev = peer->vdev;
1407 	if (!vdev) {
1408 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1409 		status = QDF_STATUS_E_INVAL;
1410 		goto fail;
1411 	}
1412 
1413 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1414 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1415 		    type != CDP_TXRX_AST_TYPE_MLD &&
1416 		    type != CDP_TXRX_AST_TYPE_SELF) {
1417 			status = QDF_STATUS_E_BUSY;
1418 			goto fail;
1419 		}
1420 	}
1421 
1422 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1423 		      soc, vdev->vdev_id, type,
1424 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1425 		      QDF_MAC_ADDR_REF(mac_addr));
1426 
1427 	/*
1428 	 * In an MLO scenario, the same mac address can be present as
1429 	 * both a link mac address and an MLD mac address.
1430 	 * A duplicate AST map needs to be handled for the non-MLD type.
1431 	 */
1432 	qdf_spin_lock_bh(&soc->ast_lock);
1433 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1434 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1435 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1436 			      hw_peer_id, vdev_id,
1437 			      QDF_MAC_ADDR_REF(mac_addr));
1438 
1439 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1440 						   DP_MOD_ID_AST);
1441 		if (!old_peer) {
1442 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1443 				     soc, ast_entry->peer_id,
1444 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1445 			qdf_spin_unlock_bh(&soc->ast_lock);
1446 			status = QDF_STATUS_E_INVAL;
1447 			goto fail;
1448 		}
1449 
1450 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1451 		dp_peer_free_ast_entry(soc, ast_entry);
1452 		if (old_peer)
1453 			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1454 	}
1455 
1456 	ast_entry = (struct dp_ast_entry *)
1457 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1458 	if (!ast_entry) {
1459 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1460 		qdf_spin_unlock_bh(&soc->ast_lock);
1461 		QDF_ASSERT(0);
1462 		status = QDF_STATUS_E_NOMEM;
1463 		goto fail;
1464 	}
1465 
1466 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1467 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1468 	ast_entry->is_mapped = false;
1469 	ast_entry->delete_in_progress = false;
1470 	ast_entry->next_hop = 0;
1471 	ast_entry->vdev_id = vdev->vdev_id;
1472 	ast_entry->type = type;
1473 
1474 	switch (type) {
1475 	case CDP_TXRX_AST_TYPE_STATIC:
1476 		if (peer->vdev->opmode == wlan_op_mode_sta)
1477 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1478 		break;
1479 	case CDP_TXRX_AST_TYPE_WDS:
1480 		ast_entry->next_hop = 1;
1481 		break;
1482 	case CDP_TXRX_AST_TYPE_MLD:
1483 		break;
1484 	default:
1485 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1486 	}
1487 
1488 	ast_entry->is_active = TRUE;
1489 	DP_STATS_INC(soc, ast.added, 1);
1490 	soc->num_ast_entries++;
1491 	dp_peer_ast_hash_add(soc, ast_entry);
1492 
1493 	ast_entry->ast_idx = hw_peer_id;
1494 	ast_entry->ast_hash_value = ast_hash;
1495 	ast_entry->peer_id = peer_id;
1496 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1497 			  ase_list_elem);
1498 
1499 	dp_peer_map_ipa_evt(soc, peer, ast_entry, mac_addr);
1500 
1501 	qdf_spin_unlock_bh(&soc->ast_lock);
1502 fail:
1503 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1504 
1505 	return status;
1506 }
1507 
1508 /**
1509  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1510  * @soc: SoC handle
1511  * @peer: peer to which ast node belongs
1512  * @mac_addr: MAC address of ast node
1513  * @hw_peer_id: HW AST Index returned by target in peer map event
1514  * @vdev_id: vdev id of the VAP to which the peer belongs
1515  * @ast_hash: ast hash value in HW
1516  * @is_wds: flag to indicate peer map event for WDS ast entry
1517  *
1518  * Return: QDF_STATUS code
1519  */
1520 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1521 					 struct dp_peer *peer,
1522 					 uint8_t *mac_addr,
1523 					 uint16_t hw_peer_id,
1524 					 uint8_t vdev_id,
1525 					 uint16_t ast_hash,
1526 					 uint8_t is_wds)
1527 {
1528 	struct dp_ast_entry *ast_entry = NULL;
1529 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1530 	void *cookie = NULL;
1531 	txrx_ast_free_cb cb = NULL;
1532 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1533 
1534 	if (soc->ast_offload_support)
1535 		return QDF_STATUS_SUCCESS;
1536 
1537 	if (!peer) {
1538 		return QDF_STATUS_E_INVAL;
1539 	}
1540 
1541 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1542 		    soc, peer, hw_peer_id, vdev_id,
1543 		    QDF_MAC_ADDR_REF(mac_addr));
1544 
1545 	qdf_spin_lock_bh(&soc->ast_lock);
1546 
1547 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1548 
1549 	if (is_wds) {
1550 		/*
1551 		 * In certain cases, such as an auth attack on a
1552 		 * repeater, the number of ast_entries falling in the
1553 		 * same hash bucket can exceed the max_skid length
1554 		 * supported by HW in the root AP. In these cases the
1555 		 * FW will return the hw_peer_id (ast_index) as 0xffff,
1556 		 * indicating HW could not add the entry to its table.
1557 		 * The host has to delete the entry from its own table
1558 		 * in these cases.
1559 		 */
1560 		if (hw_peer_id == HTT_INVALID_PEER) {
1561 			DP_STATS_INC(soc, ast.map_err, 1);
1562 			if (ast_entry) {
1563 				if (ast_entry->is_mapped) {
1564 					soc->ast_table[ast_entry->ast_idx] =
1565 						NULL;
1566 				}
1567 
1568 				cb = ast_entry->callback;
1569 				cookie = ast_entry->cookie;
1570 				peer_type = ast_entry->type;
1571 
1572 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1573 				dp_peer_free_ast_entry(soc, ast_entry);
1574 
1575 				qdf_spin_unlock_bh(&soc->ast_lock);
1576 
1577 				if (cb) {
1578 					cb(soc->ctrl_psoc,
1579 					   dp_soc_to_cdp_soc(soc),
1580 					   cookie,
1581 					   CDP_TXRX_AST_DELETED);
1582 				}
1583 			} else {
1584 				qdf_spin_unlock_bh(&soc->ast_lock);
1585 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1586 					      peer, peer->peer_id,
1587 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1588 					      QDF_MAC_ADDR_REF(mac_addr),
1589 					      vdev_id, is_wds);
1590 			}
1591 			err = QDF_STATUS_E_INVAL;
1592 
1593 			dp_hmwds_ast_add_notify(peer, mac_addr,
1594 						peer_type, err, true);
1595 
1596 			return err;
1597 		}
1598 	}
1599 
1600 	if (ast_entry) {
1601 		ast_entry->ast_idx = hw_peer_id;
1602 		soc->ast_table[hw_peer_id] = ast_entry;
1603 		ast_entry->is_active = TRUE;
1604 		peer_type = ast_entry->type;
1605 		ast_entry->ast_hash_value = ast_hash;
1606 		ast_entry->is_mapped = TRUE;
1607 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1608 
1609 		ast_entry->peer_id = peer->peer_id;
1610 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1611 				  ase_list_elem);
1612 	}
1613 
1614 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1615 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1616 			soc->cdp_soc.ol_ops->peer_map_event(
1617 			soc->ctrl_psoc, peer->peer_id,
1618 			hw_peer_id, vdev_id,
1619 			mac_addr, peer_type, ast_hash);
1620 		}
1621 	} else {
1622 		dp_peer_err("%pK: AST entry not found", soc);
1623 		err = QDF_STATUS_E_NOENT;
1624 	}
1625 
1626 	qdf_spin_unlock_bh(&soc->ast_lock);
1627 
1628 	dp_hmwds_ast_add_notify(peer, mac_addr,
1629 				peer_type, err, true);
1630 
1631 	return err;
1632 }
1633 
1634 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1635 			   struct cdp_soc *dp_soc,
1636 			   void *cookie,
1637 			   enum cdp_ast_free_status status)
1638 {
1639 	struct dp_ast_free_cb_params *param =
1640 		(struct dp_ast_free_cb_params *)cookie;
1641 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1642 	struct dp_peer *peer = NULL;
1643 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1644 
1645 	if (status != CDP_TXRX_AST_DELETED) {
1646 		qdf_mem_free(cookie);
1647 		return;
1648 	}
1649 
1650 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1651 				      0, param->vdev_id, DP_MOD_ID_AST);
1652 	if (peer) {
1653 		err = dp_peer_add_ast(soc, peer,
1654 				      &param->mac_addr.raw[0],
1655 				      param->type,
1656 				      param->flags);
1657 
1658 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1659 					param->type, err, false);
1660 
1661 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1662 	}
1663 	qdf_mem_free(cookie);
1664 }
1665 
1666 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1667 			   struct dp_peer *peer,
1668 			   uint8_t *mac_addr,
1669 			   enum cdp_txrx_ast_entry_type type,
1670 			   uint32_t flags)
1671 {
1672 	struct dp_ast_entry *ast_entry = NULL;
1673 	struct dp_vdev *vdev = NULL;
1674 	struct dp_pdev *pdev = NULL;
1675 	txrx_ast_free_cb cb = NULL;
1676 	void *cookie = NULL;
1677 	struct dp_peer *vap_bss_peer = NULL;
1678 	bool is_peer_found = false;
1679 	int status = 0;
1680 
1681 	if (soc->ast_offload_support)
1682 		return QDF_STATUS_E_INVAL;
1683 
1684 	vdev = peer->vdev;
1685 	if (!vdev) {
1686 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1687 		QDF_ASSERT(0);
1688 		return QDF_STATUS_E_INVAL;
1689 	}
1690 
1691 	pdev = vdev->pdev;
1692 
1693 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1694 
1695 	qdf_spin_lock_bh(&soc->ast_lock);
1696 
1697 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1698 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1699 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1700 			qdf_spin_unlock_bh(&soc->ast_lock);
1701 			return QDF_STATUS_E_BUSY;
1702 		}
1703 	}
1704 
1705 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1706 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1707 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1708 		      QDF_MAC_ADDR_REF(mac_addr));
1709 
1710 	/* fw supports only 2 times the max_peers ast entries */
1711 	if (soc->num_ast_entries >=
1712 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1713 		qdf_spin_unlock_bh(&soc->ast_lock);
1714 		dp_peer_err("%pK: Max ast entries reached", soc);
1715 		return QDF_STATUS_E_RESOURCES;
1716 	}
1717 
1718 	/* If the AST entry already exists, just return from here.
1719 	 * An AST entry with the same mac address can exist on different
1720 	 * radios; if ast_override support is enabled, use search by pdev
1721 	 * in this case.
1722 	 */
1723 	if (soc->ast_override_support) {
1724 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1725 							    pdev->pdev_id);
1726 		if (ast_entry) {
1727 			qdf_spin_unlock_bh(&soc->ast_lock);
1728 			return QDF_STATUS_E_ALREADY;
1729 		}
1730 
1731 		if (is_peer_found) {
1732 			/* During WDS to static roaming, the peer is added
1733 			 * to the list before the static AST entry is created.
1734 			 * So, allow an AST entry of STATIC type
1735 			 * even if the peer is present
1736 			 */
1737 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1738 				qdf_spin_unlock_bh(&soc->ast_lock);
1739 				return QDF_STATUS_E_ALREADY;
1740 			}
1741 		}
1742 	} else {
1743 		/* For WDS_HM_SEC, entries can be added for the same mac
1744 		 * address; do not check for an existing entry
1745 		 */
1746 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1747 			goto add_ast_entry;
1748 
1749 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1750 
1751 		if (ast_entry) {
1752 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1753 			    !ast_entry->delete_in_progress) {
1754 				qdf_spin_unlock_bh(&soc->ast_lock);
1755 				return QDF_STATUS_E_ALREADY;
1756 			}
1757 
1758 			/* An add for an HMWDS entry cannot be ignored if
1759 			 * there is an AST entry with the same mac address
1760 			 *
1761 			 * if an ast entry exists with the requested mac
1762 			 * address, send a delete command and register a
1763 			 * callback which can take care of adding the HMWDS
1764 			 * ast entry on delete confirmation from the target
1765 			 */
1766 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1767 				struct dp_ast_free_cb_params *param = NULL;
1768 
1769 				if (ast_entry->type ==
1770 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1771 					goto add_ast_entry;
1772 
1773 				/* save existing callback */
1774 				if (ast_entry->callback) {
1775 					cb = ast_entry->callback;
1776 					cookie = ast_entry->cookie;
1777 				}
1778 
1779 				param = qdf_mem_malloc(sizeof(*param));
1780 				if (!param) {
1781 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1782 						  QDF_TRACE_LEVEL_ERROR,
1783 						  "Allocation failed");
1784 					qdf_spin_unlock_bh(&soc->ast_lock);
1785 					return QDF_STATUS_E_NOMEM;
1786 				}
1787 
1788 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1789 					     QDF_MAC_ADDR_SIZE);
1790 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1791 					     &peer->mac_addr.raw[0],
1792 					     QDF_MAC_ADDR_SIZE);
1793 				param->type = type;
1794 				param->flags = flags;
1795 				param->vdev_id = vdev->vdev_id;
1796 				ast_entry->callback = dp_peer_free_hmwds_cb;
1797 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1798 				ast_entry->type = type;
1799 				ast_entry->cookie = (void *)param;
1800 				if (!ast_entry->delete_in_progress)
1801 					dp_peer_del_ast(soc, ast_entry);
1802 
1803 				qdf_spin_unlock_bh(&soc->ast_lock);
1804 
1805 				/* Call the saved callback */
1806 				if (cb) {
1807 					cb(soc->ctrl_psoc,
1808 					   dp_soc_to_cdp_soc(soc),
1809 					   cookie,
1810 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1811 				}
1812 				return QDF_STATUS_E_AGAIN;
1813 			}
1814 
1815 			qdf_spin_unlock_bh(&soc->ast_lock);
1816 			return QDF_STATUS_E_ALREADY;
1817 		}
1818 	}
1819 
1820 add_ast_entry:
1821 	ast_entry = (struct dp_ast_entry *)
1822 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1823 
1824 	if (!ast_entry) {
1825 		qdf_spin_unlock_bh(&soc->ast_lock);
1826 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1827 		QDF_ASSERT(0);
1828 		return QDF_STATUS_E_NOMEM;
1829 	}
1830 
1831 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1832 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1833 	ast_entry->is_mapped = false;
1834 	ast_entry->delete_in_progress = false;
1835 	ast_entry->peer_id = HTT_INVALID_PEER;
1836 	ast_entry->next_hop = 0;
1837 	ast_entry->vdev_id = vdev->vdev_id;
1838 
1839 	switch (type) {
1840 	case CDP_TXRX_AST_TYPE_STATIC:
1841 		peer->self_ast_entry = ast_entry;
1842 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1843 		if (peer->vdev->opmode == wlan_op_mode_sta)
1844 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1845 		break;
1846 	case CDP_TXRX_AST_TYPE_SELF:
1847 		peer->self_ast_entry = ast_entry;
1848 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1849 		break;
1850 	case CDP_TXRX_AST_TYPE_WDS:
1851 		ast_entry->next_hop = 1;
1852 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1853 		break;
1854 	case CDP_TXRX_AST_TYPE_WDS_HM:
1855 		ast_entry->next_hop = 1;
1856 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1857 		break;
1858 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1859 		ast_entry->next_hop = 1;
1860 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1861 		ast_entry->peer_id = peer->peer_id;
1862 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1863 				  ase_list_elem);
1864 		break;
1865 	case CDP_TXRX_AST_TYPE_DA:
1866 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1867 							  DP_MOD_ID_AST);
1868 		if (!vap_bss_peer) {
1869 			qdf_spin_unlock_bh(&soc->ast_lock);
1870 			qdf_mem_free(ast_entry);
1871 			return QDF_STATUS_E_FAILURE;
1872 		}
1873 		peer = vap_bss_peer;
1874 		ast_entry->next_hop = 1;
1875 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1876 		break;
1877 	default:
1878 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1879 	}
1880 
1881 	ast_entry->is_active = TRUE;
1882 	DP_STATS_INC(soc, ast.added, 1);
1883 	soc->num_ast_entries++;
1884 	dp_peer_ast_hash_add(soc, ast_entry);
1885 
1886 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1887 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1888 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1889 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1890 		status = dp_add_wds_entry_wrapper(soc,
1891 						  peer,
1892 						  mac_addr,
1893 						  flags,
1894 						  ast_entry->type);
1895 
1896 	if (vap_bss_peer)
1897 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1898 
1899 	qdf_spin_unlock_bh(&soc->ast_lock);
1900 	return qdf_status_from_os_return(status);
1901 }
1902 
1903 qdf_export_symbol(dp_peer_add_ast);
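
/*
 * Example: caller-side handling of the dp_peer_add_ast() return codes
 * seen above. This is an illustrative sketch only (the wrapper name is
 * hypothetical, not part of this file): QDF_STATUS_E_AGAIN means an
 * existing entry is being deleted first and the HMWDS add is replayed by
 * dp_peer_free_hmwds_cb on delete confirmation, so it is not a failure.
 */
#if 0	/* illustrative sketch, not part of the driver build */
static QDF_STATUS dp_try_add_hmwds_ast(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *mac_addr, uint32_t flags)
{
	QDF_STATUS status;

	status = dp_peer_add_ast(soc, peer, mac_addr,
				 CDP_TXRX_AST_TYPE_WDS_HM, flags);
	switch (status) {
	case QDF_STATUS_SUCCESS:	/* entry created and pushed to FW */
	case QDF_STATUS_E_AGAIN:	/* re-added after delete confirm */
	case QDF_STATUS_E_ALREADY:	/* a usable entry already exists */
		return QDF_STATUS_SUCCESS;
	default:
		return status;		/* e.g. QDF_STATUS_E_NOMEM */
	}
}
#endif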
1904 
1905 void dp_peer_free_ast_entry(struct dp_soc *soc,
1906 			    struct dp_ast_entry *ast_entry)
1907 {
1908 	/*
1909 	 * NOTE: Ensure that call to this API is done
1910 	 * after soc->ast_lock is taken
1911 	 */
1912 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1913 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1914 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1915 
1916 	ast_entry->callback = NULL;
1917 	ast_entry->cookie = NULL;
1918 
1919 	DP_STATS_INC(soc, ast.deleted, 1);
1920 	dp_peer_ast_hash_remove(soc, ast_entry);
1921 	dp_peer_ast_cleanup(soc, ast_entry);
1922 	qdf_mem_free(ast_entry);
1923 	soc->num_ast_entries--;
1924 }
1925 
1926 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1927 			      struct dp_ast_entry *ast_entry,
1928 			      struct dp_peer *peer)
1929 {
1930 	if (!peer) {
1931 		dp_info_rl("NULL peer");
1932 		return;
1933 	}
1934 
1935 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1936 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1937 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1938 			  ast_entry->type);
1939 		return;
1940 	}
1941 	/*
1942 	 * NOTE: Ensure that call to this API is done
1943 	 * after soc->ast_lock is taken
1944 	 */
1945 
1946 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1947 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1948 
1949 	if (ast_entry == peer->self_ast_entry)
1950 		peer->self_ast_entry = NULL;
1951 
1952 	/*
1953 	 * release the reference only if it is mapped
1954 	 * to ast_table
1955 	 */
1956 	if (ast_entry->is_mapped)
1957 		soc->ast_table[ast_entry->ast_idx] = NULL;
1958 
1959 	ast_entry->peer_id = HTT_INVALID_PEER;
1960 }
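
/*
 * Both dp_peer_free_ast_entry() and dp_peer_unlink_ast_entry() require
 * soc->ast_lock to be held by the caller. A minimal sketch of the
 * expected calling sequence (the same sequence is used by
 * dp_peer_ast_free_entry_by_mac() later in this file):
 */
#if 0	/* illustrative sketch, not part of the driver build */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
	dp_peer_free_ast_entry(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);
	/* run any saved ast free callback only after dropping the lock */
#endif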
1961 
1962 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1963 {
1964 	struct dp_peer *peer = NULL;
1965 
1966 	if (soc->ast_offload_support)
1967 		return;
1968 
1969 	if (!ast_entry) {
1970 		dp_info_rl("NULL AST entry");
1971 		return;
1972 	}
1973 
1974 	if (ast_entry->delete_in_progress) {
1975 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1976 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1977 			  ast_entry->type);
1978 		return;
1979 	}
1980 
1981 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1982 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
1983 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1984 
1985 	ast_entry->delete_in_progress = true;
1986 
1987 	/* In teardown, del ast is called after setting the logical delete
1988 	 * state; use __dp_peer_get_ref_by_id to get the reference
1989 	 * irrespective of that state
1990 	 */
1991 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1992 				       DP_MOD_ID_AST);
1993 
1994 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
1995 
1996 	/* Remove SELF and STATIC entries in teardown itself */
1997 	if (!ast_entry->next_hop)
1998 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1999 
2000 	if (ast_entry->is_mapped)
2001 		soc->ast_table[ast_entry->ast_idx] = NULL;
2002 
2003 	/* If peer map v2 is enabled, we are not freeing the ast entry
2004 	 * here; it is supposed to be freed in the unmap event (after
2005 	 * we receive delete confirmation from the target).
2006 	 *
2007 	 * If peer_id is invalid, we did not get the peer map event
2008 	 * for the peer; free the ast entry from here only in that case.
2009 	 */
2010 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2011 		goto end;
2012 
2013 	/* For a WDS secondary entry, ast_entry->next_hop is set, so
2014 	 * unlinking has to be done explicitly here.
2015 	 * As this entry is not a mapped entry, no unmap notification
2016 	 * will come from FW. Hence unlinking is done right here.
2017 	 */
2018 
2019 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2020 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2021 
2022 	dp_peer_free_ast_entry(soc, ast_entry);
2023 
2024 end:
2025 	if (peer)
2026 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2027 }
2028 
2029 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2030 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2031 {
2032 	int ret = -1;
2033 	struct dp_peer *old_peer;
2034 
2035 	if (soc->ast_offload_support)
2036 		return QDF_STATUS_E_INVAL;
2037 
2038 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2039 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2040 		      peer->vdev->vdev_id, flags,
2041 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2042 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2043 
2044 	/* Do not send an AST update in the below cases:
2045 	 *  1) AST entry delete has already been triggered
2046 	 *  2) Peer delete has already been triggered
2047 	 *  3) We did not get the HTT map for the create event
2048 	 */
2049 	if (ast_entry->delete_in_progress ||
2050 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2051 	    !ast_entry->is_mapped)
2052 		return ret;
2053 
2054 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2055 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2056 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2057 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2058 		return 0;
2059 
2060 	/*
2061 	 * Avoids flood of WMI update messages sent to FW for same peer.
2062 	 */
2063 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2064 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2065 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2066 	    (ast_entry->is_active))
2067 		return 0;
2068 
2069 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2070 					 DP_MOD_ID_AST);
2071 	if (!old_peer)
2072 		return 0;
2073 
2074 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2075 
2076 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2077 
2078 	ast_entry->peer_id = peer->peer_id;
2079 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2080 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2081 	ast_entry->vdev_id = peer->vdev->vdev_id;
2082 	ast_entry->is_active = TRUE;
2083 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2084 
2085 	ret = dp_update_wds_entry_wrapper(soc,
2086 					  peer,
2087 					  ast_entry->mac_addr.raw,
2088 					  flags);
2089 
2090 	return ret;
2091 }
2092 
2093 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2094 				struct dp_ast_entry *ast_entry)
2095 {
2096 	return ast_entry->pdev_id;
2097 }
2098 
2099 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2100 				struct dp_ast_entry *ast_entry)
2101 {
2102 	return ast_entry->next_hop;
2103 }
2104 
2105 void dp_peer_ast_set_type(struct dp_soc *soc,
2106 				struct dp_ast_entry *ast_entry,
2107 				enum cdp_txrx_ast_entry_type type)
2108 {
2109 	ast_entry->type = type;
2110 }
2111 
2112 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2113 			      struct dp_ast_entry *ast_entry,
2114 			      struct dp_peer *peer)
2115 {
2116 	bool delete_in_fw = false;
2117 
2118 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2119 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
2120 		  __func__, ast_entry->type, ast_entry->pdev_id,
2121 		  ast_entry->vdev_id,
2122 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2123 		  ast_entry->next_hop, ast_entry->peer_id);
2124 
2125 	/*
2126 	 * If the peer state is logical delete, the peer is about to be
2127 	 * torn down with a peer delete command to firmware,
2128 	 * which will clean up all the wds ast entries.
2129 	 * So, no need to send explicit wds ast delete to firmware.
2130 	 */
2131 	if (ast_entry->next_hop) {
2132 		if (peer && dp_peer_state_cmp(peer,
2133 					      DP_PEER_STATE_LOGICAL_DELETE))
2134 			delete_in_fw = false;
2135 		else
2136 			delete_in_fw = true;
2137 
2138 		dp_del_wds_entry_wrapper(soc,
2139 					 ast_entry->vdev_id,
2140 					 ast_entry->mac_addr.raw,
2141 					 ast_entry->type,
2142 					 delete_in_fw);
2143 	}
2144 }
2145 #else
2146 void dp_peer_free_ast_entry(struct dp_soc *soc,
2147 			    struct dp_ast_entry *ast_entry)
2148 {
2149 }
2150 
2151 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2152 			      struct dp_ast_entry *ast_entry,
2153 			      struct dp_peer *peer)
2154 {
2155 }
2156 
2157 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2158 			     struct dp_ast_entry *ase)
2159 {
2160 }
2161 
2162 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2163 						     uint8_t *ast_mac_addr,
2164 						     uint8_t vdev_id)
2165 {
2166 	return NULL;
2167 }
2168 
2169 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2170 			   struct dp_peer *peer,
2171 			   uint8_t *mac_addr,
2172 			   enum cdp_txrx_ast_entry_type type,
2173 			   uint32_t flags)
2174 {
2175 	return QDF_STATUS_E_FAILURE;
2176 }
2177 
2178 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2179 {
2180 }
2181 
2182 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2183 			struct dp_ast_entry *ast_entry, uint32_t flags)
2184 {
2185 	return 1;
2186 }
2187 
2188 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2189 					       uint8_t *ast_mac_addr)
2190 {
2191 	return NULL;
2192 }
2193 
2194 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
2195 					struct dp_soc *soc,
2196 					uint8_t *ast_mac_addr,
2197 					enum cdp_txrx_ast_entry_type type)
2198 {
2199 	return NULL;
2200 }
2201 
2202 static inline
2203 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2204 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2205 				    uint8_t vdev_id, uint16_t ast_hash,
2206 				    uint8_t is_wds)
2207 {
2208 	return QDF_STATUS_SUCCESS;
2209 }
2210 
2211 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2212 						     uint8_t *ast_mac_addr,
2213 						     uint8_t pdev_id)
2214 {
2215 	return NULL;
2216 }
2217 
2218 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2219 {
2220 	return QDF_STATUS_SUCCESS;
2221 }
2222 
2223 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2224 					 struct dp_peer *peer,
2225 					 uint8_t *mac_addr,
2226 					 uint16_t hw_peer_id,
2227 					 uint8_t vdev_id,
2228 					 uint16_t ast_hash,
2229 					 uint8_t is_wds)
2230 {
2231 	return QDF_STATUS_SUCCESS;
2232 }
2233 
2234 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2235 {
2236 }
2237 
2238 void dp_peer_ast_set_type(struct dp_soc *soc,
2239 				struct dp_ast_entry *ast_entry,
2240 				enum cdp_txrx_ast_entry_type type)
2241 {
2242 }
2243 
2244 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2245 				struct dp_ast_entry *ast_entry)
2246 {
2247 	return 0xff;
2248 }
2249 
2250 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2251 				 struct dp_ast_entry *ast_entry)
2252 {
2253 	return 0xff;
2254 }
2255 
2256 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2257 			      struct dp_ast_entry *ast_entry,
2258 			      struct dp_peer *peer)
2259 {
2260 }
2261 
2262 static inline
2263 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
2264 			   uint8_t vdev_id, uint8_t *mac_addr)
2265 {
2266 }
2267 #endif
2268 
2269 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2270 void dp_peer_ast_send_multi_wds_del(
2271 		struct dp_soc *soc, uint8_t vdev_id,
2272 		struct peer_del_multi_wds_entries *wds_list)
2273 {
2274 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2275 
2276 	if (cdp_soc && cdp_soc->ol_ops &&
2277 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2278 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2279 							  vdev_id, wds_list);
2280 }
2281 #endif
2282 
2283 #ifdef FEATURE_WDS
2284 /**
2285  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2286  * @soc: soc handle
2287  * @peer: peer handle
2288  *
2289  * Free all the wds ast entries associated with peer
2290  *
2291  * Return: Number of wds ast entries freed
2292  */
2293 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2294 					     struct dp_peer *peer)
2295 {
2296 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2297 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2298 	uint32_t num_ast = 0;
2299 
2300 	TAILQ_INIT(&ast_local_list);
2301 	qdf_spin_lock_bh(&soc->ast_lock);
2302 
2303 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2304 		if (ast_entry->next_hop)
2305 			num_ast++;
2306 
2307 		if (ast_entry->is_mapped)
2308 			soc->ast_table[ast_entry->ast_idx] = NULL;
2309 
2310 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2311 		DP_STATS_INC(soc, ast.deleted, 1);
2312 		dp_peer_ast_hash_remove(soc, ast_entry);
2313 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2314 				  ase_list_elem);
2315 		soc->num_ast_entries--;
2316 	}
2317 
2318 	qdf_spin_unlock_bh(&soc->ast_lock);
2319 
2320 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2321 			   temp_ast_entry) {
2322 		if (ast_entry->callback)
2323 			ast_entry->callback(soc->ctrl_psoc,
2324 					    dp_soc_to_cdp_soc(soc),
2325 					    ast_entry->cookie,
2326 					    CDP_TXRX_AST_DELETED);
2327 
2328 		qdf_mem_free(ast_entry);
2329 	}
2330 
2331 	return num_ast;
2332 }
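
/*
 * dp_peer_ast_free_wds_entries() above uses a two-phase pattern: detach
 * the entries onto a local list while holding ast_lock, then invoke the
 * free callbacks only after the lock is dropped, since a callback that
 * re-entered an AST API taking ast_lock could deadlock. A minimal sketch
 * of the pattern for a single entry:
 */
#if 0	/* illustrative sketch, not part of the driver build */
	TAILQ_HEAD(, dp_ast_entry) local;
	struct dp_ast_entry *ase, *tmp;

	TAILQ_INIT(&local);
	qdf_spin_lock_bh(&soc->ast_lock);
	/* phase 1: unlink under the lock, defer the callback */
	TAILQ_INSERT_TAIL(&local, ast_entry, ase_list_elem);
	qdf_spin_unlock_bh(&soc->ast_lock);
	/* phase 2: run callbacks and free with the lock dropped */
	TAILQ_FOREACH_SAFE(ase, &local, ase_list_elem, tmp) {
		if (ase->callback)
			ase->callback(soc->ctrl_psoc, dp_soc_to_cdp_soc(soc),
				      ase->cookie, CDP_TXRX_AST_DELETED);
		qdf_mem_free(ase);
	}
#endif
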
2333 /**
2334  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2335  * @soc: soc handle
2336  * @peer: peer handle
2337  * @free_wds_count: number of wds entries freed by FW with peer delete
2338  *
2339  * Free all the wds ast entries associated with peer and compare with
2340  * the value received from firmware
2341  *
2342  * Return: None
2343  */
2344 static void
2345 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2346 			  uint32_t free_wds_count)
2347 {
2348 	uint32_t wds_deleted = 0;
2349 
2350 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2351 		return;
2352 
2353 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2354 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2355 	    (free_wds_count != wds_deleted)) {
2356 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2357 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
2358 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2359 			 free_wds_count, wds_deleted);
2360 	}
2361 }
2362 
2363 #else
2364 static void
2365 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2366 			  uint32_t free_wds_count)
2367 {
2368 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2369 
2370 	qdf_spin_lock_bh(&soc->ast_lock);
2371 
2372 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2373 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2374 
2375 		if (ast_entry->is_mapped)
2376 			soc->ast_table[ast_entry->ast_idx] = NULL;
2377 
2378 		dp_peer_free_ast_entry(soc, ast_entry);
2379 	}
2380 
2381 	peer->self_ast_entry = NULL;
2382 	qdf_spin_unlock_bh(&soc->ast_lock);
2383 }
2384 #endif
2385 
2386 /**
2387  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2388  * @soc: soc handle
2389  * @peer: peer handle
2390  * @vdev_id: vdev_id
2391  * @mac_addr: mac address of the AST entry to search and delete
2392  *
2393  * find the ast entry from the peer list using the mac address and free
2394  * the entry.
2395  *
2396  * Return: SUCCESS or NOENT
2397  */
2398 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2399 					 struct dp_peer *peer,
2400 					 uint8_t vdev_id,
2401 					 uint8_t *mac_addr)
2402 {
2403 	struct dp_ast_entry *ast_entry;
2404 	void *cookie = NULL;
2405 	txrx_ast_free_cb cb = NULL;
2406 
2407 	/*
2408 	 * release the reference only if it is mapped
2409 	 * to ast_table
2410 	 */
2411 
2412 	qdf_spin_lock_bh(&soc->ast_lock);
2413 
2414 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2415 	if (!ast_entry) {
2416 		qdf_spin_unlock_bh(&soc->ast_lock);
2417 		return QDF_STATUS_E_NOENT;
2418 	} else if (ast_entry->is_mapped) {
2419 		soc->ast_table[ast_entry->ast_idx] = NULL;
2420 	}
2421 
2422 	cb = ast_entry->callback;
2423 	cookie = ast_entry->cookie;
2424 
2425 
2426 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2427 
2428 	dp_peer_free_ast_entry(soc, ast_entry);
2429 
2430 	qdf_spin_unlock_bh(&soc->ast_lock);
2431 
2432 	if (cb) {
2433 		cb(soc->ctrl_psoc,
2434 		   dp_soc_to_cdp_soc(soc),
2435 		   cookie,
2436 		   CDP_TXRX_AST_DELETED);
2437 	}
2438 
2439 	return QDF_STATUS_SUCCESS;
2440 }
2441 
2442 void dp_peer_find_hash_erase(struct dp_soc *soc)
2443 {
2444 	int i, j;
2445 
2446 	/*
2447 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2448 	 * it's known that the soc is no longer in use.
2449 	 */
2450 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2451 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2452 			struct dp_peer *peer, *peer_next;
2453 
2454 			/*
2455 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2456 			 * memory access violation after peer is freed
2457 			 */
2458 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2459 				hash_list_elem, peer_next) {
2460 				/*
2461 				 * Don't remove the peer from the hash table -
2462 				 * that would modify the list we are currently
2463 				 * traversing, and it's not necessary anyway.
2464 				 */
2465 				/*
2466 				 * Artificially adjust the peer's ref count to
2467 				 * 1, so it will get deleted by
2468 				 * dp_peer_unref_delete.
2469 				 */
2470 				/* set to zero */
2471 				qdf_atomic_init(&peer->ref_cnt);
2472 				for (j = 0; j < DP_MOD_ID_MAX; j++)
2473 					qdf_atomic_init(&peer->mod_refs[j]);
2474 				/* incr to one */
2475 				qdf_atomic_inc(&peer->ref_cnt);
2476 				qdf_atomic_inc(&peer->mod_refs
2477 						[DP_MOD_ID_CONFIG]);
2478 				dp_peer_unref_delete(peer,
2479 						     DP_MOD_ID_CONFIG);
2480 			}
2481 		}
2482 	}
2483 }
2484 
2485 void dp_peer_ast_table_detach(struct dp_soc *soc)
2486 {
2487 	if (soc->ast_table) {
2488 		qdf_mem_free(soc->ast_table);
2489 		soc->ast_table = NULL;
2490 	}
2491 }
2492 
2493 void dp_peer_find_map_detach(struct dp_soc *soc)
2494 {
2495 	struct dp_peer *peer = NULL;
2496 	uint32_t i = 0;
2497 
2498 	if (soc->peer_id_to_obj_map) {
2499 		for (i = 0; i < soc->max_peer_id; i++) {
2500 			peer = soc->peer_id_to_obj_map[i];
2501 			if (peer)
2502 				dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2503 		}
2504 		qdf_mem_free(soc->peer_id_to_obj_map);
2505 		soc->peer_id_to_obj_map = NULL;
2506 		qdf_spinlock_destroy(&soc->peer_map_lock);
2507 	}
2508 }
2509 
2510 #ifndef AST_OFFLOAD_ENABLE
2511 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2512 {
2513 	QDF_STATUS status;
2514 
2515 	status = dp_peer_find_map_attach(soc);
2516 	if (!QDF_IS_STATUS_SUCCESS(status))
2517 		return status;
2518 
2519 	status = dp_peer_find_hash_attach(soc);
2520 	if (!QDF_IS_STATUS_SUCCESS(status))
2521 		goto map_detach;
2522 
2523 	status = dp_peer_ast_table_attach(soc);
2524 	if (!QDF_IS_STATUS_SUCCESS(status))
2525 		goto hash_detach;
2526 
2527 	status = dp_peer_ast_hash_attach(soc);
2528 	if (!QDF_IS_STATUS_SUCCESS(status))
2529 		goto ast_table_detach;
2530 
2531 	status = dp_peer_mec_hash_attach(soc);
2532 	if (QDF_IS_STATUS_SUCCESS(status)) {
2533 		dp_soc_wds_attach(soc);
2534 		return status;
2535 	}
2536 
2537 	dp_peer_ast_hash_detach(soc);
2538 ast_table_detach:
2539 	dp_peer_ast_table_detach(soc);
2540 hash_detach:
2541 	dp_peer_find_hash_detach(soc);
2542 map_detach:
2543 	dp_peer_find_map_detach(soc);
2544 
2545 	return status;
2546 }
2547 #else
2548 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2549 {
2550 	QDF_STATUS status;
2551 
2552 	status = dp_peer_find_map_attach(soc);
2553 	if (!QDF_IS_STATUS_SUCCESS(status))
2554 		return status;
2555 
2556 	status = dp_peer_find_hash_attach(soc);
2557 	if (!QDF_IS_STATUS_SUCCESS(status))
2558 		goto map_detach;
2559 
2560 	return status;
2561 map_detach:
2562 	dp_peer_find_map_detach(soc);
2563 
2564 	return status;
2565 }
2566 #endif
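
/*
 * dp_peer_find_attach() above follows the usual goto-unwind idiom: each
 * attach step that fails jumps to a label that tears down only the steps
 * that already succeeded, in reverse order. Generic shape of the idiom
 * (step names are hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver build */
	status = step_a_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = step_b_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto a_detach;

	return QDF_STATUS_SUCCESS;

a_detach:
	step_a_detach(soc);
	return status;
#endif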
2567 
2568 #ifdef REO_SHARED_QREF_TABLE_EN
2569 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2570 					struct dp_peer *peer)
2571 {
2572 	uint8_t tid;
2573 	uint16_t peer_id;
2574 	uint32_t max_list_size;
2575 
2576 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
2577 
2578 	peer_id = peer->peer_id;
2579 
2580 	if (peer_id > soc->max_peer_id)
2581 		return;
2582 	if (IS_MLO_DP_LINK_PEER(peer))
2583 		return;
2584 
2585 	if (max_list_size) {
2586 		unsigned long curr_ts = qdf_get_system_timestamp();
2587 		struct dp_peer *primary_peer = peer;
2588 		uint16_t chip_id = 0xFFFF;
2589 		uint32_t qref_index;
2590 
2591 		qref_index = soc->shared_qaddr_del_idx;
2592 
2593 		soc->list_shared_qaddr_del[qref_index].peer_id =
2594 							  primary_peer->peer_id;
2595 		soc->list_shared_qaddr_del[qref_index].ts_qaddr_del = curr_ts;
2596 		soc->list_shared_qaddr_del[qref_index].chip_id = chip_id;
2597 		soc->shared_qaddr_del_idx++;
2598 
2599 		if (soc->shared_qaddr_del_idx == max_list_size)
2600 			soc->shared_qaddr_del_idx = 0;
2601 	}
2602 
2603 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2604 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2605 			hal_reo_shared_qaddr_write(soc->hal_soc,
2606 						   peer_id, tid, 0);
2607 		}
2608 	}
2609 }
2610 #endif
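
/*
 * list_shared_qaddr_del above is a fixed-size circular history of qref
 * deletions: shared_qaddr_del_idx advances on every record and wraps to
 * zero at qref_control_size, so the newest record overwrites the oldest.
 * The same ring arithmetic, condensed:
 */
#if 0	/* illustrative sketch, not part of the driver build */
	uint32_t idx = soc->shared_qaddr_del_idx;

	soc->list_shared_qaddr_del[idx].peer_id = peer->peer_id;
	soc->list_shared_qaddr_del[idx].ts_qaddr_del =
					qdf_get_system_timestamp();
	if (++soc->shared_qaddr_del_idx == max_list_size)
		soc->shared_qaddr_del_idx = 0;	/* wrap around */
#endif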
2611 
2612 /**
2613  * dp_peer_find_add_id() - map peer_id with peer
2614  * @soc: soc handle
2615  * @peer_mac_addr: peer mac address
2616  * @peer_id: peer id to be mapped
2617  * @hw_peer_id: HW ast index
2618  * @vdev_id: vdev_id
2619  * @peer_type: peer type (link or MLD)
2620  *
2621  * return: peer in success
2622  *         NULL in failure
2623  */
2624 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2625 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2626 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2627 {
2628 	struct dp_peer *peer;
2629 	struct cdp_peer_info peer_info = { 0 };
2630 
2631 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2632 	/* check if there's already a peer object with this MAC address */
2633 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2634 				 false, peer_type);
2635 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2636 	dp_peer_debug("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2637 		      soc, peer, peer_id, vdev_id,
2638 		      QDF_MAC_ADDR_REF(peer_mac_addr));
2639 
2640 	if (peer) {
2641 		/* peer's ref count was already incremented by
2642 		 * peer_find_hash_find
2643 		 */
2644 		dp_peer_info("%pK: ref_cnt: %d", soc,
2645 			     qdf_atomic_read(&peer->ref_cnt));
2646 
2647 		/*
2648 		 * If the peer is in logical delete state, CP triggered the
2649 		 * delete before the map was received; ignore this event.
2650 		 */
2651 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2652 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2653 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2654 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2655 				 vdev_id);
2656 			return NULL;
2657 		}
2658 
2659 		if (peer->peer_id == HTT_INVALID_PEER) {
2660 			if (!IS_MLO_DP_MLD_PEER(peer))
2661 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2662 								   peer_id);
2663 		} else {
2664 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2665 			QDF_ASSERT(0);
2666 			return NULL;
2667 		}
2668 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2669 		if (soc->arch_ops.dp_partner_chips_map)
2670 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2671 
2672 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2673 		return peer;
2674 	}
2675 
2676 	return NULL;
2677 }
2678 
2679 #ifdef WLAN_FEATURE_11BE_MLO
2680 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2681 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2682 {
2683 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2684 }
2685 #else
2686 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2687 {
2688 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2689 }
2690 #endif
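
/*
 * dp_gen_ml_peer_id() derives the host-side ML peer id by OR-ing an
 * "ML peer valid" bit into the FW-assigned peer id. Worked example,
 * assuming a bit position of 13 (the real value of
 * HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S comes from the HTT headers):
 */
#if 0	/* illustrative arithmetic; the bit position is an assumption */
	uint16_t peer_id = 0x0005;
	uint16_t ml_peer_id = peer_id | (1 << 13);	/* 0x2005 */
#endif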
2691 
2692 QDF_STATUS
2693 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2694 			   uint8_t *peer_mac_addr,
2695 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2696 			   struct dp_mlo_link_info *mlo_link_info)
2697 {
2698 	struct dp_peer *peer = NULL;
2699 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2700 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2701 	uint8_t vdev_id = 0;
2702 	uint8_t is_wds = 0;
2703 	int i;
2704 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2705 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2706 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2707 	struct dp_soc *primary_soc = NULL;
2708 
2709 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2710 					       NULL, peer_mac_addr,
2711 					       1, peer_id, ml_peer_id, 0,
2712 					       vdev_id);
2713 
2714 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2715 		soc, peer_id, ml_peer_id,
2716 		QDF_MAC_ADDR_REF(peer_mac_addr));
2717 
2718 	/* Get corresponding vdev ID for the peer based
2719 	 * on chip ID obtained from mlo peer_map event
2720 	 */
2721 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2722 		if (mlo_link_info[i].peer_chip_id == dp_get_chip_id(soc)) {
2723 			vdev_id = mlo_link_info[i].vdev_id;
2724 			break;
2725 		}
2726 	}
2727 
2728 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2729 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2730 	if (peer) {
2731 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2732 		    qdf_mem_cmp(peer->mac_addr.raw,
2733 				peer->vdev->mld_mac_addr.raw,
2734 				QDF_MAC_ADDR_SIZE) != 0) {
2735 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2736 			peer->bss_peer = 1;
2737 			if (peer->txrx_peer)
2738 				peer->txrx_peer->bss_peer = 1;
2739 		}
2740 
2741 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2742 			peer->vdev->bss_ast_hash = ast_hash;
2743 			peer->vdev->bss_ast_idx = hw_peer_id;
2744 		}
2745 
2746 		/* Add an ast entry in case the self ast entry was
2747 		 * deleted due to a DP CP sync issue.
2748 		 *
2749 		 * self_ast_entry is modified in the peer create
2750 		 * and peer unmap paths, which cannot run in
2751 		 * parallel with peer map; no lock is needed before
2752 		 * referring to it.
2753 		 */
2754 		if (!peer->self_ast_entry) {
2755 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2756 				QDF_MAC_ADDR_REF(peer_mac_addr));
2757 			dp_peer_add_ast(soc, peer,
2758 					peer_mac_addr,
2759 					type, 0);
2760 		}
2761 		/* If peer setup and hence rx_tid setup got called
2762 		 * before htt peer map then Qref write to LUT did not
2763 		 * happen in rx_tid setup as peer_id was invalid.
2764 		 * So defer Qref write to peer map handler. Check if
2765 		 * rx_tid qdesc for tid 0 is already setup and perform
2766 		 * qref write to LUT for Tid 0 and 16.
2767 		 *
2768 		 * Peer map could be obtained on assoc link, hence
2769 		 * change to primary link's soc.
2770 		 */
2771 		primary_soc = peer->vdev->pdev->soc;
2772 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
2773 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
2774 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2775 						   ml_peer_id,
2776 						   0,
2777 						   peer->rx_tid[0].hw_qdesc_paddr);
2778 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2779 						   ml_peer_id,
2780 						   DP_NON_QOS_TID,
2781 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2782 		}
2783 	}
2784 
2785 	if (!primary_soc)
2786 		primary_soc = soc;
2787 
2788 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2789 			      vdev_id, ast_hash, is_wds);
2790 
2791 	/*
2792 	 * If AST offload and host AST DB is enabled, populate AST entries on
2793 	 * host based on mlo peer map event from FW
2794 	 */
2795 	if (peer && soc->ast_offload_support && soc->host_ast_db_enable) {
2796 		dp_peer_host_add_map_ast(primary_soc, ml_peer_id, peer_mac_addr,
2797 					 hw_peer_id, vdev_id,
2798 					 ast_hash, is_wds);
2799 	}
2800 
2801 	return err;
2802 }
2803 #endif
2804 
2805 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2806 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2807 			      uint8_t *peer_mac_addr)
2808 {
2809 	struct dp_vdev *vdev = NULL;
2810 
2811 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2812 	if (vdev) {
2813 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2814 				QDF_MAC_ADDR_SIZE) == 0) {
2815 			vdev->roaming_peer_status =
2816 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2817 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
2818 				     QDF_MAC_ADDR_SIZE);
2819 		}
2820 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2821 	}
2822 }
2823 #endif
2824 
2825 #ifdef WLAN_SUPPORT_PPEDS
2826 static void
2827 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2828 				     bool peer_map)
2829 {
2830 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
2831 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2832 								   peer_map);
2833 }
2834 #else
2835 static void
2836 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2837 				     bool peer_map)
2838 {
2839 }
2840 #endif
2841 
2842 QDF_STATUS
2843 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2844 		       uint16_t hw_peer_id, uint8_t vdev_id,
2845 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2846 		       uint8_t is_wds)
2847 {
2848 	struct dp_peer *peer = NULL;
2849 	struct dp_vdev *vdev = NULL;
2850 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2851 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2852 
2853 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2854 					       NULL, peer_mac_addr, 1, peer_id,
2855 					       0, 0, vdev_id);
2856 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2857 		soc, peer_id, hw_peer_id,
2858 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2859 
2860 	/* For a peer map event for a WDS ast entry, get the peer
2861 	 * from the obj map
2862 	 */
2863 	if (is_wds) {
2864 		if (!soc->ast_offload_support) {
2865 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2866 						     DP_MOD_ID_HTT);
2867 
2868 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2869 					      hw_peer_id,
2870 					      vdev_id, ast_hash, is_wds);
2871 			if (peer)
2872 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2873 		}
2874 	} else {
2875 		/*
2876 		 * It's the responsibility of the CP and FW to ensure
2877 		 * that peer is created successfully. Ideally DP should
2878 		 * not hit the below condition for directly associated
2879 		 * peers.
2880 		 */
2881 		if (!soc->ast_offload_support &&
2882 		    (hw_peer_id >=
2883 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2884 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2885 			qdf_assert_always(0);
2886 		}
2887 
2888 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2889 					   hw_peer_id, vdev_id,
2890 					   CDP_LINK_PEER_TYPE);
2891 
2892 		if (peer) {
2893 			bool peer_map = true;
2894 
2895 			/* Updating ast_hash and ast_idx in peer level */
2896 			peer->ast_hash = ast_hash;
2897 			peer->ast_idx = hw_peer_id;
2898 			vdev = peer->vdev;
2899 			/* Only check for STA Vdev and peer is not for TDLS */
2900 			if (wlan_op_mode_sta == vdev->opmode &&
2901 			    !peer->is_tdls_peer) {
2902 				if (qdf_mem_cmp(peer->mac_addr.raw,
2903 						vdev->mac_addr.raw,
2904 						QDF_MAC_ADDR_SIZE) != 0) {
2905 					dp_info("%pK: STA vdev bss_peer", soc);
2906 					peer->bss_peer = 1;
2907 					if (peer->txrx_peer)
2908 						peer->txrx_peer->bss_peer = 1;
2909 				}
2910 
2911 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2912 					ast_hash, hw_peer_id);
2913 				vdev->bss_ast_hash = ast_hash;
2914 				vdev->bss_ast_idx = hw_peer_id;
2915 
2916 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2917 								     peer_map);
2918 			}
2919 
2920 			/* Add an ast entry in case the self ast entry was
2921 			 * deleted due to a DP CP sync issue.
2922 			 *
2923 			 * self_ast_entry is modified in the peer create
2924 			 * and peer unmap paths, which cannot run in
2925 			 * parallel with peer map; no lock is needed before
2926 			 * referring to it.
2927 			 */
2928 			if (!soc->ast_offload_support &&
2929 			    !peer->self_ast_entry) {
2930 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2931 					QDF_MAC_ADDR_REF(peer_mac_addr));
2932 				dp_peer_add_ast(soc, peer,
2933 						peer_mac_addr,
2934 						type, 0);
2935 			}
2936 
2937 			/* If peer setup and hence rx_tid setup got called
2938 			 * before htt peer map then Qref write to LUT did
2939 			 * not happen in rx_tid setup as peer_id was invalid.
2940 			 * So defer Qref write to peer map handler. Check if
2941 			 * rx_tid qdesc for tid 0 is already setup and perform
2942 			 * qref write to LUT for Tid 0 and 16.
2943 			 */
2944 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2945 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2946 			    !IS_MLO_DP_LINK_PEER(peer)) {
2947 				add_entry_write_list(soc, peer, 0);
2948 				hal_reo_shared_qaddr_write(soc->hal_soc,
2949 							   peer_id,
2950 							   0,
2951 							   peer->rx_tid[0].hw_qdesc_paddr);
2952 				add_entry_write_list(soc, peer, DP_NON_QOS_TID);
2953 				hal_reo_shared_qaddr_write(soc->hal_soc,
2954 							   peer_id,
2955 							   DP_NON_QOS_TID,
2956 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2957 			}
2958 		}
2959 
2960 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2961 				      vdev_id, ast_hash, is_wds);
2962 	}
2963 
2964 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2965 
2966 	/*
2967 	 * If AST offload and host AST DB is enabled, populate AST entries on
2968 	 * host based on peer map event from FW
2969 	 */
2970 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2971 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
2972 					 hw_peer_id, vdev_id,
2973 					 ast_hash, is_wds);
2974 	}
2975 
2976 	return err;
2977 }
2978 
2979 void
2980 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2981 			 uint8_t vdev_id, uint8_t *mac_addr,
2982 			 uint8_t is_wds, uint32_t free_wds_count)
2983 {
2984 	struct dp_peer *peer;
2985 	struct dp_vdev *vdev = NULL;
2986 
2987 	/*
2988 	 * If FW AST offload is enabled and host AST DB is enabled,
2989 	 * the AST entries are created during peer map from FW.
2990 	 */
2991 	if (soc->ast_offload_support && is_wds) {
2992 		if (!soc->host_ast_db_enable)
2993 			return;
2994 	}
2995 
2996 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2997 
2998 	/*
2999 	 * Currently peer IDs are assigned for vdevs as well as peers.
3000 	 * If the peer ID is for a vdev, then the peer pointer stored
3001 	 * in peer_id_to_obj_map will be NULL.
3002 	 */
3003 	if (!peer) {
3004 		dp_err("Received unmap event for invalid peer_id %u",
3005 		       peer_id);
3006 		return;
3007 	}
3008 
3009 	vdev = peer->vdev;
3010 
3011 	if (peer->txrx_peer) {
3012 		struct cdp_txrx_peer_params_update params = {0};
3013 
3014 		params.vdev_id = vdev->vdev_id;
3015 		params.peer_mac = peer->mac_addr.raw;
3016 		params.chip_id = dp_get_chip_id(soc);
3017 		params.pdev_id = vdev->pdev->pdev_id;
3018 
3019 		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
3020 				     (void *)&params, peer_id,
3021 				     WDI_NO_VAL, vdev->pdev->pdev_id);
3022 	}
3023 
3024 	/*
3025 	 * In the scenario where the assoc peer soc id is different from
3026 	 * the primary soc id, reset soc to point to the primary psoc.
3027 	 * Since the map is received on the primary soc, the unmap should
3028 	 * also delete the ast on the primary soc.
3029 	 */
3030 	soc = peer->vdev->pdev->soc;
3031 
3032 	/* If V2 peer map messages are enabled, the AST entry has to
3033 	 * be freed here
3034 	 */
3035 	if (is_wds) {
3036 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3037 						   mac_addr)) {
3038 			dp_peer_unmap_ipa_evt(soc, peer_id, vdev_id, mac_addr);
3039 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3040 			return;
3041 		}
3042 
3043 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u is_wds %u",
3044 			  peer, peer->peer_id,
3045 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3046 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3047 			  is_wds);
3048 
3049 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3050 		return;
3051 	}
3052 
3053 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3054 
3055 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3056 					       peer, mac_addr, 0, peer_id,
3057 					       0, 0, vdev_id);
3058 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3059 		soc, peer_id, peer);
3060 
3061 	/* Clear entries in Qref LUT */
3062 	/* TODO: Check if this is to be called from
3063 	 * dp_peer_delete for MLO case if there is race between
3064 	 * new peer id assignment and still not having received
3065 	 * peer unmap for MLD peer with same peer id.
3066 	 */
3067 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3068 
3069 	vdev = peer->vdev;
3070 
3071 	/* only if peer is in STA mode and not tdls peer */
3072 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3073 		bool peer_map = false;
3074 
3075 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3076 	}
3077 
3078 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3079 
3080 	if (soc->arch_ops.dp_partner_chips_unmap)
3081 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3082 
3083 	peer->peer_id = HTT_INVALID_PEER;
3084 
3085 	/*
3086 	 * Reset ast flow mapping table
3087 	 */
3088 	if (!soc->ast_offload_support)
3089 		dp_peer_reset_flowq_map(peer);
3090 
3091 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3092 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3093 				peer_id, vdev_id, mac_addr);
3094 	}
3095 
3096 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3097 
3098 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3099 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3100 	/*
3101 	 * Remove a reference to the peer.
3102 	 * If there are no more references, delete the peer object.
3103 	 */
3104 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3105 }
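
/*
 * Reference accounting note for dp_rx_peer_unmap_handler() above: the
 * DP_MOD_ID_HTT unref balances the __dp_peer_get_ref_by_id() taken at
 * the top of the handler, while the DP_MOD_ID_CONFIG unref drops the
 * long-lived reference held on behalf of the peer-id map since
 * dp_peer_find_add_id(). Only when the last reference goes away is the
 * peer object actually deleted.
 */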
3106 
3107 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
3108 /**
3109  * dp_freq_to_band() - Convert frequency to band
3110  * @freq: peer frequency
3111  *
3112  * Return: band for input frequency
3113  */
3114 static inline
3115 enum dp_bands dp_freq_to_band(qdf_freq_t freq)
3116 {
3117 	if (REG_IS_24GHZ_CH_FREQ(freq))
3118 		return DP_BAND_2GHZ;
3119 	else if (REG_IS_5GHZ_FREQ(freq) || REG_IS_49GHZ_FREQ(freq))
3120 		return DP_BAND_5GHZ;
3121 	else if (REG_IS_6GHZ_FREQ(freq))
3122 		return DP_BAND_6GHZ;
3123 	return DP_BAND_INVALID;
3124 }
3125 
3126 void dp_map_link_id_band(struct dp_peer *peer)
3127 {
3128 	struct dp_txrx_peer *txrx_peer = NULL;
3129 	enum dp_bands band;
3130 
3131 	txrx_peer = dp_get_txrx_peer(peer);
3132 	if (txrx_peer) {
3133 		band = dp_freq_to_band(peer->freq);
3134 		txrx_peer->band[peer->link_id + 1] = band;
3135 		dp_info("Band(Freq: %u): %u mapped to Link ID: %u",
3136 			peer->freq, band, peer->link_id);
3137 	} else {
3138 		dp_info("txrx_peer NULL for peer: " QDF_MAC_ADDR_FMT,
3139 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3140 	}
3141 }
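
/*
 * Note on the indexing above: the band table is accessed at link_id + 1,
 * which implies slot 0 is reserved as the default entry for traffic
 * whose link id is not (yet) valid; the dp_txrx_peer definition lives
 * outside this file, so treat that as an assumption. Lookup sketch:
 */
#if 0	/* illustrative sketch, not part of the driver build */
	/* link_id L is stored at index L + 1; index 0 is the default */
	enum dp_bands band = txrx_peer->band[peer->link_id + 1];
#endif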
3142 
3143 QDF_STATUS
3144 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info)
3145 {
3146 	struct dp_peer *peer = NULL;
3147 	struct cdp_peer_info peer_info = { 0 };
3148 
3149 	QDF_ASSERT(info->peer_id <= soc->max_peer_id);
3150 
3151 	DP_PEER_INFO_PARAMS_INIT(&peer_info, info->vdev_id, info->peer_mac_addr,
3152 				 false, CDP_LINK_PEER_TYPE);
3153 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
3154 
3155 	if (!peer) {
3156 		dp_err("peer NULL, id %u, MAC " QDF_MAC_ADDR_FMT ", vdev_id %u",
3157 		       info->peer_id, QDF_MAC_ADDR_REF(info->peer_mac_addr),
3158 		       info->vdev_id);
3159 
3160 		return QDF_STATUS_E_FAILURE;
3161 	}
3162 
3163 	peer->link_id = info->link_id;
3164 	peer->link_id_valid = info->link_id_valid;
3165 
3166 	if (peer->freq)
3167 		dp_map_link_id_band(peer);
3168 
3169 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3170 
3171 	return QDF_STATUS_SUCCESS;
3172 }
3173 #endif
3174 #ifdef WLAN_FEATURE_11BE_MLO
3175 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3176 {
3177 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3178 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3179 	uint8_t vdev_id = DP_VDEV_ALL;
3180 	uint8_t is_wds = 0;
3181 
3182 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3183 					       NULL, mac_addr, 0, peer_id,
3184 					       0, 0, vdev_id);
3185 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3186 		soc, peer_id);
3187 
3188 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3189 				 mac_addr, is_wds,
3190 				 DP_PEER_WDS_COUNT_INVALID);
3191 }
3192 #endif
3193 
3194 #ifndef AST_OFFLOAD_ENABLE
3195 void
3196 dp_peer_find_detach(struct dp_soc *soc)
3197 {
3198 	dp_soc_wds_detach(soc);
3199 	dp_peer_find_map_detach(soc);
3200 	dp_peer_find_hash_detach(soc);
3201 	dp_peer_ast_hash_detach(soc);
3202 	dp_peer_ast_table_detach(soc);
3203 	dp_peer_mec_hash_detach(soc);
3204 }
3205 #else
3206 void
3207 dp_peer_find_detach(struct dp_soc *soc)
3208 {
3209 	dp_peer_find_map_detach(soc);
3210 	dp_peer_find_hash_detach(soc);
3211 }
3212 #endif
3213 
3214 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3215 {
3216 	dp_peer_rx_tid_setup(peer);
3217 
3218 	peer->active_ba_session_cnt = 0;
3219 	peer->hw_buffer_size = 0;
3220 	peer->kill_256_sessions = 0;
3221 
3222 	/*
3223 	 * Set security defaults: no PN check, no security. The target may
3224 	 * send a HTT SEC_IND message to overwrite these defaults.
3225 	 */
3226 	if (peer->txrx_peer)
3227 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
3228 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
3229 				cdp_sec_type_none;
3230 }
3231 
3232 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3233 {
3234 	enum wlan_op_mode vdev_opmode;
3235 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3236 	struct dp_pdev *pdev = vdev->pdev;
3237 	struct dp_soc *soc = pdev->soc;
3238 
3239 	/* save vdev-related members in case the vdev is freed */
3240 	vdev_opmode = vdev->opmode;
3241 
3242 	if (!IS_MLO_DP_MLD_PEER(peer))
3243 		dp_monitor_peer_tx_cleanup(vdev, peer);
3244 
3245 	/* cleanup the Rx reorder queues for this peer */
3246 	if (vdev_opmode != wlan_op_mode_monitor)
3247 		dp_peer_rx_cleanup(vdev, peer);
3248 
3249 	dp_peer_rx_tids_destroy(peer);
3250 
3251 	if (IS_MLO_DP_LINK_PEER(peer))
3252 		dp_link_peer_del_mld_peer(peer);
3253 	if (IS_MLO_DP_MLD_PEER(peer))
3254 		dp_mld_peer_deinit_link_peers_info(peer);
3255 
3256 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3257 		     QDF_MAC_ADDR_SIZE);
3258 
3259 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3260 		soc->cdp_soc.ol_ops->peer_unref_delete(
3261 				soc->ctrl_psoc,
3262 				vdev->pdev->pdev_id,
3263 				peer->mac_addr.raw, vdev_mac_addr,
3264 				vdev_opmode);
3265 }
3266 
3267 QDF_STATUS
3268 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3269 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3270 			  bool is_unicast)
3271 {
3272 	struct dp_peer *peer =
3273 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3274 						       peer_mac, 0, vdev_id,
3275 						       DP_MOD_ID_CDP);
3276 	int sec_index;
3277 
3278 	if (!peer) {
3279 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3280 		return QDF_STATUS_E_FAILURE;
3281 	}
3282 
3283 	if (!peer->txrx_peer) {
3284 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3285 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
3286 		return QDF_STATUS_E_FAILURE;
3287 	}
3288 
3289 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3290 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3291 		     is_unicast ? "ucast" : "mcast", sec_type);
3292 
3293 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3294 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3295 
3296 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3297 
3298 	return QDF_STATUS_SUCCESS;
3299 }
3300 
3301 void
3302 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3303 		      enum cdp_sec_type sec_type, int is_unicast,
3304 		      u_int32_t *michael_key,
3305 		      u_int32_t *rx_pn)
3306 {
3307 	struct dp_peer *peer;
3308 	struct dp_txrx_peer *txrx_peer;
3309 	int sec_index;
3310 
3311 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3312 	if (!peer) {
3313 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3314 			    peer_id);
3315 		return;
3316 	}
3317 	txrx_peer = dp_get_txrx_peer(peer);
3318 	if (!txrx_peer) {
3319 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
3320 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits", peer_id);
3321 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3322 		return;
3323 
3324 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3325 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3326 		     is_unicast ? "ucast" : "mcast", sec_type);
3327 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3328 
3329 	txrx_peer->security[sec_index].sec_type = sec_type;
3330 #ifdef notyet /* TODO: See if this is required for defrag support */
3331 	/* michael key only valid for TKIP, but for simplicity,
3332 	 * copy it anyway
3333 	 */
3334 	qdf_mem_copy(
3335 		&peer->txrx_peer->security[sec_index].michael_key[0],
3336 		michael_key,
3337 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
3338 #ifdef BIG_ENDIAN_HOST
3339 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
3340 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
3341 #endif /* BIG_ENDIAN_HOST */
3342 #endif
3343 
3344 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3345 	if (sec_type != cdp_sec_type_wapi) {
3346 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3347 	} else {
3348 		for (i = 0; i < DP_MAX_TIDS; i++) {
3349 			/*
3350 			 * Setting PN valid bit for WAPI sec_type,
3351 			 * since WAPI PN has to be started with predefined value
3352 			 */
3353 			peer->tids_last_pn_valid[i] = 1;
3354 			qdf_mem_copy(
3355 				(u_int8_t *) &peer->tids_last_pn[i],
3356 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3357 			peer->tids_last_pn[i].pn128[1] =
3358 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3359 			peer->tids_last_pn[i].pn128[0] =
3360 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3361 		}
3362 	}
3363 #endif
3364 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3365 	 * all security types and last pn for WAPI) once REO command API
3366 	 * is available
3367 	 */
3368 
3369 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3370 }
3371 
3372 #ifdef QCA_PEER_EXT_STATS
3373 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
3374 					 struct dp_txrx_peer *txrx_peer)
3375 {
3376 	uint8_t tid, ctx_id;
3377 
3378 	if (!soc || !txrx_peer) {
3379 		dp_warn("Null soc %pK or peer %pK", soc, txrx_peer);
3380 		return QDF_STATUS_E_INVAL;
3381 	}
3382 
3383 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3384 		return QDF_STATUS_SUCCESS;
3385 
3386 	/*
3387 	 * Allocate memory for peer extended stats.
3388 	 */
3389 	txrx_peer->delay_stats =
3390 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
3391 	if (!txrx_peer->delay_stats) {
3392 		dp_err("Peer extended stats obj alloc failed!!");
3393 		return QDF_STATUS_E_NOMEM;
3394 	}
3395 
3396 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3397 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3398 			struct cdp_delay_tx_stats *tx_delay =
3399 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
3400 			struct cdp_delay_rx_stats *rx_delay =
3401 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
3402 
3403 			dp_hist_init(&tx_delay->tx_swq_delay,
3404 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3405 			dp_hist_init(&tx_delay->hwtx_delay,
3406 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3407 			dp_hist_init(&rx_delay->to_stack_delay,
3408 				     CDP_HIST_TYPE_REAP_STACK);
3409 		}
3410 	}
3411 
3412 	return QDF_STATUS_SUCCESS;
3413 }
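
/*
 * The context allocated above holds one histogram triplet per
 * (data tid, tx/rx context) pair: software enqueue delay and hardware
 * completion delay on Tx, plus reap-to-stack delay on Rx. Sketch of a
 * later update of one such histogram (dp_hist_update_stats is assumed
 * to be the matching update helper from dp_hist.h):
 */
#if 0	/* illustrative sketch, not part of the driver build */
	struct cdp_delay_tx_stats *tx_delay =
		&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;

	dp_hist_update_stats(&tx_delay->hwtx_delay, delay_us);
#endif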
3414 
3415 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
3416 				     struct dp_txrx_peer *txrx_peer)
3417 {
3418 	if (!txrx_peer) {
3419 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3420 		return;
3421 	}
3422 
3423 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3424 		return;
3425 
3426 	if (!txrx_peer->delay_stats)
3427 		return;
3428 
3429 	qdf_mem_free(txrx_peer->delay_stats);
3430 	txrx_peer->delay_stats = NULL;
3431 }
3432 
3433 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3434 {
3435 	if (txrx_peer->delay_stats)
3436 		qdf_mem_zero(txrx_peer->delay_stats,
3437 			     sizeof(struct dp_peer_delay_stats));
3438 }
3439 #endif
3440 
3441 #ifdef WLAN_PEER_JITTER
3442 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
3443 					  struct dp_txrx_peer *txrx_peer)
3444 {
3445 	if (!pdev || !txrx_peer) {
3446 		dp_warn("Null pdev or peer");
3447 		return QDF_STATUS_E_INVAL;
3448 	}
3449 
3450 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3451 		return QDF_STATUS_SUCCESS;
3452 
3453 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3454 		/*
3455 		 * Allocate memory on per tid basis when nss is enabled
3456 		 */
3457 		txrx_peer->jitter_stats =
3458 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3459 					* DP_MAX_TIDS);
3460 	} else {
3461 		/*
3462 		 * Allocate memory on per tid per ring basis
3463 		 */
3464 		txrx_peer->jitter_stats =
3465 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3466 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3467 	}
3468 
3469 	if (!txrx_peer->jitter_stats) {
3470 		dp_warn("Jitter stats obj alloc failed!!");
3471 		return QDF_STATUS_E_NOMEM;
3472 	}
3473 
3474 	return QDF_STATUS_SUCCESS;
3475 }
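
/*
 * Sizing note for the two allocations above: with nss enabled the
 * context is DP_MAX_TIDS records; otherwise it is DP_MAX_TIDS *
 * CDP_MAX_TXRX_CTX records, one per tid per ring. For example, assuming
 * DP_MAX_TIDS == 17 and CDP_MAX_TXRX_CTX == 4 (both defined outside
 * this file), that is 17 vs. 68 cdp_peer_tid_stats records. Index
 * sketch for the per-ring layout (assumed flattening order):
 */
#if 0	/* illustrative sketch, not part of the driver build */
	struct cdp_peer_tid_stats *stats =
		&txrx_peer->jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
#endif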
3476 
3477 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
3478 				      struct dp_txrx_peer *txrx_peer)
3479 {
3480 	if (!pdev || !txrx_peer) {
3481 		dp_warn("Null pdev or peer");
3482 		return;
3483 	}
3484 
3485 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3486 		return;
3487 
3488 	if (txrx_peer->jitter_stats) {
3489 		qdf_mem_free(txrx_peer->jitter_stats);
3490 		txrx_peer->jitter_stats = NULL;
3491 	}
3492 }
3493 
3494 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3495 {
3496 	struct cdp_peer_tid_stats *jitter_stats = NULL;
3497 
3498 	if (!txrx_peer) {
3499 		dp_warn("Null peer");
3500 		return;
3501 	}
3502 
3503 	if (!wlan_cfg_is_peer_jitter_stats_enabled(txrx_peer->
3504 						   vdev->
3505 						   pdev->soc->wlan_cfg_ctx))
3506 		return;
3507 
3508 	jitter_stats = txrx_peer->jitter_stats;
3509 	if (!jitter_stats)
3510 		return;
3511 
3512 	if (wlan_cfg_get_dp_pdev_nss_enabled(txrx_peer->
3513 					     vdev->pdev->wlan_cfg_ctx))
3514 		qdf_mem_zero(jitter_stats,
3515 			     sizeof(struct cdp_peer_tid_stats) *
3516 			     DP_MAX_TIDS);
3517 
3518 	else
3519 		qdf_mem_zero(jitter_stats,
3520 			     sizeof(struct cdp_peer_tid_stats) *
3521 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3522 
3523 }
3524 #endif
3525 
3526 #ifdef DP_PEER_EXTENDED_API
3527 /**
3528  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
3529  * @soc: DP soc handle
3530  * @txrx_peer: Core txrx_peer handle
3531  * @set_bw: enum of bandwidth to be set for this peer connection
3532  *
3533  * Return: None
3534  */
3535 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
3536 			   enum cdp_peer_bw set_bw)
3537 {
3538 	if (!txrx_peer)
3539 		return;
3540 
3541 	txrx_peer->bw = set_bw;
3542 
3543 	switch (set_bw) {
3544 	case CDP_160_MHZ:
3545 	case CDP_320_MHZ:
3546 		txrx_peer->mpdu_retry_threshold =
3547 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
3548 		break;
3549 	case CDP_20_MHZ:
3550 	case CDP_40_MHZ:
3551 	case CDP_80_MHZ:
3552 	default:
3553 		txrx_peer->mpdu_retry_threshold =
3554 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
3555 		break;
3556 	}
3557 
3558 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
3559 		txrx_peer->peer_id, txrx_peer->bw,
3560 		txrx_peer->mpdu_retry_threshold);
3561 }
3562 
3563 #ifdef WLAN_FEATURE_11BE_MLO
3564 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3565 			    struct ol_txrx_desc_type *sta_desc)
3566 {
3567 	struct dp_peer *peer;
3568 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3569 
3570 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3571 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3572 
3573 	if (!peer)
3574 		return QDF_STATUS_E_FAULT;
3575 
3576 	qdf_spin_lock_bh(&peer->peer_info_lock);
3577 	peer->state = OL_TXRX_PEER_STATE_CONN;
3578 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3579 
3580 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3581 
3582 	dp_rx_flush_rx_cached(peer, false);
3583 
3584 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3585 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
3586 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
3587 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
3588 		peer->mld_peer->state = peer->state;
3589 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
3590 		dp_rx_flush_rx_cached(peer->mld_peer, false);
3591 	}
3592 
3593 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3594 
3595 	return QDF_STATUS_SUCCESS;
3596 }
3597 
3598 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3599 				enum ol_txrx_peer_state state)
3600 {
3601 	struct dp_peer *peer;
3602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3603 
3604 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3605 				       DP_MOD_ID_CDP);
3606 	if (!peer) {
3607 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
3608 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3609 		return QDF_STATUS_E_FAILURE;
3610 	}
3611 	peer->state = state;
3612 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3613 
3614 	if (peer->txrx_peer)
3615 		peer->txrx_peer->authorize = peer->authorize;
3616 
3617 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
3618 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3619 		     peer->state);
3620 
3621 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3622 		peer->mld_peer->state = peer->state;
3623 		if (peer->mld_peer->txrx_peer)
			peer->mld_peer->txrx_peer->authorize = peer->authorize;
3624 		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
3625 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
3626 			     peer->mld_peer->state);
3627 	}
3628 
3629 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3630 	 * Decrement it here.
3631 	 */
3632 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3633 
3634 	return QDF_STATUS_SUCCESS;
3635 }
3636 #else
3637 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3638 			    struct ol_txrx_desc_type *sta_desc)
3639 {
3640 	struct dp_peer *peer;
3641 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3642 
3643 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3644 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3645 
3646 	if (!peer)
3647 		return QDF_STATUS_E_FAULT;
3648 
3649 	qdf_spin_lock_bh(&peer->peer_info_lock);
3650 	peer->state = OL_TXRX_PEER_STATE_CONN;
3651 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3652 
3653 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3654 
3655 	dp_rx_flush_rx_cached(peer, false);
3656 
3657 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3658 
3659 	return QDF_STATUS_SUCCESS;
3660 }
3661 
3662 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3663 				enum ol_txrx_peer_state state)
3664 {
3665 	struct dp_peer *peer;
3666 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3667 
3668 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3669 				      DP_MOD_ID_CDP);
3670 	if (!peer) {
3671 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
3672 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3673 		return QDF_STATUS_E_FAILURE;
3674 	}
3675 	peer->state = state;
3676 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3677 
3678 	if (peer->txrx_peer)
3679 		peer->txrx_peer->authorize = peer->authorize;
3680 
3681 	dp_info("peer %pK state %d", peer, peer->state);
3682 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3683 	 * Decrement it here.
3684 	 */
3685 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3686 
3687 	return QDF_STATUS_SUCCESS;
3688 }
3689 #endif
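
/*
 * Reference discipline sketch (illustrative, not a new API): every
 * successful dp_peer_find_hash_find() takes a reference against the
 * given mod_id, and the caller must drop it with dp_peer_unref_delete()
 * under the same mod_id once the peer is no longer needed:
 *
 *     struct dp_peer *peer =
 *             dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
 *                                    DP_MOD_ID_CDP);
 *     if (!peer)
 *             return QDF_STATUS_E_FAILURE;
 *
 *     ... operate on peer ...
 *
 *     dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */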
3690 
3691 QDF_STATUS
3692 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3693 	      struct qdf_mac_addr peer_addr)
3694 {
3695 	struct dp_peer *peer;
3696 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3697 
3698 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
3699 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3700 
3701 	if (!peer)
3702 		return QDF_STATUS_E_FAULT;
3703 	if (!peer->valid) {
3704 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3705 		return QDF_STATUS_E_FAULT;
3706 	}
3707 
3708 	dp_clear_peer_internal(soc, peer);
3709 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3710 	return QDF_STATUS_SUCCESS;
3711 }
3712 
3713 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3714 			 uint8_t *vdev_id)
3715 {
3716 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3717 	struct dp_peer *peer =
3718 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3719 				       DP_MOD_ID_CDP);
3720 
3721 	if (!peer)
3722 		return QDF_STATUS_E_FAILURE;
3723 
3724 	dp_info("peer %pK vdev %pK vdev id %d",
3725 		peer, peer->vdev, peer->vdev->vdev_id);
3726 	*vdev_id = peer->vdev->vdev_id;
3727 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3728 	 * Decrement it here.
3729 	 */
3730 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3731 
3732 	return QDF_STATUS_SUCCESS;
3733 }
3734 
3735 struct cdp_vdev *
3736 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3737 			 struct qdf_mac_addr peer_addr)
3738 {
3739 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3740 	struct dp_peer *peer = NULL;
3741 	struct cdp_vdev *vdev = NULL;
3742 
3743 	if (!pdev) {
3744 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
3745 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
3746 		return NULL;
3747 	}
3748 
3749 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
3750 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
3751 	if (!peer) {
3752 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3753 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
3754 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
3755 		return NULL;
3756 	}
3757 
3758 	vdev = (struct cdp_vdev *)peer->vdev;
3759 
3760 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3761 	return vdev;
3762 }
3763 
3764 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3765 {
3766 	struct dp_peer *peer = peer_handle;
3767 
3768 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3769 	return (struct cdp_vdev *)peer->vdev;
3770 }
3771 
3772 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3773 {
3774 	struct dp_peer *peer = peer_handle;
3775 	uint8_t *mac;
3776 
3777 	mac = peer->mac_addr.raw;
3778 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3779 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3780 	return peer->mac_addr.raw;
3781 }
3782 
3783 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3784 		      uint8_t *peer_mac)
3785 {
3786 	enum ol_txrx_peer_state peer_state;
3787 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3788 	struct cdp_peer_info peer_info = { 0 };
3789 	struct dp_peer *peer;
3790 	struct dp_peer *tgt_peer;
3791 
3792 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
3793 				 false, CDP_WILD_PEER_TYPE);
3794 
3795 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
3796 
3797 	if (!peer)
3798 		return OL_TXRX_PEER_STATE_INVALID;
3799 
3800 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3801 
3802 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
3803 	peer_state = tgt_peer->state;
3804 
3805 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3806 
3807 	return peer_state;
3808 }
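
/*
 * Note (hedged): dp_get_tgt_peer_from_peer() is expected to resolve an
 * MLO link peer to its MLD peer, so the state returned above is the one
 * written by dp_peer_state_update(); for legacy peers it is the peer
 * itself.
 */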
3809 
3810 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3811 {
3812 	int i;
3813 
3814 	/* point the freelist to the first ID */
3815 	pdev->local_peer_ids.freelist = 0;
3816 
3817 	/* link each ID to the next one */
3818 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3819 		pdev->local_peer_ids.pool[i] = i + 1;
3820 		pdev->local_peer_ids.map[i] = NULL;
3821 	}
3822 
3823 	/* link the last ID to itself, to mark the end of the list */
3824 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3825 	pdev->local_peer_ids.pool[i] = i;
3826 
3827 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3828 	dp_info("Peer pool init");
3829 }
3830 
3831 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3832 {
3833 	int i;
3834 
3835 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3836 	i = pdev->local_peer_ids.freelist;
3837 	if (pdev->local_peer_ids.pool[i] == i) {
3838 		/* the list is empty, except for the list-end marker */
3839 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3840 	} else {
3841 		/* take the head ID and advance the freelist */
3842 		peer->local_id = i;
3843 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3844 		pdev->local_peer_ids.map[i] = peer;
3845 	}
3846 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3847 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3848 }
3849 
3850 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3851 {
3852 	int i = peer->local_id;

3853 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3854 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3855 		return;
3856 	}
3857 
3858 	/* put this ID on the head of the freelist */
3859 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3860 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3861 	pdev->local_peer_ids.freelist = i;
3862 	pdev->local_peer_ids.map[i] = NULL;
3863 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3864 }
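
/*
 * Illustrative sketch of the freelist scheme used by the three
 * functions above, outside any driver context (POOL_SIZE and all
 * identifiers here are hypothetical). pool[] doubles as the "next"
 * index array, and the slot one past the last valid ID links to itself
 * as the end-of-list marker, which is why allocation detects
 * exhaustion with pool[i] == i:
 *
 *     #define POOL_SIZE 4
 *     static int pool[POOL_SIZE + 1];
 *     static int freelist;
 *
 *     static void pool_init(void)
 *     {
 *             int i;
 *
 *             freelist = 0;
 *             for (i = 0; i < POOL_SIZE; i++)
 *                     pool[i] = i + 1;
 *             pool[POOL_SIZE] = POOL_SIZE;    // end-of-list marker
 *     }
 *
 *     static int pool_alloc(void)
 *     {
 *             int i = freelist;
 *
 *             if (pool[i] == i)
 *                     return -1;              // pool exhausted
 *             freelist = pool[i];
 *             return i;
 *     }
 *
 *     static void pool_free(int i)
 *     {
 *             pool[i] = freelist;
 *             freelist = i;
 *     }
 */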
3865 
3866 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3867 				uint8_t vdev_id, uint8_t *peer_addr)
3868 {
3869 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3870 	struct dp_peer *peer = NULL;
3871 
3872 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
3873 				      DP_MOD_ID_CDP);
3874 	if (!peer)
3875 		return false;
3876 
3877 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3878 
3879 	return true;
3880 }
3881 
3882 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3883 				      uint8_t vdev_id, uint8_t *peer_addr,
3884 				      uint16_t max_bssid)
3885 {
3886 	int i;
3887 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3888 	struct dp_peer *peer = NULL;
3889 
3890 	for (i = 0; i < max_bssid; i++) {
3891 		/* Need to check vdevs other than the vdev_id */
3892 		if (vdev_id == i)
3893 			continue;
3894 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
3895 					      DP_MOD_ID_CDP);
3896 		if (peer) {
3897 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
3898 			       QDF_MAC_ADDR_REF(peer_addr), i);
3899 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3900 			return true;
3901 		}
3902 	}
3903 
3904 	return false;
3905 }
3906 
3907 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3908 			      uint8_t *peer_mac, bool val)
3909 {
3910 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3911 	struct dp_peer *peer = NULL;
3912 
3913 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3914 				      DP_MOD_ID_CDP);
3915 	if (!peer) {
3916 		dp_err("Failed to find peer for: " QDF_MAC_ADDR_FMT,
3917 		       QDF_MAC_ADDR_REF(peer_mac));
3918 		return;
3919 	}
3920 
3921 	dp_info("Set tdls flag %d for peer: " QDF_MAC_ADDR_FMT,
3922 		val, QDF_MAC_ADDR_REF(peer_mac));
3923 	peer->is_tdls_peer = val;
3924 
3925 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3926 }
3927 #endif
3928 
3929 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3930 			uint8_t *peer_addr)
3931 {
3932 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3933 	struct dp_peer *peer = NULL;
3934 
3935 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
3936 				      DP_MOD_ID_CDP);
3937 	if (peer) {
3938 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3939 		return true;
3940 	}
3941 
3942 	return false;
3943 }
3944 
3945 QDF_STATUS
3946 dp_set_michael_key(struct cdp_soc_t *soc,
3947 		   uint8_t vdev_id,
3948 		   uint8_t *peer_mac,
3949 		   bool is_unicast, uint32_t *key)
3950 {
3951 	uint8_t sec_index = is_unicast ? 1 : 0;
3952 	struct dp_peer *peer =
3953 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3954 						       peer_mac, 0, vdev_id,
3955 						       DP_MOD_ID_CDP);
3956 
3957 	if (!peer) {
3958 		dp_peer_err("%pK: peer not found", soc);
3959 		return QDF_STATUS_E_FAILURE;
3960 	}
3961 
3962 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
3963 		     key, IEEE80211_WEP_MICLEN);
3964 
3965 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3966 
3967 	return QDF_STATUS_SUCCESS;
3968 }
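
/*
 * Illustrative calls (arguments hypothetical): sec_index selects
 * between the two key slots above, so a pairwise TKIP Michael key lands
 * in security[1] and a group key in security[0]:
 *
 *     dp_set_michael_key(soc, vdev_id, peer_mac, true, key);   // unicast
 *     dp_set_michael_key(soc, vdev_id, peer_mac, false, key);  // group
 */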
3969 
3971 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
3972 					   struct dp_vdev *vdev,
3973 					   enum dp_mod_id mod_id)
3974 {
3975 	struct dp_peer *peer = NULL;
3976 
3977 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3978 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3979 		if (peer->bss_peer)
3980 			break;
3981 	}
3982 
3983 	if (!peer) {
3984 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3985 		return NULL;
3986 	}
3987 
3988 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
3989 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3990 		return peer;
3991 	}
3992 
	/* could not take a reference; the peer is being deleted */
3993 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3994 	return NULL;
3995 }
3996 
3997 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
3998 						struct dp_vdev *vdev,
3999 						enum dp_mod_id mod_id)
4000 {
4001 	struct dp_peer *peer;
4002 
4003 	if (vdev->opmode != wlan_op_mode_sta)
4004 		return NULL;
4005 
4006 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4007 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4008 		if (peer->sta_self_peer)
4009 			break;
4010 	}
4011 
4012 	if (!peer) {
4013 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4014 		return NULL;
4015 	}
4016 
4017 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4018 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4019 		return peer;
4020 	}
4021 
	/* could not take a reference; the peer is being deleted */
4022 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4023 	return NULL;
4024 }
4025 
4026 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4027 			 uint8_t *peer_mac)
4028 {
4029 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4030 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4031 							      vdev_id,
4032 							      DP_MOD_ID_CDP);
4033 	struct dp_txrx_peer *txrx_peer;
4034 	uint8_t tid;
4035 	struct dp_rx_tid_defrag *defrag_rx_tid;
4036 
4037 	if (!peer)
4038 		return;
4039 
4040 	if (!peer->txrx_peer)
4041 		goto fail;
4042 
4043 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
4044 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4045 
4046 	txrx_peer = peer->txrx_peer;
4047 
4048 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4049 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
4050 
4051 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4052 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
4053 		dp_rx_reorder_flush_frag(txrx_peer, tid);
4054 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4055 	}
4056 fail:
4057 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4058 }
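
/*
 * Note: for each TID, the waitlist removal and the reorder-fragment
 * flush above are performed while holding that TID's defrag_tid_lock,
 * which serializes the flush against concurrent defrag activity on the
 * same TID.
 */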
4059 
4060 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
4061 {
4062 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
4063 						     DP_MOD_ID_HTT);
4064 
4065 	if (peer) {
4066 		/*
4067 		 * Decrement the peer ref which is taken as part of
4068 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
4069 		 */
4070 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4071 
4072 		return true;
4073 	}
4074 
4075 	return false;
4076 }
4077 
4078 qdf_export_symbol(dp_peer_find_by_id_valid);
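
/*
 * Illustrative use (hedged; peer_id handling is caller-specific):
 * target-event handlers can validate a peer_id before acting on it:
 *
 *     if (!dp_peer_find_by_id_valid(soc, peer_id))
 *             return;   // stale or unknown peer_id
 */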
4079 
4080 #ifdef QCA_MULTIPASS_SUPPORT
4081 void dp_peer_multipass_list_remove(struct dp_peer *peer)
4082 {
4083 	struct dp_vdev *vdev = peer->vdev;
4084 	struct dp_txrx_peer *tpeer = NULL;
4085 	bool found = false;
4086 
4087 	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
4088 	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
4089 		if (tpeer == peer->txrx_peer) {
4090 			found = true;
4091 			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
4092 				     mpass_peer_list_elem);
4093 			break;
4094 		}
4095 	}
4096 
4097 	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
4098 
4099 	if (found)
4100 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4101 }
4102 
4103 /**
4104  * dp_peer_multipass_list_add() - add peer to the vdev multipass list
4105  * @soc: soc handle
4106  * @peer_mac: mac address
4107  * @vdev_id: vdev id for peer
4108  * @vlan_id: vlan_id
4109  *
4110  * Return: void
4111  */
4112 static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
4113 				       uint8_t vdev_id, uint16_t vlan_id)
4114 {
4115 	struct dp_peer *peer =
4116 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4117 						       vdev_id,
4118 						       DP_MOD_ID_TX_MULTIPASS);
4119 
4120 	if (qdf_unlikely(!peer)) {
4121 		qdf_err("NULL peer");
4122 		return;
4123 	}
4124 
4125 	if (qdf_unlikely(!peer->txrx_peer))
4126 		goto fail;
4127 
4128 	/* If peer already exists in vdev multipass list, do not add it.
4129 	 * This may happen if key install comes twice or re-key
4130 	 * happens for a peer.
4131 	 */
4132 	if (peer->txrx_peer->vlan_id) {
4133 		dp_debug("peer already added to vdev multipass list "
4134 			 "MAC: " QDF_MAC_ADDR_FMT " vlan: %d",
4135 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4136 			 peer->txrx_peer->vlan_id);
4137 		goto fail;
4138 	}
4139 
4140 	/*
4141 	 * Ref_cnt is incremented inside dp_peer_get_tgt_peer_hash_find().
4142 	 * Decrement it when the element is deleted from the list.
4143 	 */
4144 	peer->txrx_peer->vlan_id = vlan_id;
4145 	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4146 	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
4147 			  peer->txrx_peer,
4148 			  mpass_peer_list_elem);
4149 	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4150 	return;
4151 
4152 fail:
4153 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4154 }
4155 
4156 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
4157 			 uint8_t vdev_id, uint8_t *peer_mac,
4158 			 uint16_t vlan_id)
4159 {
4160 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4161 	struct dp_vdev *vdev =
4162 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
4163 				      DP_MOD_ID_TX_MULTIPASS);
4164 
4165 	dp_info("vdev_id %d, vdev %pK, multipass_en %d, peer_mac " QDF_MAC_ADDR_FMT " vlan %d",
4166 		vdev_id, vdev, vdev ? vdev->multipass_en : 0,
4167 		QDF_MAC_ADDR_REF(peer_mac), vlan_id);
4168 	if (vdev) {
4169 		if (vdev->multipass_en)
			dp_peer_multipass_list_add(soc, peer_mac, vdev_id,
						   vlan_id);
		/* drop the vdev ref even when multipass is disabled */
4170 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
4171 	}
4172 }
4173 #endif /* QCA_MULTIPASS_SUPPORT */
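
/*
 * Multipass lifecycle sketch (illustrative; mirrors the functions
 * above): for a vdev with multipass_en set, a key install tags the peer
 * with its VLAN and links it into the vdev list, keeping the reference
 * taken by the hash find; the remove path unlinks the entry and drops
 * that reference:
 *
 *     dp_peer_set_vlan_id(cdp_soc, vdev_id, peer_mac, vlan_id);
 *             // -> dp_peer_multipass_list_add(): sets vlan_id and
 *             //    holds the DP_MOD_ID_TX_MULTIPASS reference
 *
 *     dp_peer_multipass_list_remove(peer);
 *             // -> unlinks from mpass_peer_list and releases the
 *             //    DP_MOD_ID_TX_MULTIPASS reference
 */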
4174