xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 4bf1baaed274932a40f44cf65b2dda09f9009426)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
47 #include "reg_services_common.h"
48 #endif
49 #ifdef FEATURE_AST
50 #ifdef BYPASS_OL_OPS
51 /**
52  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
53  * @soc: DP soc structure pointer
54  * @peer: dp peer structure
55  * @dest_macaddr: MAC address of ast node
56  * @flags: wds or hmwds
57  * @type: type from enum cdp_txrx_ast_entry_type
58  *
59  * This API is used by the WDS source port learning function
60  * to add a new AST entry in the fw.
61  *
62  * Return: 0 on success, error code otherwise.
63  */
64 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
65 				    struct dp_peer *peer,
66 				    const uint8_t *dest_macaddr,
67 				    uint32_t flags,
68 				    uint8_t type)
69 {
70 	QDF_STATUS status;
71 
72 	status = target_if_add_wds_entry(soc->ctrl_psoc,
73 					 peer->vdev->vdev_id,
74 					 peer->mac_addr.raw,
75 					 dest_macaddr,
76 					 WMI_HOST_WDS_FLAG_STATIC,
77 					 type);
78 
79 	return qdf_status_to_os_return(status);
80 }
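
/*
 * Usage sketch (illustrative only; this is not a call site from this
 * file, and "sa_mac" and "wds_flags" are assumed locals): the WDS
 * source port learning path invokes the wrapper and receives an
 * OS-style return code converted from the underlying QDF_STATUS.
 *
 *	int err = dp_add_wds_entry_wrapper(soc, peer, sa_mac, wds_flags,
 *					   CDP_TXRX_AST_TYPE_WDS);
 *	if (err)
 *		dp_peer_err("WDS AST add failed: %d", err);
 */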
81 
82 /**
83  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
84  * @soc: DP soc structure pointer
85  * @peer: dp peer structure
86  * @dest_macaddr: MAC address of ast node
87  * @flags: wds or hmwds
88  *
89  * This API is used to update the peer mac address for the AST
90  * entry in the fw.
91  *
92  * Return: 0 on success, error code otherwise.
93  */
94 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
95 				       struct dp_peer *peer,
96 				       uint8_t *dest_macaddr,
97 				       uint32_t flags)
98 {
99 	QDF_STATUS status;
100 
101 	status = target_if_update_wds_entry(soc->ctrl_psoc,
102 					    peer->vdev->vdev_id,
103 					    dest_macaddr,
104 					    peer->mac_addr.raw,
105 					    WMI_HOST_WDS_FLAG_STATIC);
106 
107 	return qdf_status_to_os_return(status);
108 }
109 
110 /**
111  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
112  * @soc: DP soc structure pointer
113  * @vdev_id: vdev_id
114  * @wds_macaddr: MAC address of ast node
115  * @type: type from enum cdp_txrx_ast_entry_type
116  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
117  *
118  * This API is used to delete an AST entry from the fw
119  *
120  * Return: None
121  */
122 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
123 			      uint8_t vdev_id,
124 			      uint8_t *wds_macaddr,
125 			      uint8_t type,
126 			      uint8_t delete_in_fw)
127 {
128 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
129 				wds_macaddr, type, delete_in_fw);
130 }
131 #else
132 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
133 				    struct dp_peer *peer,
134 				    const uint8_t *dest_macaddr,
135 				    uint32_t flags,
136 				    uint8_t type)
137 {
138 	int status;
139 
140 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
141 					soc->ctrl_psoc,
142 					peer->vdev->vdev_id,
143 					peer->mac_addr.raw,
144 					peer->peer_id,
145 					dest_macaddr,
146 					peer->mac_addr.raw,
147 					flags,
148 					type);
149 
150 	return status;
151 }
152 
153 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
154 				       struct dp_peer *peer,
155 				       uint8_t *dest_macaddr,
156 				       uint32_t flags)
157 {
158 	int status;
159 
160 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
161 				soc->ctrl_psoc,
162 				peer->vdev->vdev_id,
163 				dest_macaddr,
164 				peer->mac_addr.raw,
165 				flags);
166 
167 	return status;
168 }
169 
170 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
171 			      uint8_t vdev_id,
172 			      uint8_t *wds_macaddr,
173 			      uint8_t type,
174 			      uint8_t delete_in_fw)
175 {
176 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
177 						vdev_id,
178 						wds_macaddr,
179 						type,
180 						delete_in_fw);
181 }
182 #endif /* BYPASS_OL_OPS */
183 #else
184 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
185 			      uint8_t vdev_id,
186 			      uint8_t *wds_macaddr,
187 			      uint8_t type,
188 			      uint8_t delete_in_fw)
189 {
190 }
191 #endif /* FEATURE_AST */
192 
193 #ifdef FEATURE_WDS
194 static inline bool
195 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
196 				    struct dp_ast_entry *ast_entry)
197 {
198 	/* If peer map v2 is enabled, the ast entry is not freed here;
199 	 * it is freed in the unmap event, after delete confirmation is
200 	 * received from the target.
201 	 *
202 	 * If peer_id is invalid, the peer map event was never received
203 	 * for this peer; only in that case is the ast entry freed here.
204 	 */
205 
206 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
207 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
208 		return true;
209 
210 	return false;
211 }
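
/*
 * Worked example (restating the logic above): a CDP_TXRX_AST_TYPE_WDS
 * entry returns true, i.e. it is freed later from the unmap event,
 * while WDS_HM_SEC and SELF entries return false and are freed
 * immediately by the caller.
 */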
212 #else
213 static inline bool
214 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
215 				    struct dp_ast_entry *ast_entry)
216 {
217 	return false;
218 }
219 
220 void dp_soc_wds_attach(struct dp_soc *soc)
221 {
222 }
223 
224 void dp_soc_wds_detach(struct dp_soc *soc)
225 {
226 }
227 #endif
228 
229 #ifdef QCA_SUPPORT_WDS_EXTENDED
230 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
231 {
232 	struct dp_vdev *vdev = peer->vdev;
233 	struct dp_txrx_peer *txrx_peer;
234 
235 	if (!vdev->wds_ext_enabled)
236 		return false;
237 
238 	txrx_peer = dp_get_txrx_peer(peer);
239 	if (!txrx_peer)
240 		return false;
241 
242 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
243 				&txrx_peer->wds_ext.init))
244 		return true;
245 
246 	return false;
247 }
248 #else
249 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
250 {
251 	return false;
252 }
253 #endif
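
/*
 * Usage sketch (illustrative; the call site shown is assumed, not
 * taken from this file): callers gate WDS-ext specific handling on
 * this predicate before falling back to the legacy path:
 *
 *	if (dp_peer_check_wds_ext_peer(peer))
 *		return;
 *
 * i.e. skip legacy-path handling for peers on a WDS-ext vdev.
 */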
254 
255 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
256 {
257 	uint32_t max_ast_index;
258 
259 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
260 	/* allocate ast_table for ast entry to ast_index map */
261 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
262 	soc->ast_table = qdf_mem_malloc(max_ast_index *
263 					sizeof(struct dp_ast_entry *));
264 	if (!soc->ast_table) {
265 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
266 		return QDF_STATUS_E_NOMEM;
267 	}
268 	return QDF_STATUS_SUCCESS; /* success */
269 }
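
/*
 * Layout sketch (illustrative): ast_table is a flat array indexed by
 * the HW AST index reported in peer map events, giving O(1) lookup of
 * a mapped entry ("hw_ast_idx" is an assumed local bounded by the cfg
 * max_ast_idx):
 *
 *	struct dp_ast_entry *ase = soc->ast_table[hw_ast_idx];
 */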
270 
271 /**
272  * dp_find_peer_by_macaddr() - Find the peer matching the MAC address provided.
273  * @soc: soc handle
274  * @mac_addr: MAC address to be used to find peer
275  * @vdev_id: VDEV id
276  * @mod_id: MODULE ID
277  *
278  * Return: struct dp_peer
279  */
280 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
281 					uint8_t vdev_id, enum dp_mod_id mod_id)
282 {
283 	bool ast_ind_disable = wlan_cfg_get_ast_indication_disable(
284 							    soc->wlan_cfg_ctx);
285 	struct cdp_peer_info peer_info = {0};
286 
287 	if ((!soc->ast_offload_support) || (!ast_ind_disable)) {
288 		struct dp_ast_entry *ast_entry = NULL;
289 		uint16_t peer_id;
290 
291 		qdf_spin_lock_bh(&soc->ast_lock);
292 
293 		if (vdev_id == DP_VDEV_ALL)
294 			ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
295 		else
296 			ast_entry = dp_peer_ast_hash_find_by_vdevid
297 						(soc, mac_addr, vdev_id);
298 
299 		if (!ast_entry) {
300 			qdf_spin_unlock_bh(&soc->ast_lock);
301 			dp_err("NULL ast entry");
302 			return NULL;
303 		}
304 
305 		peer_id = ast_entry->peer_id;
306 		qdf_spin_unlock_bh(&soc->ast_lock);
307 
308 		if (peer_id == HTT_INVALID_PEER)
309 			return NULL;
310 
311 		return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
312 	}
313 
314 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac_addr, false,
315 				 CDP_WILD_PEER_TYPE);
316 	return dp_peer_hash_find_wrapper(soc, &peer_info, mod_id);
317 }
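
/*
 * Usage sketch (illustrative): passing DP_VDEV_ALL searches across all
 * vdevs. A non-NULL return carries a reference against mod_id which
 * the caller must drop ("mac" is an assumed local):
 *
 *	peer = dp_find_peer_by_macaddr(soc, mac, DP_VDEV_ALL,
 *				       DP_MOD_ID_CDP);
 *	if (peer)
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */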
318 
319 /**
320  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
321  * @soc: soc handle
322  *
323  * Return: QDF_STATUS
324  */
325 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
326 {
327 	uint32_t max_peers, peer_map_size;
328 
329 	max_peers = soc->max_peer_id;
330 	/* allocate the peer ID -> peer object map */
331 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
332 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
333 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
334 	if (!soc->peer_id_to_obj_map) {
335 		dp_peer_err("%pK: peer map memory allocation failed", soc);
336 		return QDF_STATUS_E_NOMEM;
337 	}
338 
339 	/*
340 	 * The peer_id_to_obj_map doesn't really need to be initialized,
341 	 * since elements are only used after they have been individually
342 	 * initialized.
343 	 * However, it is convenient for debugging to have all elements
344 	 * that are not in use set to 0.
345 	 */
346 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
347 
348 	qdf_spinlock_create(&soc->peer_map_lock);
349 	return QDF_STATUS_SUCCESS; /* success */
350 }
351 
352 #define DP_AST_HASH_LOAD_MULT  2
353 #define DP_AST_HASH_LOAD_SHIFT 0
354 
355 static inline uint32_t
356 dp_peer_find_hash_index(struct dp_soc *soc,
357 			union dp_align_mac_addr *mac_addr)
358 {
359 	uint32_t index;
360 
361 	index =
362 		mac_addr->align2.bytes_ab ^
363 		mac_addr->align2.bytes_cd ^
364 		mac_addr->align2.bytes_ef;
365 
366 	index ^= index >> soc->peer_hash.idx_bits;
367 	index &= soc->peer_hash.mask;
368 	return index;
369 }
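
/*
 * Worked example (illustrative; the exact 16-bit grouping depends on
 * host endianness): with the MAC address split into three 16-bit words
 * w0, w1 and w2, the computation is
 *
 *	index = w0 ^ w1 ^ w2;
 *	index ^= index >> idx_bits;
 *	index &= mask;
 *
 * an XOR fold of the address, followed by a shift-XOR that mixes the
 * discarded high bits into the low idx_bits kept by the mask.
 */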
370 
371 struct dp_peer *dp_peer_find_hash_find(
372 				struct dp_soc *soc, uint8_t *peer_mac_addr,
373 				int mac_addr_is_aligned, uint8_t vdev_id,
374 				enum dp_mod_id mod_id)
375 {
376 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
377 	uint32_t index;
378 	struct dp_peer *peer;
379 
380 	if (!soc->peer_hash.bins)
381 		return NULL;
382 
383 	if (mac_addr_is_aligned) {
384 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
385 	} else {
386 		qdf_mem_copy(
387 			&local_mac_addr_aligned.raw[0],
388 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
389 		mac_addr = &local_mac_addr_aligned;
390 	}
391 	index = dp_peer_find_hash_index(soc, mac_addr);
392 	qdf_spin_lock_bh(&soc->peer_hash_lock);
393 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
394 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
395 		    ((peer->vdev->vdev_id == vdev_id) ||
396 		     (vdev_id == DP_VDEV_ALL))) {
397 			/* take peer reference before returning */
398 			if (dp_peer_get_ref(soc, peer, mod_id) !=
399 						QDF_STATUS_SUCCESS)
400 				peer = NULL;
401 
402 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
403 			return peer;
404 		}
405 	}
406 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
407 	return NULL; /* failure */
408 }
409 
410 qdf_export_symbol(dp_peer_find_hash_find);
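
/*
 * Usage sketch (illustrative): a successful lookup returns with a
 * module reference held, so the caller must release it with the same
 * dp_mod_id ("mac" is an assumed local):
 *
 *	struct dp_peer *p;
 *
 *	p = dp_peer_find_hash_find(soc, mac, 0, vdev_id, DP_MOD_ID_CDP);
 *	if (!p)
 *		return;
 *	...use p, then...
 *	dp_peer_unref_delete(p, DP_MOD_ID_CDP);
 */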
411 
412 #ifdef WLAN_FEATURE_11BE_MLO
413 /**
414  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
415  * @soc: soc handle
416  *
417  * return: none
418  */
419 static void dp_peer_find_hash_detach(struct dp_soc *soc)
420 {
421 	if (soc->peer_hash.bins) {
422 		qdf_mem_free(soc->peer_hash.bins);
423 		soc->peer_hash.bins = NULL;
424 		qdf_spinlock_destroy(&soc->peer_hash_lock);
425 	}
426 
427 	if (soc->arch_ops.mlo_peer_find_hash_detach)
428 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
429 }
430 
431 /**
432  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
433  * @soc: soc handle
434  *
435  * return: QDF_STATUS
436  */
437 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
438 {
439 	int i, hash_elems, log2;
440 
441 	/* allocate the peer MAC address -> peer object hash table */
442 	hash_elems = soc->max_peers;
443 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
444 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
445 	log2 = dp_log2_ceil(hash_elems);
446 	hash_elems = 1 << log2;
447 
448 	soc->peer_hash.mask = hash_elems - 1;
449 	soc->peer_hash.idx_bits = log2;
450 	/* allocate an array of TAILQ peer object lists */
451 	soc->peer_hash.bins = qdf_mem_malloc(
452 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
453 	if (!soc->peer_hash.bins)
454 		return QDF_STATUS_E_NOMEM;
455 
456 	for (i = 0; i < hash_elems; i++)
457 		TAILQ_INIT(&soc->peer_hash.bins[i]);
458 
459 	qdf_spinlock_create(&soc->peer_hash_lock);
460 
461 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
462 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
463 			QDF_STATUS_SUCCESS)) {
464 		dp_peer_find_hash_detach(soc);
465 		return QDF_STATUS_E_NOMEM;
466 	}
467 	return QDF_STATUS_SUCCESS;
468 }
469 
470 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
471 {
472 	unsigned index;
473 
474 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
475 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
476 		qdf_spin_lock_bh(&soc->peer_hash_lock);
477 
478 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
479 							DP_MOD_ID_CONFIG))) {
480 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
481 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
482 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
483 			return;
484 		}
485 
486 		/*
487 		 * It is important to add the new peer at the tail of
488 		 * peer list with the bin index. Together with having
489 		 * the hash_find function search from head to tail,
490 		 * this ensures that if two entries with the same MAC address
491 		 * are stored, the one added first will be found first.
492 		 */
493 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
494 				  hash_list_elem);
495 
496 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
497 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
498 		if (soc->arch_ops.mlo_peer_find_hash_add)
499 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
500 	} else {
501 		dp_err("unknown peer type %d", peer->peer_type);
502 	}
503 }
504 
505 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
506 {
507 	unsigned index;
508 	struct dp_peer *tmppeer = NULL;
509 	int found = 0;
510 
511 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
512 
513 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
514 		/* Check if tail is not empty before delete */
515 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
516 
517 		qdf_spin_lock_bh(&soc->peer_hash_lock);
518 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
519 			      hash_list_elem) {
520 			if (tmppeer == peer) {
521 				found = 1;
522 				break;
523 			}
524 		}
525 		QDF_ASSERT(found);
526 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
527 			     hash_list_elem);
528 
529 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
530 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
531 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
532 		if (soc->arch_ops.mlo_peer_find_hash_remove)
533 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
534 	} else {
535 		dp_err("unknown peer type %d", peer->peer_type);
536 	}
537 }
538 
539 uint8_t dp_get_peer_link_id(struct dp_peer *peer)
540 {
541 	uint8_t link_id;
542 
543 	link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
544 	if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
545 		link_id = 0;
546 
547 	return link_id;
548 }
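
/*
 * Example (restating the logic above): an MLO link peer with
 * peer->link_id == 2 yields 3 (a 1-based link id); a non-link peer,
 * or a computed id outside [1, DP_MAX_MLO_LINKS], yields 0, meaning
 * "no valid link".
 */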
549 #else
550 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
551 {
552 	int i, hash_elems, log2;
553 
554 	/* allocate the peer MAC address -> peer object hash table */
555 	hash_elems = soc->max_peers;
556 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
557 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
558 	log2 = dp_log2_ceil(hash_elems);
559 	hash_elems = 1 << log2;
560 
561 	soc->peer_hash.mask = hash_elems - 1;
562 	soc->peer_hash.idx_bits = log2;
563 	/* allocate an array of TAILQ peer object lists */
564 	soc->peer_hash.bins = qdf_mem_malloc(
565 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
566 	if (!soc->peer_hash.bins)
567 		return QDF_STATUS_E_NOMEM;
568 
569 	for (i = 0; i < hash_elems; i++)
570 		TAILQ_INIT(&soc->peer_hash.bins[i]);
571 
572 	qdf_spinlock_create(&soc->peer_hash_lock);
573 	return QDF_STATUS_SUCCESS;
574 }
575 
576 static void dp_peer_find_hash_detach(struct dp_soc *soc)
577 {
578 	if (soc->peer_hash.bins) {
579 		qdf_mem_free(soc->peer_hash.bins);
580 		soc->peer_hash.bins = NULL;
581 		qdf_spinlock_destroy(&soc->peer_hash_lock);
582 	}
583 }
584 
585 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
586 {
587 	unsigned index;
588 
589 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
590 	qdf_spin_lock_bh(&soc->peer_hash_lock);
591 
592 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
593 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
594 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
595 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
596 		return;
597 	}
598 
599 	/*
600 	 * It is important to add the new peer at the tail of the peer list
601 	 * with the bin index.  Together with having the hash_find function
602 	 * search from head to tail, this ensures that if two entries with
603 	 * the same MAC address are stored, the one added first will be
604 	 * found first.
605 	 */
606 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
607 
608 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
609 }
610 
611 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
612 {
613 	unsigned index;
614 	struct dp_peer *tmppeer = NULL;
615 	int found = 0;
616 
617 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
618 	/* Check if tail is not empty before delete */
619 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
620 
621 	qdf_spin_lock_bh(&soc->peer_hash_lock);
622 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
623 		if (tmppeer == peer) {
624 			found = 1;
625 			break;
626 		}
627 	}
628 	QDF_ASSERT(found);
629 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
630 
631 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
632 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
633 }
634 
635 
636 #endif /* WLAN_FEATURE_11BE_MLO */
637 
638 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
639 			   struct dp_peer *peer)
640 {
641 	/* only link peer will be added to vdev peer list */
642 	if (IS_MLO_DP_MLD_PEER(peer))
643 		return;
644 
645 	qdf_spin_lock_bh(&vdev->peer_list_lock);
646 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
647 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
648 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
649 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
650 		return;
651 	}
652 
653 	/* add this peer into the vdev's list */
654 	if (wlan_op_mode_sta == vdev->opmode)
655 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
656 	else
657 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
658 
659 	vdev->num_peers++;
660 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
661 }
662 
663 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
664 			      struct dp_peer *peer)
665 {
666 	uint8_t found = 0;
667 	struct dp_peer *tmppeer = NULL;
668 
669 	/* only link peer will be added to vdev peer list */
670 	if (IS_MLO_DP_MLD_PEER(peer))
671 		return;
672 
673 	qdf_spin_lock_bh(&vdev->peer_list_lock);
674 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
675 		if (tmppeer == peer) {
676 			found = 1;
677 			break;
678 		}
679 	}
680 
681 	if (found) {
682 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
683 			     peer_list_elem);
684 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
685 		vdev->num_peers--;
686 	} else {
687 		/* Ignoring the remove operation as peer not found */
688 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
689 			      , soc, peer, vdev, &peer->vdev->peer_list);
690 	}
691 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
692 }
693 
694 void dp_txrx_peer_attach_add(struct dp_soc *soc,
695 			     struct dp_peer *peer,
696 			     struct dp_txrx_peer *txrx_peer)
697 {
698 	qdf_spin_lock_bh(&soc->peer_map_lock);
699 
700 	peer->txrx_peer = txrx_peer;
701 	txrx_peer->bss_peer = peer->bss_peer;
702 
703 	if (peer->peer_id == HTT_INVALID_PEER) {
704 		qdf_spin_unlock_bh(&soc->peer_map_lock);
705 		return;
706 	}
707 
708 	txrx_peer->peer_id = peer->peer_id;
709 
710 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
711 
712 	qdf_spin_unlock_bh(&soc->peer_map_lock);
713 }
714 
715 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
716 				struct dp_peer *peer,
717 				uint16_t peer_id)
718 {
719 	QDF_ASSERT(peer_id <= soc->max_peer_id);
720 
721 	qdf_spin_lock_bh(&soc->peer_map_lock);
722 
723 	peer->peer_id = peer_id;
724 
725 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
726 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
727 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
728 		qdf_spin_unlock_bh(&soc->peer_map_lock);
729 		return;
730 	}
731 
732 	if (!soc->peer_id_to_obj_map[peer_id]) {
733 		soc->peer_id_to_obj_map[peer_id] = peer;
734 		if (peer->txrx_peer)
735 			peer->txrx_peer->peer_id = peer_id;
736 	} else {
737 		/* Peer map event came for peer_id which
738 		 * is already mapped, this is not expected
739 		 */
740 		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
741 		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
742 		       soc->peer_id_to_obj_map[peer_id]);
743 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
744 		qdf_assert_always(0);
745 	}
746 	qdf_spin_unlock_bh(&soc->peer_map_lock);
747 }
748 
749 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
750 				   uint16_t peer_id)
751 {
752 	struct dp_peer *peer = NULL;
753 	QDF_ASSERT(peer_id <= soc->max_peer_id);
754 
755 	qdf_spin_lock_bh(&soc->peer_map_lock);
756 	peer = soc->peer_id_to_obj_map[peer_id];
757 	if (!peer) {
758 		dp_err("unable to get peer during peer id obj map remove");
759 		qdf_spin_unlock_bh(&soc->peer_map_lock);
760 		return;
761 	}
762 	peer->peer_id = HTT_INVALID_PEER;
763 	if (peer->txrx_peer)
764 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
765 	soc->peer_id_to_obj_map[peer_id] = NULL;
766 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
767 	qdf_spin_unlock_bh(&soc->peer_map_lock);
768 }
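
/*
 * Lifecycle sketch (illustrative): the target's peer map/unmap events
 * drive these helpers in pairs; each add takes a DP_MOD_ID_CONFIG
 * reference that the matching remove drops:
 *
 *	dp_peer_find_id_to_obj_add(soc, peer, peer_id);    on peer map
 *	dp_peer_find_id_to_obj_remove(soc, peer_id);       on peer unmap
 */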
769 
770 #ifdef FEATURE_MEC
771 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
772 {
773 	int log2, hash_elems, i;
774 
775 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
776 	hash_elems = 1 << log2;
777 
778 	soc->mec_hash.mask = hash_elems - 1;
779 	soc->mec_hash.idx_bits = log2;
780 
781 	dp_peer_info("%pK: max mec index: %d",
782 		     soc, DP_PEER_MAX_MEC_IDX);
783 
784 	/* allocate an array of TAILQ mec object lists */
785 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
786 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
787 							      dp_mec_entry)));
788 
789 	if (!soc->mec_hash.bins)
790 		return QDF_STATUS_E_NOMEM;
791 
792 	for (i = 0; i < hash_elems; i++)
793 		TAILQ_INIT(&soc->mec_hash.bins[i]);
794 
795 	return QDF_STATUS_SUCCESS;
796 }
797 
798 /**
799  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
800  * @soc: SoC handle
801  * @mac_addr: MAC address
802  *
803  * Return: MEC hash
804  */
805 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
806 					      union dp_align_mac_addr *mac_addr)
807 {
808 	uint32_t index;
809 
810 	index =
811 		mac_addr->align2.bytes_ab ^
812 		mac_addr->align2.bytes_cd ^
813 		mac_addr->align2.bytes_ef;
814 	index ^= index >> soc->mec_hash.idx_bits;
815 	index &= soc->mec_hash.mask;
816 	return index;
817 }
818 
819 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
820 						     uint8_t pdev_id,
821 						     uint8_t *mec_mac_addr)
822 {
823 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
824 	uint32_t index;
825 	struct dp_mec_entry *mecentry;
826 
827 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
828 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
829 	mac_addr = &local_mac_addr_aligned;
830 
831 	index = dp_peer_mec_hash_index(soc, mac_addr);
832 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
833 		if ((pdev_id == mecentry->pdev_id) &&
834 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
835 			return mecentry;
836 	}
837 
838 	return NULL;
839 }
840 
841 /**
842  * dp_peer_mec_hash_add() - Add MEC entry into hash table
843  * @soc: SoC handle
844  * @mecentry: MEC entry
845  *
846  * This function adds the MEC entry into SoC MEC hash table
847  *
848  * Return: None
849  */
850 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
851 					struct dp_mec_entry *mecentry)
852 {
853 	uint32_t index;
854 
855 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
856 	qdf_spin_lock_bh(&soc->mec_lock);
857 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
858 	qdf_spin_unlock_bh(&soc->mec_lock);
859 }
860 
861 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
862 				 struct dp_vdev *vdev,
863 				 uint8_t *mac_addr)
864 {
865 	struct dp_mec_entry *mecentry = NULL;
866 	struct dp_pdev *pdev = NULL;
867 
868 	if (!vdev) {
869 		dp_peer_err("%pK: Peer's vdev is NULL", soc);
870 		return QDF_STATUS_E_INVAL;
871 	}
872 
873 	pdev = vdev->pdev;
874 
875 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
876 					 DP_PEER_MAX_MEC_ENTRY)) {
877 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
878 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
879 		return QDF_STATUS_E_NOMEM;
880 	}
881 
882 	qdf_spin_lock_bh(&soc->mec_lock);
883 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
884 						   mac_addr);
885 	if (qdf_likely(mecentry)) {
886 		mecentry->is_active = TRUE;
887 		qdf_spin_unlock_bh(&soc->mec_lock);
888 		return QDF_STATUS_E_ALREADY;
889 	}
890 
891 	qdf_spin_unlock_bh(&soc->mec_lock);
892 
893 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
894 		      QDF_MAC_ADDR_FMT,
895 		      soc, pdev->pdev_id, vdev->vdev_id,
896 		      QDF_MAC_ADDR_REF(mac_addr));
897 
898 	mecentry = (struct dp_mec_entry *)
899 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
900 
901 	if (qdf_unlikely(!mecentry)) {
902 		dp_peer_err("%pK: fail to allocate mecentry", soc);
903 		return QDF_STATUS_E_NOMEM;
904 	}
905 
906 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
907 			 (struct qdf_mac_addr *)mac_addr);
908 	mecentry->pdev_id = pdev->pdev_id;
909 	mecentry->vdev_id = vdev->vdev_id;
910 	mecentry->is_active = TRUE;
911 	dp_peer_mec_hash_add(soc, mecentry);
912 
913 	qdf_atomic_inc(&soc->mec_cnt);
914 	DP_STATS_INC(soc, mec.added, 1);
915 
916 	return QDF_STATUS_SUCCESS;
917 }
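
/*
 * Usage sketch (illustrative; the caller shown is assumed): the RX
 * multicast-echo-check path adds a MEC entry when a frame sourced by
 * one of our own interfaces is echoed back. QDF_STATUS_E_ALREADY only
 * refreshes is_active and is not treated as a failure:
 *
 *	status = dp_peer_mec_add_entry(soc, vdev, sa_mac);
 *	if (QDF_IS_STATUS_ERROR(status) &&
 *	    status != QDF_STATUS_E_ALREADY)
 *		dp_peer_err("MEC add failed");
 */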
918 
919 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
920 			      void *ptr)
921 {
922 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
923 
924 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
925 
926 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
927 		     hash_list_elem);
928 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
929 }
930 
931 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
932 {
933 	struct dp_mec_entry *mecentry, *mecentry_next;
934 
935 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
936 
937 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
938 			   mecentry_next) {
939 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
940 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
941 		qdf_mem_free(mecentry);
942 		qdf_atomic_dec(&soc->mec_cnt);
943 		DP_STATS_INC(soc, mec.deleted, 1);
944 	}
945 }
946 
947 void dp_peer_mec_hash_detach(struct dp_soc *soc)
948 {
949 	dp_peer_mec_flush_entries(soc);
950 	qdf_mem_free(soc->mec_hash.bins);
951 	soc->mec_hash.bins = NULL;
952 }
953 
954 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
955 {
956 	qdf_spinlock_destroy(&soc->mec_lock);
957 }
958 
959 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
960 {
961 	qdf_spinlock_create(&soc->mec_lock);
962 }
963 #else
964 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
965 {
966 	return QDF_STATUS_SUCCESS;
967 }
968 
969 void dp_peer_mec_hash_detach(struct dp_soc *soc)
970 {
971 }
972 #endif
973 
974 #ifdef FEATURE_AST
975 #ifdef WLAN_FEATURE_11BE_MLO
976 /**
977  * dp_peer_exist_on_pdev() - check if a peer with the mac address exists on pdev
978  *
979  * @soc: Datapath SOC handle
980  * @peer_mac_addr: peer mac address
981  * @mac_addr_is_aligned: is mac address aligned
982  * @pdev: Datapath PDEV handle
983  *
984  * Return: true if peer found else return false
985  */
986 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
987 				  uint8_t *peer_mac_addr,
988 				  int mac_addr_is_aligned,
989 				  struct dp_pdev *pdev)
990 {
991 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
992 	unsigned int index;
993 	struct dp_peer *peer;
994 	bool found = false;
995 
996 	if (mac_addr_is_aligned) {
997 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
998 	} else {
999 		qdf_mem_copy(
1000 			&local_mac_addr_aligned.raw[0],
1001 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1002 		mac_addr = &local_mac_addr_aligned;
1003 	}
1004 	index = dp_peer_find_hash_index(soc, mac_addr);
1005 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1006 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1007 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1008 		    (peer->vdev->pdev == pdev)) {
1009 			found = true;
1010 			break;
1011 		}
1012 	}
1013 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1014 
1015 	if (found)
1016 		return found;
1017 
1018 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1019 					  mac_addr_is_aligned, DP_VDEV_ALL,
1020 					  DP_MOD_ID_CDP);
1021 	if (peer) {
1022 		if (peer->vdev->pdev == pdev)
1023 			found = true;
1024 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1025 	}
1026 
1027 	return found;
1028 }
1029 #else
1030 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1031 				  uint8_t *peer_mac_addr,
1032 				  int mac_addr_is_aligned,
1033 				  struct dp_pdev *pdev)
1034 {
1035 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1036 	unsigned int index;
1037 	struct dp_peer *peer;
1038 	bool found = false;
1039 
1040 	if (mac_addr_is_aligned) {
1041 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1042 	} else {
1043 		qdf_mem_copy(
1044 			&local_mac_addr_aligned.raw[0],
1045 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1046 		mac_addr = &local_mac_addr_aligned;
1047 	}
1048 	index = dp_peer_find_hash_index(soc, mac_addr);
1049 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1050 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1051 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1052 		    (peer->vdev->pdev == pdev)) {
1053 			found = true;
1054 			break;
1055 		}
1056 	}
1057 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1058 	return found;
1059 }
1060 #endif /* WLAN_FEATURE_11BE_MLO */
1061 
1062 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1063 {
1064 	int i, hash_elems, log2;
1065 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1066 
1067 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1068 		DP_AST_HASH_LOAD_SHIFT);
1069 
1070 	log2 = dp_log2_ceil(hash_elems);
1071 	hash_elems = 1 << log2;
1072 
1073 	soc->ast_hash.mask = hash_elems - 1;
1074 	soc->ast_hash.idx_bits = log2;
1075 
1076 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1077 		     soc, hash_elems, max_ast_idx);
1078 
1079 	/* allocate an array of TAILQ peer object lists */
1080 	soc->ast_hash.bins = qdf_mem_malloc(
1081 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1082 				dp_ast_entry)));
1083 
1084 	if (!soc->ast_hash.bins)
1085 		return QDF_STATUS_E_NOMEM;
1086 
1087 	for (i = 0; i < hash_elems; i++)
1088 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1089 
1090 	return QDF_STATUS_SUCCESS;
1091 }
1092 
1093 /**
1094  * dp_peer_ast_cleanup() - cleanup the references
1095  * @soc: SoC handle
1096  * @ast: ast entry
1097  *
1098  * Return: None
1099  */
1100 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1101 				       struct dp_ast_entry *ast)
1102 {
1103 	txrx_ast_free_cb cb = ast->callback;
1104 	void *cookie = ast->cookie;
1105 
1106 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1107 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1108 
1109 	/* Call the callback to free up the cookie */
1110 	if (cb) {
1111 		ast->callback = NULL;
1112 		ast->cookie = NULL;
1113 		cb(soc->ctrl_psoc,
1114 		   dp_soc_to_cdp_soc(soc),
1115 		   cookie,
1116 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1117 	}
1118 }
1119 
1120 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1121 {
1122 	unsigned int index;
1123 	struct dp_ast_entry *ast, *ast_next;
1124 
1125 	if (!soc->ast_hash.mask)
1126 		return;
1127 
1128 	if (!soc->ast_hash.bins)
1129 		return;
1130 
1131 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1132 
1133 	qdf_spin_lock_bh(&soc->ast_lock);
1134 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1135 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1136 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1137 					   hash_list_elem, ast_next) {
1138 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1139 					     hash_list_elem);
1140 				dp_peer_ast_cleanup(soc, ast);
1141 				soc->num_ast_entries--;
1142 				qdf_mem_free(ast);
1143 			}
1144 		}
1145 	}
1146 	qdf_spin_unlock_bh(&soc->ast_lock);
1147 
1148 	qdf_mem_free(soc->ast_hash.bins);
1149 	soc->ast_hash.bins = NULL;
1150 }
1151 
1152 /**
1153  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1154  * @soc: SoC handle
1155  * @mac_addr: MAC address
1156  *
1157  * Return: AST hash
1158  */
1159 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1160 					      union dp_align_mac_addr *mac_addr)
1161 {
1162 	uint32_t index;
1163 
1164 	index =
1165 		mac_addr->align2.bytes_ab ^
1166 		mac_addr->align2.bytes_cd ^
1167 		mac_addr->align2.bytes_ef;
1168 	index ^= index >> soc->ast_hash.idx_bits;
1169 	index &= soc->ast_hash.mask;
1170 	return index;
1171 }
1172 
1173 /**
1174  * dp_peer_ast_hash_add() - Add AST entry into hash table
1175  * @soc: SoC handle
1176  * @ase: AST entry
1177  *
1178  * This function adds the AST entry into SoC AST hash table
1179  * It assumes the caller has taken the ast lock to protect access to this table
1180  *
1181  * Return: None
1182  */
1183 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1184 					struct dp_ast_entry *ase)
1185 {
1186 	uint32_t index;
1187 
1188 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1189 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1190 }
1191 
1192 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1193 			     struct dp_ast_entry *ase)
1194 {
1195 	unsigned index;
1196 	struct dp_ast_entry *tmpase;
1197 	int found = 0;
1198 
1199 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1200 		return;
1201 
1202 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1203 	/* Check if tail is not empty before delete */
1204 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1205 
1206 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1207 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1208 
1209 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1210 		if (tmpase == ase) {
1211 			found = 1;
1212 			break;
1213 		}
1214 	}
1215 
1216 	QDF_ASSERT(found);
1217 
1218 	if (found)
1219 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1220 }
1221 
1222 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1223 						     uint8_t *ast_mac_addr,
1224 						     uint8_t vdev_id)
1225 {
1226 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1227 	uint32_t index;
1228 	struct dp_ast_entry *ase;
1229 
1230 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1231 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1232 	mac_addr = &local_mac_addr_aligned;
1233 
1234 	index = dp_peer_ast_hash_index(soc, mac_addr);
1235 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1236 		if ((vdev_id == ase->vdev_id) &&
1237 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1238 			return ase;
1239 		}
1240 	}
1241 
1242 	return NULL;
1243 }
1244 
1245 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1246 						     uint8_t *ast_mac_addr,
1247 						     uint8_t pdev_id)
1248 {
1249 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1250 	uint32_t index;
1251 	struct dp_ast_entry *ase;
1252 
1253 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1254 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1255 	mac_addr = &local_mac_addr_aligned;
1256 
1257 	index = dp_peer_ast_hash_index(soc, mac_addr);
1258 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1259 		if ((pdev_id == ase->pdev_id) &&
1260 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1261 			return ase;
1262 		}
1263 	}
1264 
1265 	return NULL;
1266 }
1267 
1268 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1269 					       uint8_t *ast_mac_addr)
1270 {
1271 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1272 	unsigned index;
1273 	struct dp_ast_entry *ase;
1274 
1275 	if (!soc->ast_hash.bins)
1276 		return NULL;
1277 
1278 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1279 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1280 	mac_addr = &local_mac_addr_aligned;
1281 
1282 	index = dp_peer_ast_hash_index(soc, mac_addr);
1283 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1284 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1285 			return ase;
1286 		}
1287 	}
1288 
1289 	return NULL;
1290 }
1291 
1292 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
1293 					struct dp_soc *soc,
1294 					uint8_t *ast_mac_addr,
1295 					enum cdp_txrx_ast_entry_type type)
1296 {
1297 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1298 	unsigned index;
1299 	struct dp_ast_entry *ase;
1300 
1301 	if (!soc->ast_hash.bins)
1302 		return NULL;
1303 
1304 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1305 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1306 	mac_addr = &local_mac_addr_aligned;
1307 
1308 	index = dp_peer_ast_hash_index(soc, mac_addr);
1309 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1310 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0 &&
1311 		    ase->type == type) {
1312 			return ase;
1313 		}
1314 	}
1315 
1316 	return NULL;
1317 }
1318 
1319 /**
1320  * dp_peer_map_ipa_evt() - Send peer map event to IPA
1321  * @soc: SoC handle
1322  * @peer: peer to which ast node belongs
1323  * @ast_entry: AST entry
1324  * @mac_addr: MAC address of ast node
1325  *
1326  * Return: None
1327  */
1328 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
1329 static inline
1330 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1331 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1332 {
1333 	/* ast_entry must be valid before it is dereferenced below */
1334 	if (ast_entry) {
1335 		if (soc->cdp_soc.ol_ops->peer_map_event)
1336 			soc->cdp_soc.ol_ops->peer_map_event(
1337 			soc->ctrl_psoc, ast_entry->peer_id,
1338 			ast_entry->ast_idx, ast_entry->vdev_id,
1339 			mac_addr, ast_entry->type, ast_entry->ast_hash_value);
1340 	} else if (!(peer->vdev && peer->vdev->proxysta_vdev)) {
1341 		dp_peer_info("%pK: AST entry not found", soc);
1342 	}
1343 }
1344 
1345 /**
1346  * dp_peer_unmap_ipa_evt() - Send peer unmap event to IPA
1347  * @soc: SoC handle
1348  * @peer_id: Peer id
1349  * @vdev_id: Vdev id
1350  * @mac_addr: Peer mac address
1351  *
1352  * Return: None
1353  */
1354 static inline
1355 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1356 			   uint8_t vdev_id, uint8_t *mac_addr)
1357 {
1358 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1359 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1360 						      peer_id, vdev_id,
1361 						      mac_addr);
1362 	}
1363 }
1364 #else
1365 static inline
1366 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1367 			   uint8_t vdev_id, uint8_t *mac_addr)
1368 {
1369 }
1370 
1371 static inline
1372 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1373 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1374 {
1375 }
1376 #endif
1377 
1378 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1379 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1380 				    uint8_t vdev_id, uint16_t ast_hash,
1381 				    uint8_t is_wds)
1382 {
1383 	struct dp_vdev *vdev;
1384 	struct dp_ast_entry *ast_entry;
1385 	enum cdp_txrx_ast_entry_type type;
1386 	struct dp_peer *peer;
1387 	struct dp_peer *old_peer;
1388 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1389 
1390 	if (is_wds)
1391 		type = CDP_TXRX_AST_TYPE_WDS;
1392 	else
1393 		type = CDP_TXRX_AST_TYPE_STATIC;
1394 
1395 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1396 	if (!peer) {
1397 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1398 			     soc, peer_id,
1399 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1400 		return QDF_STATUS_E_INVAL;
1401 	}
1402 
1403 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1404 		type = CDP_TXRX_AST_TYPE_MLD;
1405 
1406 	vdev = peer->vdev;
1407 	if (!vdev) {
1408 		dp_peer_err("%pK: Peer's vdev is NULL", soc);
1409 		status = QDF_STATUS_E_INVAL;
1410 		goto fail;
1411 	}
1412 
1413 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1414 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1415 		    type != CDP_TXRX_AST_TYPE_MLD &&
1416 		    type != CDP_TXRX_AST_TYPE_SELF) {
1417 			status = QDF_STATUS_E_BUSY;
1418 			goto fail;
1419 		}
1420 	}
1421 
1422 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1423 		      soc, vdev->vdev_id, type,
1424 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1425 		      QDF_MAC_ADDR_REF(mac_addr));
1426 
1427 	/*
1428 	 * In an MLO scenario, the same mac address may appear both as a
1429 	 * link mac address and as an MLD mac address.
1430 	 * A duplicate AST map needs to be handled for the non-MLD type.
1431 	 */
1432 	qdf_spin_lock_bh(&soc->ast_lock);
1433 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1434 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1435 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1436 			      hw_peer_id, vdev_id,
1437 			      QDF_MAC_ADDR_REF(mac_addr));
1438 
1439 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1440 						   DP_MOD_ID_AST);
1441 		if (!old_peer) {
1442 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1443 				     soc, ast_entry->peer_id,
1444 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1445 			qdf_spin_unlock_bh(&soc->ast_lock);
1446 			status = QDF_STATUS_E_INVAL;
1447 			goto fail;
1448 		}
1449 
1450 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1451 		dp_peer_free_ast_entry(soc, ast_entry);
1452 		/* old_peer is known to be non-NULL at this point */
1453 		dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1454 	}
1455 
1456 	ast_entry = (struct dp_ast_entry *)
1457 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1458 	if (!ast_entry) {
1459 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1460 		qdf_spin_unlock_bh(&soc->ast_lock);
1461 		QDF_ASSERT(0);
1462 		status = QDF_STATUS_E_NOMEM;
1463 		goto fail;
1464 	}
1465 
1466 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1467 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1468 	ast_entry->is_mapped = false;
1469 	ast_entry->delete_in_progress = false;
1470 	ast_entry->next_hop = 0;
1471 	ast_entry->vdev_id = vdev->vdev_id;
1472 	ast_entry->type = type;
1473 
1474 	switch (type) {
1475 	case CDP_TXRX_AST_TYPE_STATIC:
1476 		if (peer->vdev->opmode == wlan_op_mode_sta)
1477 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1478 		break;
1479 	case CDP_TXRX_AST_TYPE_WDS:
1480 		ast_entry->next_hop = 1;
1481 		break;
1482 	case CDP_TXRX_AST_TYPE_MLD:
1483 		break;
1484 	default:
1485 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1486 	}
1487 
1488 	ast_entry->is_active = TRUE;
1489 	DP_STATS_INC(soc, ast.added, 1);
1490 	soc->num_ast_entries++;
1491 	dp_peer_ast_hash_add(soc, ast_entry);
1492 
1493 	ast_entry->ast_idx = hw_peer_id;
1494 	ast_entry->ast_hash_value = ast_hash;
1495 	ast_entry->peer_id = peer_id;
1496 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1497 			  ase_list_elem);
1498 
1499 	dp_peer_map_ipa_evt(soc, peer, ast_entry, mac_addr);
1500 
1501 	qdf_spin_unlock_bh(&soc->ast_lock);
1502 fail:
1503 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1504 
1505 	return status;
1506 }
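
/*
 * Flow sketch (illustrative; the call site is assumed, not shown in
 * this file): on AST-offload targets with the host AST DB enabled, the
 * HTT peer-map handler is expected to mirror FW-owned entries through
 * this helper:
 *
 *	dp_peer_host_add_map_ast(soc, peer_id, mac_addr, hw_peer_id,
 *				 vdev_id, ast_hash, is_wds);
 */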
1507 
1508 /**
1509  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1510  * @soc: SoC handle
1511  * @peer: peer to which ast node belongs
1512  * @mac_addr: MAC address of ast node
1513  * @hw_peer_id: HW AST Index returned by target in peer map event
1514  * @vdev_id: vdev id of the VAP to which the peer belongs
1515  * @ast_hash: ast hash value in HW
1516  * @is_wds: flag to indicate peer map event for WDS ast entry
1517  *
1518  * Return: QDF_STATUS code
1519  */
1520 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1521 					 struct dp_peer *peer,
1522 					 uint8_t *mac_addr,
1523 					 uint16_t hw_peer_id,
1524 					 uint8_t vdev_id,
1525 					 uint16_t ast_hash,
1526 					 uint8_t is_wds)
1527 {
1528 	struct dp_ast_entry *ast_entry = NULL;
1529 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1530 	void *cookie = NULL;
1531 	txrx_ast_free_cb cb = NULL;
1532 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1533 
1534 	if (soc->ast_offload_support && !wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc))
1535 		return QDF_STATUS_SUCCESS;
1536 
1537 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1538 		    soc, peer, hw_peer_id, vdev_id,
1539 		    QDF_MAC_ADDR_REF(mac_addr));
1540 
1541 	qdf_spin_lock_bh(&soc->ast_lock);
1542 
1543 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1544 
1545 	if (is_wds) {
1546 		/*
1547 		 * If the next-hop peer was deleted while its AST peer map is
1548 		 * processed, free the AST entry; it is not attached to a peer yet
1549 		 */
1550 		if (!peer) {
1551 			if (ast_entry)
1552 				dp_peer_free_ast_entry(soc, ast_entry);
1553 
1554 			qdf_spin_unlock_bh(&soc->ast_lock);
1555 
1556 			dp_peer_alert("Peer is NULL for WDS entry mac "
1557 				      QDF_MAC_ADDR_FMT " ",
1558 				      QDF_MAC_ADDR_REF(mac_addr));
1559 			return QDF_STATUS_E_INVAL;
1560 		}
1561 		/*
1562 		 * Certain cases, such as an auth attack on a repeater,
1563 		 * can cause the number of ast_entries falling in the
1564 		 * same hash bucket to exceed the max_skid length
1565 		 * supported by HW in the root AP. In these cases the
1566 		 * FW returns the hw_peer_id (ast_index) as 0xffff,
1567 		 * indicating that HW could not add the entry to its
1568 		 * table. The host has to delete the entry from its
1569 		 * table in these cases.
1570 		 */
1571 		if (hw_peer_id == HTT_INVALID_PEER) {
1572 			DP_STATS_INC(soc, ast.map_err, 1);
1573 			if (ast_entry) {
1574 				if (ast_entry->is_mapped) {
1575 					soc->ast_table[ast_entry->ast_idx] =
1576 						NULL;
1577 				}
1578 
1579 				cb = ast_entry->callback;
1580 				cookie = ast_entry->cookie;
1581 				peer_type = ast_entry->type;
1582 
1583 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1584 				dp_peer_free_ast_entry(soc, ast_entry);
1585 
1586 				qdf_spin_unlock_bh(&soc->ast_lock);
1587 
1588 				if (cb) {
1589 					cb(soc->ctrl_psoc,
1590 					   dp_soc_to_cdp_soc(soc),
1591 					   cookie,
1592 					   CDP_TXRX_AST_DELETED);
1593 				}
1594 			} else {
1595 				qdf_spin_unlock_bh(&soc->ast_lock);
1596 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1597 					      peer, peer->peer_id,
1598 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1599 					      QDF_MAC_ADDR_REF(mac_addr),
1600 					      vdev_id, is_wds);
1601 			}
1602 			err = QDF_STATUS_E_INVAL;
1603 
1604 			dp_hmwds_ast_add_notify(peer, mac_addr,
1605 						peer_type, err, true);
1606 
1607 			return err;
1608 		}
1609 	}
1610 
1611 	if (!peer) {
1612 		qdf_spin_unlock_bh(&soc->ast_lock);
1613 		dp_peer_alert("Peer is NULL for mac " QDF_MAC_ADDR_FMT " ",
1614 			      QDF_MAC_ADDR_REF(mac_addr));
1615 		return QDF_STATUS_E_INVAL;
1616 	}
1617 
1618 	if (ast_entry) {
1619 		ast_entry->ast_idx = hw_peer_id;
1620 		soc->ast_table[hw_peer_id] = ast_entry;
1621 		ast_entry->is_active = TRUE;
1622 		peer_type = ast_entry->type;
1623 		ast_entry->ast_hash_value = ast_hash;
1624 		ast_entry->is_mapped = TRUE;
1625 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1626 
1627 		ast_entry->peer_id = peer->peer_id;
1628 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1629 				  ase_list_elem);
1630 	}
1631 
1632 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev) ||
1633 	    wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc)) {
1634 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1635 			soc->cdp_soc.ol_ops->peer_map_event(
1636 			soc->ctrl_psoc, peer->peer_id,
1637 			hw_peer_id, vdev_id,
1638 			mac_addr, peer_type, ast_hash);
1639 		}
1640 	} else {
1641 		dp_peer_err("%pK: AST entry not found", soc);
1642 		err = QDF_STATUS_E_NOENT;
1643 	}
1644 
1645 	qdf_spin_unlock_bh(&soc->ast_lock);
1646 
1647 	dp_hmwds_ast_add_notify(peer, mac_addr,
1648 				peer_type, err, true);
1649 
1650 	return err;
1651 }
1652 
1653 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1654 			   struct cdp_soc *dp_soc,
1655 			   void *cookie,
1656 			   enum cdp_ast_free_status status)
1657 {
1658 	struct dp_ast_free_cb_params *param =
1659 		(struct dp_ast_free_cb_params *)cookie;
1660 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1661 	struct dp_peer *peer = NULL;
1662 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1663 
1664 	if (status != CDP_TXRX_AST_DELETED) {
1665 		qdf_mem_free(cookie);
1666 		return;
1667 	}
1668 
1669 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1670 				      0, param->vdev_id, DP_MOD_ID_AST);
1671 	if (peer) {
1672 		err = dp_peer_add_ast(soc, peer,
1673 				      &param->mac_addr.raw[0],
1674 				      param->type,
1675 				      param->flags);
1676 
1677 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1678 					param->type, err, false);
1679 
1680 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1681 	}
1682 	qdf_mem_free(cookie);
1683 }
1684 
1685 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1686 			   struct dp_peer *peer,
1687 			   uint8_t *mac_addr,
1688 			   enum cdp_txrx_ast_entry_type type,
1689 			   uint32_t flags)
1690 {
1691 	struct dp_ast_entry *ast_entry = NULL;
1692 	struct dp_vdev *vdev = NULL;
1693 	struct dp_pdev *pdev = NULL;
1694 	txrx_ast_free_cb cb = NULL;
1695 	void *cookie = NULL;
1696 	struct dp_peer *vap_bss_peer = NULL;
1697 	bool is_peer_found = false;
1698 	int status = 0;
1699 
1700 	if (soc->ast_offload_support)
1701 		return QDF_STATUS_E_INVAL;
1702 
1703 	vdev = peer->vdev;
1704 	if (!vdev) {
1705 		dp_peer_err("%pK: Peer's vdev is NULL", soc);
1706 		QDF_ASSERT(0);
1707 		return QDF_STATUS_E_INVAL;
1708 	}
1709 
1710 	pdev = vdev->pdev;
1711 
1712 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1713 
1714 	qdf_spin_lock_bh(&soc->ast_lock);
1715 
1716 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1717 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1718 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1719 			qdf_spin_unlock_bh(&soc->ast_lock);
1720 			return QDF_STATUS_E_BUSY;
1721 		}
1722 	}
1723 
1724 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1725 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1726 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1727 		      QDF_MAC_ADDR_REF(mac_addr));
1728 
1729 	/* FW supports only twice the max_peers number of AST entries */
1730 	if (soc->num_ast_entries >=
1731 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1732 		qdf_spin_unlock_bh(&soc->ast_lock);
1733 		dp_peer_err("%pK: Max ast entries reached", soc);
1734 		return QDF_STATUS_E_RESOURCES;
1735 	}
1736 
1737 	/* If the AST entry already exists, just return from here.
1738 	 * An ast entry with the same mac address can exist on different
1739 	 * radios; if ast_override support is enabled, search by pdev in
1740 	 * this case.
1741 	 */
1742 	if (soc->ast_override_support) {
1743 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1744 							    pdev->pdev_id);
1745 		if (ast_entry) {
1746 			qdf_spin_unlock_bh(&soc->ast_lock);
1747 			return QDF_STATUS_E_ALREADY;
1748 		}
1749 
1750 		if (is_peer_found) {
1751 			/* During WDS to static roaming, the peer is added
1752 			 * to the list before the static AST entry is created.
1753 			 * So allow an AST entry of STATIC type even if the
1754 			 * peer is present
1755 			 */
1756 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1757 				qdf_spin_unlock_bh(&soc->ast_lock);
1758 				return QDF_STATUS_E_ALREADY;
1759 			}
1760 		}
1761 	} else {
1762 		/* Multiple WDS_HM_SEC entries can be added for the same mac
1763 		 * address, so do not check for an existing entry
1764 		 */
1765 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1766 			goto add_ast_entry;
1767 
1768 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1769 
1770 		if (ast_entry) {
1771 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1772 			    !ast_entry->delete_in_progress) {
1773 				qdf_spin_unlock_bh(&soc->ast_lock);
1774 				return QDF_STATUS_E_ALREADY;
1775 			}
1776 
1777 			/* An add for an HMWDS entry cannot be ignored if an
1778 			 * AST entry with the same mac address already exists.
1779 			 *
1780 			 * If an ast entry exists with the requested mac
1781 			 * address, send a delete command and register a
1782 			 * callback which takes care of adding the HMWDS ast
1783 			 * entry on delete confirmation from the target
1784 			 */
1785 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1786 				struct dp_ast_free_cb_params *param = NULL;
1787 
1788 				if (ast_entry->type ==
1789 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1790 					goto add_ast_entry;
1791 
1792 				/* save existing callback */
1793 				if (ast_entry->callback) {
1794 					cb = ast_entry->callback;
1795 					cookie = ast_entry->cookie;
1796 				}
1797 
1798 				param = qdf_mem_malloc(sizeof(*param));
1799 				if (!param) {
1800 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1801 						  QDF_TRACE_LEVEL_ERROR,
1802 						  "Allocation failed");
1803 					qdf_spin_unlock_bh(&soc->ast_lock);
1804 					return QDF_STATUS_E_NOMEM;
1805 				}
1806 
1807 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1808 					     QDF_MAC_ADDR_SIZE);
1809 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1810 					     &peer->mac_addr.raw[0],
1811 					     QDF_MAC_ADDR_SIZE);
1812 				param->type = type;
1813 				param->flags = flags;
1814 				param->vdev_id = vdev->vdev_id;
1815 				ast_entry->callback = dp_peer_free_hmwds_cb;
1816 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1817 				ast_entry->type = type;
1818 				ast_entry->cookie = (void *)param;
1819 				if (!ast_entry->delete_in_progress)
1820 					dp_peer_del_ast(soc, ast_entry);
1821 
1822 				qdf_spin_unlock_bh(&soc->ast_lock);
1823 
1824 				/* Call the saved callback */
1825 				if (cb) {
1826 					cb(soc->ctrl_psoc,
1827 					   dp_soc_to_cdp_soc(soc),
1828 					   cookie,
1829 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1830 				}
1831 				return QDF_STATUS_E_AGAIN;
1832 			}
1833 
1834 			qdf_spin_unlock_bh(&soc->ast_lock);
1835 			return QDF_STATUS_E_ALREADY;
1836 		}
1837 	}
1838 
1839 add_ast_entry:
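	/* Common path: allocate the ast entry and initialize the
	 * type-independent fields before the type-specific setup below.
	 */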
1840 	ast_entry = (struct dp_ast_entry *)
1841 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1842 
1843 	if (!ast_entry) {
1844 		qdf_spin_unlock_bh(&soc->ast_lock);
1845 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1846 		QDF_ASSERT(0);
1847 		return QDF_STATUS_E_NOMEM;
1848 	}
1849 
1850 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1851 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1852 	ast_entry->is_mapped = false;
1853 	ast_entry->delete_in_progress = false;
1854 	ast_entry->peer_id = HTT_INVALID_PEER;
1855 	ast_entry->next_hop = 0;
1856 	ast_entry->vdev_id = vdev->vdev_id;
1857 
1858 	switch (type) {
1859 	case CDP_TXRX_AST_TYPE_STATIC:
1860 		peer->self_ast_entry = ast_entry;
1861 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1862 		if (peer->vdev->opmode == wlan_op_mode_sta)
1863 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1864 		break;
1865 	case CDP_TXRX_AST_TYPE_SELF:
1866 		peer->self_ast_entry = ast_entry;
1867 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1868 		break;
1869 	case CDP_TXRX_AST_TYPE_WDS:
1870 		ast_entry->next_hop = 1;
1871 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1872 		break;
1873 	case CDP_TXRX_AST_TYPE_WDS_HM:
1874 		ast_entry->next_hop = 1;
1875 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1876 		break;
1877 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1878 		ast_entry->next_hop = 1;
1879 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1880 		ast_entry->peer_id = peer->peer_id;
1881 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1882 				  ase_list_elem);
1883 		break;
1884 	case CDP_TXRX_AST_TYPE_DA:
1885 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1886 							  DP_MOD_ID_AST);
1887 		if (!vap_bss_peer) {
1888 			qdf_spin_unlock_bh(&soc->ast_lock);
1889 			qdf_mem_free(ast_entry);
1890 			return QDF_STATUS_E_FAILURE;
1891 		}
1892 		peer = vap_bss_peer;
1893 		ast_entry->next_hop = 1;
1894 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1895 		break;
1896 	default:
1897 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1898 	}
1899 
1900 	ast_entry->is_active = TRUE;
1901 	DP_STATS_INC(soc, ast.added, 1);
1902 	soc->num_ast_entries++;
1903 	dp_peer_ast_hash_add(soc, ast_entry);
1904 
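	/* STATIC, SELF, STA_BSS and WDS_HM_SEC entries are host-managed
	 * on this path; only the remaining next-hop types (WDS, WDS_HM,
	 * DA) are pushed to FW below.
	 */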
1905 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1906 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1907 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1908 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1909 		status = dp_add_wds_entry_wrapper(soc,
1910 						  peer,
1911 						  mac_addr,
1912 						  flags,
1913 						  ast_entry->type);
1914 
1915 	if (vap_bss_peer)
1916 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1917 
1918 	qdf_spin_unlock_bh(&soc->ast_lock);
1919 	return qdf_status_from_os_return(status);
1920 }
1921 
1922 qdf_export_symbol(dp_peer_add_ast);
1923 
1924 void dp_peer_free_ast_entry(struct dp_soc *soc,
1925 			    struct dp_ast_entry *ast_entry)
1926 {
1927 	/*
1928 	 * NOTE: Ensure that call to this API is done
1929 	 * after soc->ast_lock is taken
1930 	 */
1931 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1932 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1933 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1934 
1935 	ast_entry->callback = NULL;
1936 	ast_entry->cookie = NULL;
1937 
1938 	DP_STATS_INC(soc, ast.deleted, 1);
1939 	dp_peer_ast_hash_remove(soc, ast_entry);
1940 	dp_peer_ast_cleanup(soc, ast_entry);
1941 	qdf_mem_free(ast_entry);
1942 	soc->num_ast_entries--;
1943 }
1944 
1945 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1946 			      struct dp_ast_entry *ast_entry,
1947 			      struct dp_peer *peer)
1948 {
1949 	if (!peer) {
1950 		dp_info_rl("NULL peer");
1951 		return;
1952 	}
1953 
1954 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1955 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1956 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1957 			  ast_entry->type);
1958 		return;
1959 	}
1960 	/*
1961 	 * NOTE: Ensure that call to this API is done
1962 	 * after soc->ast_lock is taken
1963 	 */
1964 
1965 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1966 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1967 
1968 	if (ast_entry == peer->self_ast_entry)
1969 		peer->self_ast_entry = NULL;
1970 
1971 	/*
1972 	 * release the reference only if it is mapped
1973 	 * to ast_table
1974 	 */
1975 	if (ast_entry->is_mapped)
1976 		soc->ast_table[ast_entry->ast_idx] = NULL;
1977 
1978 	ast_entry->peer_id = HTT_INVALID_PEER;
1979 }
1980 
1981 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1982 {
1983 	struct dp_peer *peer = NULL;
1984 
1985 	if (soc->ast_offload_support)
1986 		return;
1987 
1988 	if (!ast_entry) {
1989 		dp_info_rl("NULL AST entry");
1990 		return;
1991 	}
1992 
1993 	if (ast_entry->delete_in_progress) {
1994 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1995 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1996 			  ast_entry->type);
1997 		return;
1998 	}
1999 
2000 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2001 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2002 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2003 
2004 	ast_entry->delete_in_progress = true;
2005 
2006 	/* In teardown, del ast is called after the logical delete state is
2007 	 * set; use __dp_peer_get_ref_by_id to get the reference
2008 	 * irrespective of state.
2009 	 */
2010 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2011 				       DP_MOD_ID_AST);
2012 
2013 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2014 
2015 	/* Remove SELF and STATIC entries in teardown itself */
2016 	if (!ast_entry->next_hop)
2017 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2018 
2019 	if (ast_entry->is_mapped)
2020 		soc->ast_table[ast_entry->ast_idx] = NULL;
2021 
2022 	/* If peer map v2 is enabled, the ast entry is not freed here; it
2023 	 * is supposed to be freed in the unmap event (after delete
2024 	 * confirmation is received from the target).
2025 	 *
2026 	 * If peer_id is invalid, the peer map event was never received for
2027 	 * this peer; only in that case free the ast entry from here.
2028 	 */
2029 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2030 		goto end;
2031 
2032 	/* For a WDS secondary entry ast_entry->next_hop is set, so
2033 	 * unlinking has to be done explicitly here. As this entry is not
2034 	 * a mapped entry, an unmap notification from FW will not come;
2035 	 * hence unlinking is done right here.
2036 	 */
2037 
2038 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2039 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2040 
2041 	dp_peer_free_ast_entry(soc, ast_entry);
2042 
2043 end:
2044 	if (peer)
2045 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2046 }
2047 
2048 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2049 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2050 {
2051 	int ret = -1;
2052 	struct dp_peer *old_peer;
2053 
2054 	if (soc->ast_offload_support)
2055 		return QDF_STATUS_E_INVAL;
2056 
2057 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2058 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2059 		      peer->vdev->vdev_id, flags,
2060 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2061 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2062 
2063 	/* Do not send an AST update in the below cases:
2064 	 *  1) AST entry delete has already been triggered
2065 	 *  2) Peer delete has already been triggered
2066 	 *  3) The HTT map for the create event was never received
2067 	 */
2068 	if (ast_entry->delete_in_progress ||
2069 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2070 	    !ast_entry->is_mapped)
2071 		return ret;
2072 
2073 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2074 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2075 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2076 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2077 		return 0;
2078 
2079 	/*
2080 	 * Avoid a flood of WMI update messages being sent to FW for the
2081 	 * same peer.
2082 	 */
2082 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2083 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2084 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2085 	    (ast_entry->is_active))
2086 		return 0;
2087 
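	/* WDS roam: detach the entry from the old peer's list, re-home it
	 * on the new peer and then push the update to FW.
	 */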
2088 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2089 					 DP_MOD_ID_AST);
2090 	if (!old_peer)
2091 		return 0;
2092 
2093 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2094 
2095 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2096 
2097 	ast_entry->peer_id = peer->peer_id;
2098 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2099 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2100 	ast_entry->vdev_id = peer->vdev->vdev_id;
2101 	ast_entry->is_active = TRUE;
2102 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2103 
2104 	ret = dp_update_wds_entry_wrapper(soc,
2105 					  peer,
2106 					  ast_entry->mac_addr.raw,
2107 					  flags);
2108 
2109 	return ret;
2110 }
2111 
2112 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2113 				struct dp_ast_entry *ast_entry)
2114 {
2115 	return ast_entry->pdev_id;
2116 }
2117 
2118 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2119 				struct dp_ast_entry *ast_entry)
2120 {
2121 	return ast_entry->next_hop;
2122 }
2123 
2124 void dp_peer_ast_set_type(struct dp_soc *soc,
2125 				struct dp_ast_entry *ast_entry,
2126 				enum cdp_txrx_ast_entry_type type)
2127 {
2128 	ast_entry->type = type;
2129 }
2130 
2131 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2132 			      struct dp_ast_entry *ast_entry,
2133 			      struct dp_peer *peer)
2134 {
2135 	bool delete_in_fw = false;
2136 
2137 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2138 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2139 		  __func__, ast_entry->type, ast_entry->pdev_id,
2140 		  ast_entry->vdev_id,
2141 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2142 		  ast_entry->next_hop, ast_entry->peer_id);
2143 
2144 	/*
2145 	 * If the peer state is logical delete, the peer is about to be
2146 	 * torn down with a peer delete command to firmware, which will
2147 	 * clean up all the wds ast entries. So there is no need to send
2148 	 * an explicit wds ast delete to firmware.
2149 	 */
2150 	if (ast_entry->next_hop) {
2151 		if (peer && dp_peer_state_cmp(peer,
2152 					      DP_PEER_STATE_LOGICAL_DELETE))
2153 			delete_in_fw = false;
2154 		else
2155 			delete_in_fw = true;
2156 
2157 		dp_del_wds_entry_wrapper(soc,
2158 					 ast_entry->vdev_id,
2159 					 ast_entry->mac_addr.raw,
2160 					 ast_entry->type,
2161 					 delete_in_fw);
2162 	}
2163 }
2164 #else
2165 void dp_peer_free_ast_entry(struct dp_soc *soc,
2166 			    struct dp_ast_entry *ast_entry)
2167 {
2168 }
2169 
2170 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2171 			      struct dp_ast_entry *ast_entry,
2172 			      struct dp_peer *peer)
2173 {
2174 }
2175 
2176 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2177 			     struct dp_ast_entry *ase)
2178 {
2179 }
2180 
2181 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2182 						     uint8_t *ast_mac_addr,
2183 						     uint8_t vdev_id)
2184 {
2185 	return NULL;
2186 }
2187 
2188 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2189 			   struct dp_peer *peer,
2190 			   uint8_t *mac_addr,
2191 			   enum cdp_txrx_ast_entry_type type,
2192 			   uint32_t flags)
2193 {
2194 	return QDF_STATUS_E_FAILURE;
2195 }
2196 
2197 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2198 {
2199 }
2200 
2201 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2202 			struct dp_ast_entry *ast_entry, uint32_t flags)
2203 {
2204 	return 1;
2205 }
2206 
2207 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2208 					       uint8_t *ast_mac_addr)
2209 {
2210 	return NULL;
2211 }
2212 
2213 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
2214 					struct dp_soc *soc,
2215 					uint8_t *ast_mac_addr,
2216 					enum cdp_txrx_ast_entry_type type)
2217 {
2218 	return NULL;
2219 }
2220 
2221 static inline
2222 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2223 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2224 				    uint8_t vdev_id, uint16_t ast_hash,
2225 				    uint8_t is_wds)
2226 {
2227 	return QDF_STATUS_SUCCESS;
2228 }
2229 
2230 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2231 						     uint8_t *ast_mac_addr,
2232 						     uint8_t pdev_id)
2233 {
2234 	return NULL;
2235 }
2236 
2237 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2238 {
2239 	return QDF_STATUS_SUCCESS;
2240 }
2241 
2242 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2243 					 struct dp_peer *peer,
2244 					 uint8_t *mac_addr,
2245 					 uint16_t hw_peer_id,
2246 					 uint8_t vdev_id,
2247 					 uint16_t ast_hash,
2248 					 uint8_t is_wds)
2249 {
2250 	return QDF_STATUS_SUCCESS;
2251 }
2252 
2253 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2254 {
2255 }
2256 
2257 void dp_peer_ast_set_type(struct dp_soc *soc,
2258 				struct dp_ast_entry *ast_entry,
2259 				enum cdp_txrx_ast_entry_type type)
2260 {
2261 }
2262 
2263 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2264 				struct dp_ast_entry *ast_entry)
2265 {
2266 	return 0xff;
2267 }
2268 
2269 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2270 				 struct dp_ast_entry *ast_entry)
2271 {
2272 	return 0xff;
2273 }
2274 
2275 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2276 			      struct dp_ast_entry *ast_entry,
2277 			      struct dp_peer *peer)
2278 {
2279 }
2280 
2281 static inline
2282 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
2283 			   uint8_t vdev_id, uint8_t *mac_addr)
2284 {
2285 }
2286 #endif
2287 
2288 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2289 void dp_peer_ast_send_multi_wds_del(
2290 		struct dp_soc *soc, uint8_t vdev_id,
2291 		struct peer_del_multi_wds_entries *wds_list)
2292 {
2293 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2294 
2295 	if (cdp_soc && cdp_soc->ol_ops &&
2296 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2297 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2298 							  vdev_id, wds_list);
2299 }
2300 #endif
2301 
2302 #ifdef FEATURE_WDS
2303 /**
2304  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2305  * @soc: soc handle
2306  * @peer: peer handle
2307  *
2308  * Free all the wds ast entries associated with peer
2309  *
2310  * Return: Number of wds ast entries freed
2311  */
2312 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2313 					     struct dp_peer *peer)
2314 {
2315 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2316 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2317 	uint32_t num_ast = 0;
2318 
2319 	TAILQ_INIT(&ast_local_list);
2320 	qdf_spin_lock_bh(&soc->ast_lock);
2321 
2322 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2323 		if (ast_entry->next_hop)
2324 			num_ast++;
2325 
2326 		if (ast_entry->is_mapped)
2327 			soc->ast_table[ast_entry->ast_idx] = NULL;
2328 
2329 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2330 		DP_STATS_INC(soc, ast.deleted, 1);
2331 		dp_peer_ast_hash_remove(soc, ast_entry);
2332 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2333 				  ase_list_elem);
2334 		soc->num_ast_entries--;
2335 	}
2336 
2337 	qdf_spin_unlock_bh(&soc->ast_lock);
2338 
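	/* The entries were moved to ast_local_list under ast_lock; invoke
	 * the free callbacks and release the memory outside the lock.
	 */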
2339 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2340 			   temp_ast_entry) {
2341 		if (ast_entry->callback)
2342 			ast_entry->callback(soc->ctrl_psoc,
2343 					    dp_soc_to_cdp_soc(soc),
2344 					    ast_entry->cookie,
2345 					    CDP_TXRX_AST_DELETED);
2346 
2347 		qdf_mem_free(ast_entry);
2348 	}
2349 
2350 	return num_ast;
2351 }

2352 /**
2353  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2354  * @soc: soc handle
2355  * @peer: peer handle
2356  * @free_wds_count: number of wds entries freed by FW with peer delete
2357  *
2358  * Free all the wds ast entries associated with the peer and compare
2359  * the count with the value received from firmware.
2360  *
2361  * Return: None
2362  */
2363 static void
2364 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2365 			  uint32_t free_wds_count)
2366 {
2367 	uint32_t wds_deleted = 0;
2368 	bool ast_ind_disable;
2369 
2370 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2371 		return;
2372 
2373 	ast_ind_disable = wlan_cfg_get_ast_indication_disable
2374 		(soc->wlan_cfg_ctx);
2375 
2376 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2377 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2378 	    (free_wds_count != wds_deleted) && !ast_ind_disable) {
2379 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2380 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT")number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d",
2381 			 peer, peer->mac_addr.raw, free_wds_count,
2382 			 wds_deleted);
2383 	}
2384 }
2385 
2386 #else
2387 static void
2388 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2389 			  uint32_t free_wds_count)
2390 {
2391 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2392 
2393 	qdf_spin_lock_bh(&soc->ast_lock);
2394 
2395 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2396 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2397 
2398 		if (ast_entry->is_mapped)
2399 			soc->ast_table[ast_entry->ast_idx] = NULL;
2400 
2401 		dp_peer_free_ast_entry(soc, ast_entry);
2402 	}
2403 
2404 	peer->self_ast_entry = NULL;
2405 	qdf_spin_unlock_bh(&soc->ast_lock);
2406 }
2407 #endif
2408 
2409 /**
2410  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2411  * @soc: soc handle
2412  * @peer: peer handle
2413  * @vdev_id: vdev_id
2414  * @mac_addr: mac address of the AST entry to search and delete
2415  *
2416  * Find the ast entry from the peer list using the mac address and free
2417  * the entry.
2418  *
2419  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2420  */
2421 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2422 					 struct dp_peer *peer,
2423 					 uint8_t vdev_id,
2424 					 uint8_t *mac_addr)
2425 {
2426 	struct dp_ast_entry *ast_entry;
2427 	void *cookie = NULL;
2428 	txrx_ast_free_cb cb = NULL;
2429 
2430 	/*
2431 	 * release the reference only if it is mapped
2432 	 * to ast_table
2433 	 */
2434 
2435 	qdf_spin_lock_bh(&soc->ast_lock);
2436 
2437 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2438 	if (!ast_entry) {
2439 		qdf_spin_unlock_bh(&soc->ast_lock);
2440 		return QDF_STATUS_E_NOENT;
2441 	} else if (ast_entry->is_mapped) {
2442 		soc->ast_table[ast_entry->ast_idx] = NULL;
2443 	}
2444 
2445 	cb = ast_entry->callback;
2446 	cookie = ast_entry->cookie;
2447 
2449 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2450 
2451 	dp_peer_free_ast_entry(soc, ast_entry);
2452 
2453 	qdf_spin_unlock_bh(&soc->ast_lock);
2454 
2455 	if (cb) {
2456 		cb(soc->ctrl_psoc,
2457 		   dp_soc_to_cdp_soc(soc),
2458 		   cookie,
2459 		   CDP_TXRX_AST_DELETED);
2460 	}
2461 
2462 	return QDF_STATUS_SUCCESS;
2463 }
2464 
2465 void dp_peer_find_hash_erase(struct dp_soc *soc)
2466 {
2467 	int i;
2468 
2469 	/*
2470 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2471 	 * it's known that the soc is no longer in use.
2472 	 */
2473 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2474 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2475 			struct dp_peer *peer, *peer_next;
			int mod_id;
2476 
2477 			/*
2478 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2479 			 * memory access violation after peer is freed
2480 			 */
2481 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2482 				hash_list_elem, peer_next) {
2483 				/*
2484 				 * Don't remove the peer from the hash table -
2485 				 * that would modify the list we are currently
2486 				 * traversing, and it's not necessary anyway.
2487 				 */
2488 				/*
2489 				 * Artificially adjust the peer's ref count to
2490 				 * 1, so it will get deleted by
2491 				 * dp_peer_unref_delete.
2492 				 */
2493 				/* set to zero */
2494 				qdf_atomic_init(&peer->ref_cnt);
2495 				for (mod_id = 0; mod_id < DP_MOD_ID_MAX; mod_id++)
2496 					qdf_atomic_init(&peer->mod_refs[mod_id]);
2497 				/* incr to one */
2498 				qdf_atomic_inc(&peer->ref_cnt);
2499 				qdf_atomic_inc(&peer->mod_refs
2500 						[DP_MOD_ID_CONFIG]);
2501 				dp_peer_unref_delete(peer,
2502 						     DP_MOD_ID_CONFIG);
2503 			}
2504 		}
2505 	}
2506 }
2507 
2508 void dp_peer_ast_table_detach(struct dp_soc *soc)
2509 {
2510 	if (soc->ast_table) {
2511 		qdf_mem_free(soc->ast_table);
2512 		soc->ast_table = NULL;
2513 	}
2514 }
2515 
2516 void dp_peer_find_map_detach(struct dp_soc *soc)
2517 {
2518 	struct dp_peer *peer = NULL;
2519 	uint32_t i = 0;
2520 
2521 	if (soc->peer_id_to_obj_map) {
2522 		for (i = 0; i < soc->max_peer_id; i++) {
2523 			peer = soc->peer_id_to_obj_map[i];
2524 			if (peer)
2525 				dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2526 		}
2527 		qdf_mem_free(soc->peer_id_to_obj_map);
2528 		soc->peer_id_to_obj_map = NULL;
2529 		qdf_spinlock_destroy(&soc->peer_map_lock);
2530 	}
2531 }
2532 
2533 #ifndef AST_OFFLOAD_ENABLE
2534 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2535 {
2536 	QDF_STATUS status;
2537 
2538 	status = dp_peer_find_map_attach(soc);
2539 	if (!QDF_IS_STATUS_SUCCESS(status))
2540 		return status;
2541 
2542 	status = dp_peer_find_hash_attach(soc);
2543 	if (!QDF_IS_STATUS_SUCCESS(status))
2544 		goto map_detach;
2545 
2546 	status = dp_peer_ast_table_attach(soc);
2547 	if (!QDF_IS_STATUS_SUCCESS(status))
2548 		goto hash_detach;
2549 
2550 	status = dp_peer_ast_hash_attach(soc);
2551 	if (!QDF_IS_STATUS_SUCCESS(status))
2552 		goto ast_table_detach;
2553 
2554 	status = dp_peer_mec_hash_attach(soc);
2555 	if (QDF_IS_STATUS_SUCCESS(status)) {
2556 		dp_soc_wds_attach(soc);
2557 		return status;
2558 	}
2559 
2560 	dp_peer_ast_hash_detach(soc);
2561 ast_table_detach:
2562 	dp_peer_ast_table_detach(soc);
2563 hash_detach:
2564 	dp_peer_find_hash_detach(soc);
2565 map_detach:
2566 	dp_peer_find_map_detach(soc);
2567 
2568 	return status;
2569 }
2570 #else
2571 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2572 {
2573 	QDF_STATUS status;
2574 
2575 	status = dp_peer_find_map_attach(soc);
2576 	if (!QDF_IS_STATUS_SUCCESS(status))
2577 		return status;
2578 
2579 	status = dp_peer_find_hash_attach(soc);
2580 	if (!QDF_IS_STATUS_SUCCESS(status))
2581 		goto map_detach;
2582 
2583 	return status;
2584 map_detach:
2585 	dp_peer_find_map_detach(soc);
2586 
2587 	return status;
2588 }
2589 #endif
2590 
2591 #ifdef REO_SHARED_QREF_TABLE_EN
2592 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2593 					struct dp_peer *peer)
2594 {
2595 	uint8_t tid;
2596 	uint16_t peer_id;
2597 	uint32_t max_list_size;
2598 
2599 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
2600 
2601 	peer_id = peer->peer_id;
2602 
2603 	if (peer_id > soc->max_peer_id)
2604 		return;
2605 	if (IS_MLO_DP_LINK_PEER(peer))
2606 		return;
2607 
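	/* Record this qref deletion (peer_id, timestamp, chip_id) in a
	 * circular history list sized by qref_control_size.
	 */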
2608 	if (max_list_size) {
2609 		unsigned long curr_ts = qdf_get_system_timestamp();
2610 		struct dp_peer *primary_peer = peer;
2611 		uint16_t chip_id = 0xFFFF;
2612 		uint32_t qref_index;
2613 
2614 		qref_index = soc->shared_qaddr_del_idx;
2615 
2616 		soc->list_shared_qaddr_del[qref_index].peer_id =
2617 							  primary_peer->peer_id;
2618 		soc->list_shared_qaddr_del[qref_index].ts_qaddr_del = curr_ts;
2619 		soc->list_shared_qaddr_del[qref_index].chip_id = chip_id;
2620 		soc->shared_qaddr_del_idx++;
2621 
2622 		if (soc->shared_qaddr_del_idx == max_list_size)
2623 			soc->shared_qaddr_del_idx = 0;
2624 	}
2625 
2626 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2627 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2628 			hal_reo_shared_qaddr_write(soc->hal_soc,
2629 						   peer_id, tid, 0);
2630 		}
2631 	}
2632 }
2633 #endif
2634 
2635 /**
2636  * dp_peer_find_add_id() - map peer_id with peer
2637  * @soc: soc handle
2638  * @peer_mac_addr: peer mac address
2639  * @peer_id: peer id to be mapped
2640  * @hw_peer_id: HW ast index
2641  * @vdev_id: vdev_id
2642  * @peer_type: peer type (link or MLD)
2643  *
2644  * Return: peer on success,
2645  *         NULL on failure
2646  */
2647 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2648 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2649 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2650 {
2651 	struct dp_peer *peer;
2652 	struct cdp_peer_info peer_info = { 0 };
2653 
2654 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2655 	/* check if there's already a peer object with this MAC address */
2656 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2657 				 false, peer_type);
2658 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2659 	dp_peer_debug("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2660 		      soc, peer, peer_id, vdev_id,
2661 		      QDF_MAC_ADDR_REF(peer_mac_addr));
2662 
2663 	if (peer) {
2664 		/* peer's ref count was already incremented by
2665 		 * peer_find_hash_find
2666 		 */
2667 		dp_peer_info("%pK: ref_cnt: %d", soc,
2668 			     qdf_atomic_read(&peer->ref_cnt));
2669 
2670 		/*
2671 		 * If the peer is in logical delete, CP triggered the delete
2672 		 * before the map was received; ignore this event.
2673 		 */
2674 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2675 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2676 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2677 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2678 				 vdev_id);
2679 			return NULL;
2680 		}
2681 
2682 		if (peer->peer_id == HTT_INVALID_PEER) {
2683 			if (!IS_MLO_DP_MLD_PEER(peer))
2684 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2685 								   peer_id);
2686 		} else {
2687 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2688 			QDF_ASSERT(0);
2689 			return NULL;
2690 		}
2691 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2692 		if (soc->arch_ops.dp_partner_chips_map)
2693 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2694 
2695 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2696 		return peer;
2697 	}
2698 
2699 	return NULL;
2700 }
2701 
2702 #ifdef WLAN_FEATURE_11BE_MLO
2703 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
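/* With a reduced peer-id field width, mask the FW-assigned peer id to the
 * configured width and set the ML-peer valid bit just above it.
 */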
2704 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2705 {
2706 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2707 }
2708 #else
2709 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2710 {
2711 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2712 }
2713 #endif
2714 
2715 QDF_STATUS
2716 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2717 			   uint8_t *peer_mac_addr,
2718 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2719 			   struct dp_mlo_link_info *mlo_link_info)
2720 {
2721 	struct dp_peer *peer = NULL;
2722 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2723 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2724 	uint8_t vdev_id = 0;
2725 	uint8_t is_wds = 0;
2726 	int i;
2727 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2728 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2729 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2730 	struct dp_soc *primary_soc = NULL;
2731 
2732 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2733 					       NULL, peer_mac_addr,
2734 					       1, peer_id, ml_peer_id, 0,
2735 					       vdev_id);
2736 
2737 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2738 		soc, peer_id, ml_peer_id,
2739 		QDF_MAC_ADDR_REF(peer_mac_addr));
2740 
2741 	/* Get corresponding vdev ID for the peer based
2742 	 * on chip ID obtained from mlo peer_map event
2743 	 */
2744 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2745 		if (mlo_link_info[i].peer_chip_id == dp_get_chip_id(soc)) {
2746 			vdev_id = mlo_link_info[i].vdev_id;
2747 			break;
2748 		}
2749 	}
2750 
2751 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2752 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2753 	if (peer) {
2754 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2755 		    qdf_mem_cmp(peer->mac_addr.raw,
2756 				peer->vdev->mld_mac_addr.raw,
2757 				QDF_MAC_ADDR_SIZE) != 0) {
2758 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2759 			peer->bss_peer = 1;
2760 			if (peer->txrx_peer)
2761 				peer->txrx_peer->bss_peer = 1;
2762 		}
2763 
2764 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2765 			peer->vdev->bss_ast_hash = ast_hash;
2766 			peer->vdev->bss_ast_idx = hw_peer_id;
2767 		}
2768 
2769 		/* Add an ast entry in case the self ast entry was
2770 		 * deleted due to a DP CP sync issue.
2771 		 *
2772 		 * self_ast_entry is modified in the peer create
2773 		 * and peer unmap paths, which cannot run in
2774 		 * parallel with peer map; no lock is needed before
2775 		 * referring to it.
2776 		 */
2777 		if (!peer->self_ast_entry) {
2778 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2779 				QDF_MAC_ADDR_REF(peer_mac_addr));
2780 			dp_peer_add_ast(soc, peer,
2781 					peer_mac_addr,
2782 					type, 0);
2783 		}
2784 		/* If peer setup and hence rx_tid setup got called
2785 		 * before htt peer map then Qref write to LUT did not
2786 		 * happen in rx_tid setup as peer_id was invalid.
2787 		 * So defer Qref write to peer map handler. Check if
2788 		 * rx_tid qdesc for tid 0 is already setup and perform
2789 		 * qref write to LUT for Tid 0 and 16.
2790 		 *
2791 		 * Peer map could be obtained on assoc link, hence
2792 		 * change to primary link's soc.
2793 		 */
2794 		primary_soc = peer->vdev->pdev->soc;
2795 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
2796 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
2797 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2798 						   ml_peer_id,
2799 						   0,
2800 						   peer->rx_tid[0].hw_qdesc_paddr);
2801 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2802 						   ml_peer_id,
2803 						   DP_NON_QOS_TID,
2804 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2805 		}
2806 	}
2807 
2808 	if (!primary_soc)
2809 		primary_soc = soc;
2810 
2811 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2812 			      vdev_id, ast_hash, is_wds);
2813 
2814 	/*
2815 	 * If AST offload and host AST DB is enabled, populate AST entries on
2816 	 * host based on mlo peer map event from FW
2817 	 */
2818 	if (peer && soc->ast_offload_support && soc->host_ast_db_enable) {
2819 		dp_peer_host_add_map_ast(primary_soc, ml_peer_id, peer_mac_addr,
2820 					 hw_peer_id, vdev_id,
2821 					 ast_hash, is_wds);
2822 	}
2823 
2824 	return err;
2825 }
2826 #endif
2827 
2828 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2829 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2830 			      uint8_t *peer_mac_addr)
2831 {
2832 	struct dp_vdev *vdev = NULL;
2833 
2834 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2835 	if (vdev) {
2836 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2837 				QDF_MAC_ADDR_SIZE) == 0) {
2838 			vdev->roaming_peer_status =
2839 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2840 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
2841 				     QDF_MAC_ADDR_SIZE);
2842 		}
2843 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2844 	}
2845 }
2846 #endif
2847 
2848 #ifdef WLAN_SUPPORT_PPEDS
2849 static void
2850 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2851 				     bool peer_map)
2852 {
2853 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
2854 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2855 								   peer_map);
2856 }
2857 #else
2858 static void
2859 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2860 				     bool peer_map)
2861 {
2862 }
2863 #endif
2864 
2865 QDF_STATUS
2866 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2867 		       uint16_t hw_peer_id, uint8_t vdev_id,
2868 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2869 		       uint8_t is_wds)
2870 {
2871 	struct dp_peer *peer = NULL;
2872 	struct dp_vdev *vdev = NULL;
2873 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2874 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2875 
2876 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2877 					       NULL, peer_mac_addr, 1, peer_id,
2878 					       0, 0, vdev_id);
2879 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2880 		soc, peer_id, hw_peer_id,
2881 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2882 
2883 	/* For a peer map event for a WDS ast entry, get the peer
2884 	 * from the obj map.
2885 	 */
2886 	if (is_wds) {
2887 		if (!soc->ast_offload_support) {
2888 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2889 						     DP_MOD_ID_HTT);
2890 
2891 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2892 					      hw_peer_id,
2893 					      vdev_id, ast_hash, is_wds);
2894 			if (peer)
2895 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2896 		}
2897 	} else {
2898 		/*
2899 		 * It's the responsibility of the CP and FW to ensure
2900 		 * that peer is created successfully. Ideally DP should
2901 		 * not hit the below condition for directly associated
2902 		 * peers.
2903 		 */
2904 		if ((!soc->ast_offload_support) &&
2905 		    (hw_peer_id >=
2906 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2907 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2908 			qdf_assert_always(0);
2909 		}
2910 
2911 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2912 					   hw_peer_id, vdev_id,
2913 					   CDP_LINK_PEER_TYPE);
2914 
2915 		if (peer) {
2916 			bool peer_map = true;
2917 
2918 			/* Update ast_hash and ast_idx at the peer level */
2919 			peer->ast_hash = ast_hash;
2920 			peer->ast_idx = hw_peer_id;
2921 			vdev = peer->vdev;
2922 			/* Only for a STA vdev whose peer is not a TDLS peer */
2923 			if (wlan_op_mode_sta == vdev->opmode &&
2924 			    !peer->is_tdls_peer) {
2925 				if (qdf_mem_cmp(peer->mac_addr.raw,
2926 						vdev->mac_addr.raw,
2927 						QDF_MAC_ADDR_SIZE) != 0) {
2928 					dp_info("%pK: STA vdev bss_peer", soc);
2929 					peer->bss_peer = 1;
2930 					if (peer->txrx_peer)
2931 						peer->txrx_peer->bss_peer = 1;
2932 				}
2933 
2934 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2935 					ast_hash, hw_peer_id);
2936 				vdev->bss_ast_hash = ast_hash;
2937 				vdev->bss_ast_idx = hw_peer_id;
2938 
2939 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2940 								     peer_map);
2941 			}
2942 
2943 			/* Add an ast entry in case the self ast entry was
2944 			 * deleted due to a DP CP sync issue.
2945 			 *
2946 			 * self_ast_entry is modified in the peer create
2947 			 * and peer unmap paths, which cannot run in
2948 			 * parallel with peer map; no lock is needed before
2949 			 * referring to it.
2950 			 */
2951 			if (!soc->ast_offload_support &&
2952 				!peer->self_ast_entry) {
2953 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2954 					QDF_MAC_ADDR_REF(peer_mac_addr));
2955 				dp_peer_add_ast(soc, peer,
2956 						peer_mac_addr,
2957 						type, 0);
2958 			}
2959 
2960 			/* If peer setup and hence rx_tid setup got called
2961 			 * before htt peer map then Qref write to LUT did
2962 			 * not happen in rx_tid setup as peer_id was invalid.
2963 			 * So defer Qref write to peer map handler. Check if
2964 			 * rx_tid qdesc for tid 0 is already setup and perform qref
2965 			 * write to LUT for Tid 0 and 16.
2966 			 */
2967 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2968 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2969 			    !IS_MLO_DP_LINK_PEER(peer)) {
2970 				add_entry_write_list(soc, peer, 0);
2971 				hal_reo_shared_qaddr_write(soc->hal_soc,
2972 							   peer_id,
2973 							   0,
2974 							   peer->rx_tid[0].hw_qdesc_paddr);
2975 				add_entry_write_list(soc, peer, DP_NON_QOS_TID);
2976 				hal_reo_shared_qaddr_write(soc->hal_soc,
2977 							   peer_id,
2978 							   DP_NON_QOS_TID,
2979 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2980 			}
2981 		}
2982 
2983 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2984 				      vdev_id, ast_hash, is_wds);
2985 	}
2986 
2987 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2988 
2989 	/*
2990 	 * If AST offload and host AST DB is enabled, populate AST entries on
2991 	 * host based on peer map event from FW
2992 	 */
2993 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2994 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
2995 					 hw_peer_id, vdev_id,
2996 					 ast_hash, is_wds);
2997 	}
2998 
2999 	return err;
3000 }
3001 
3002 void
3003 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3004 			 uint8_t vdev_id, uint8_t *mac_addr,
3005 			 uint8_t is_wds, uint32_t free_wds_count)
3006 {
3007 	struct dp_peer *peer;
3008 	struct dp_vdev *vdev = NULL;
3009 
3010 	/*
3011 	 * If FW AST offload is enabled and host AST DB is enabled,
3012 	 * the AST entries are created during peer map from FW.
3013 	 */
3014 	if (soc->ast_offload_support && is_wds) {
3015 		if (!soc->host_ast_db_enable)
3016 			return;
3017 	}
3018 
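	/* Use __dp_peer_get_ref_by_id so that the reference is obtained
	 * even if the peer is already in logical delete state.
	 */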
3019 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3020 
3021 	/*
3022 	 * Currently peer IDs are assigned for vdevs as well as peers.
3023 	 * If the peer ID is for a vdev, then the peer pointer stored
3024 	 * in peer_id_to_obj_map will be NULL.
3025 	 */
3026 	if (!peer) {
3027 		dp_err("Received unmap event for invalid peer_id %u",
3028 		       peer_id);
3029 		return;
3030 	}
3031 
3032 	vdev = peer->vdev;
3033 
3034 	if (peer->txrx_peer) {
3035 		struct cdp_txrx_peer_params_update params = {0};
3036 
3037 		params.vdev_id = vdev->vdev_id;
3038 		params.peer_mac = peer->mac_addr.raw;
3039 		params.chip_id = dp_get_chip_id(soc);
3040 		params.pdev_id = vdev->pdev->pdev_id;
3041 
3042 		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
3043 				     (void *)&params, peer_id,
3044 				     WDI_NO_VAL, vdev->pdev->pdev_id);
3045 	}
3046 
3047 	/*
3048 	 * In the scenario where the assoc peer soc id is different from
3049 	 * the primary soc id, reset soc to point to the primary psoc.
3050 	 * Since the map is received on the primary soc, the unmap should
3051 	 * also delete the ast on the primary soc.
3052 	 */
3053 	soc = peer->vdev->pdev->soc;
3054 
3055 	/* If V2 peer map messages are enabled, the AST entry has to be
3056 	 * freed here.
3057 	 */
3058 	if (is_wds) {
3059 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3060 						   mac_addr)) {
3061 			dp_peer_unmap_ipa_evt(soc, peer_id, vdev_id, mac_addr);
3062 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3063 			return;
3064 		}
3065 
3066 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3067 			  peer, peer->peer_id,
3068 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3069 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3070 			  is_wds);
3071 
3072 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3073 		return;
3074 	}
3075 
3076 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3077 
3078 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3079 					       peer, mac_addr, 0, peer_id,
3080 					       0, 0, vdev_id);
3081 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3082 		soc, peer_id, peer);
3083 
3084 	/* Clear entries in Qref LUT */
3085 	/* TODO: Check if this needs to be called from dp_peer_delete
3086 	 * for the MLO case, if there is a race between a new peer id
3087 	 * assignment and the peer unmap for an MLD peer with the same
3088 	 * peer id not yet having been received.
3089 	 */
3090 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3091 
3092 	vdev = peer->vdev;
3093 
3094 	/* only if peer is in STA mode and not tdls peer */
3095 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3096 		bool peer_map = false;
3097 
3098 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3099 	}
3100 
3101 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3102 
3103 	if (soc->arch_ops.dp_partner_chips_unmap)
3104 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3105 
3106 	peer->peer_id = HTT_INVALID_PEER;
3107 
3108 	/* Reset ast flow mapping table */
3111 	if (!soc->ast_offload_support)
3112 		dp_peer_reset_flowq_map(peer);
3113 
3114 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3115 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3116 				peer_id, vdev_id, mac_addr);
3117 	}
3118 
3119 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3120 
3121 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3122 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3123 	/*
3124 	 * Remove a reference to the peer.
3125 	 * If there are no more references, delete the peer object.
3126 	 */
3127 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3128 }
3129 
3130 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
3131 enum dp_bands dp_freq_to_band(qdf_freq_t freq)
3132 {
3133 	if (REG_IS_24GHZ_CH_FREQ(freq))
3134 		return DP_BAND_2GHZ;
3135 	else if (REG_IS_5GHZ_FREQ(freq) || REG_IS_49GHZ_FREQ(freq))
3136 		return DP_BAND_5GHZ;
3137 	else if (REG_IS_6GHZ_FREQ(freq))
3138 		return DP_BAND_6GHZ;
3139 	return DP_BAND_INVALID;
3140 }
3141 
3142 void dp_map_link_id_band(struct dp_peer *peer)
3143 {
3144 	struct dp_txrx_peer *txrx_peer = NULL;
3145 	enum dp_bands band;
3146 
3147 	txrx_peer = dp_get_txrx_peer(peer);
3148 	if (txrx_peer) {
3149 		band = dp_freq_to_band(peer->freq);
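		/* NOTE: band[] is indexed at link_id + 1, so index 0 is
		 * never written for a valid link ID.
		 */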
3150 		txrx_peer->band[peer->link_id + 1] = band;
3151 		dp_info("Band(Freq: %u): %u mapped to Link ID: %u",
3152 			peer->freq, band, peer->link_id);
3153 	} else {
3154 		dp_info("txrx_peer NULL for peer: " QDF_MAC_ADDR_FMT,
3155 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3156 	}
3157 }
3158 
3159 QDF_STATUS
3160 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info)
3161 {
3162 	struct dp_peer *peer = NULL;
3163 	struct cdp_peer_info peer_info = { 0 };
3164 
3165 	QDF_ASSERT(info->peer_id <= soc->max_peer_id);
3166 
3167 	DP_PEER_INFO_PARAMS_INIT(&peer_info, info->vdev_id, info->peer_mac_addr,
3168 				 false, CDP_LINK_PEER_TYPE);
3169 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
3170 
3171 	if (!peer) {
3172 		dp_err("peer NULL, id %u, MAC " QDF_MAC_ADDR_FMT ", vdev_id %u",
3173 		       info->peer_id, QDF_MAC_ADDR_REF(info->peer_mac_addr),
3174 		       info->vdev_id);
3175 
3176 		return QDF_STATUS_E_FAILURE;
3177 	}
3178 
3179 	peer->link_id = info->link_id;
3180 	peer->link_id_valid = info->link_id_valid;
3181 
3182 	if (peer->freq)
3183 		dp_map_link_id_band(peer);
3184 
3185 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3186 
3187 	return QDF_STATUS_SUCCESS;
3188 }
3189 #endif
3190 #ifdef WLAN_FEATURE_11BE_MLO
3191 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3192 {
3193 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3194 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3195 	uint8_t vdev_id = DP_VDEV_ALL;
3196 	uint8_t is_wds = 0;
3197 
3198 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3199 					       NULL, mac_addr, 0, peer_id,
3200 					       0, 0, vdev_id);
3201 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3202 		soc, peer_id);
3203 
3204 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3205 				 mac_addr, is_wds,
3206 				 DP_PEER_WDS_COUNT_INVALID);
3207 }
3208 #endif
3209 
3210 #ifndef AST_OFFLOAD_ENABLE
3211 void
3212 dp_peer_find_detach(struct dp_soc *soc)
3213 {
3214 	dp_soc_wds_detach(soc);
3215 	dp_peer_find_map_detach(soc);
3216 	dp_peer_find_hash_detach(soc);
3217 	dp_peer_ast_hash_detach(soc);
3218 	dp_peer_ast_table_detach(soc);
3219 	dp_peer_mec_hash_detach(soc);
3220 }
3221 #else
3222 void
3223 dp_peer_find_detach(struct dp_soc *soc)
3224 {
3225 	dp_peer_find_map_detach(soc);
3226 	dp_peer_find_hash_detach(soc);
3227 }
3228 #endif
3229 
3230 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3231 {
3232 	dp_peer_rx_tid_setup(peer);
3233 
3234 	peer->active_ba_session_cnt = 0;
3235 	peer->hw_buffer_size = 0;
3236 	peer->kill_256_sessions = 0;
3237 
3238 	/*
3239 	 * Set security defaults: no PN check, no security. The target may
3240 	 * send a HTT SEC_IND message to overwrite these defaults.
3241 	 */
3242 	if (peer->txrx_peer)
3243 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
3244 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
3245 				cdp_sec_type_none;
3246 }
3247 
3248 #ifdef WLAN_FEATURE_11BE_MLO
3249 static void dp_peer_rx_init_reorder_queue(struct dp_pdev *pdev,
3250 					  struct dp_peer *peer)
3251 {
3252 	struct dp_soc *soc = pdev->soc;
3253 	struct dp_peer *mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
3254 	struct dp_rx_tid *rx_tid = NULL;
3255 	uint32_t ba_window_size, tid;
3256 	QDF_STATUS status;
3257 
3258 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3259 		return;
3260 
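	/* Set up the non-QoS TID first and then the QoS TIDs, mirroring
	 * the BA window sizes already negotiated on the MLD peer.
	 */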
3261 	tid = DP_NON_QOS_TID;
3262 	rx_tid = &mld_peer->rx_tid[tid];
3263 	ba_window_size = rx_tid->ba_status == DP_RX_BA_ACTIVE ?
3264 					rx_tid->ba_win_size : 1;
3265 	status = dp_peer_rx_reorder_queue_setup(soc, peer, tid, ba_window_size);
3266 	if (QDF_IS_STATUS_ERROR(status)) {
3267 		dp_info("peer %pK " QDF_MAC_ADDR_FMT " type %d failed to setup tid %d ba_win_size %d",
3268 			peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3269 			peer->peer_type, tid, ba_window_size);
3270 		/* Do not return, continue for other tids. */
3271 	}
3272 
3273 	for (tid = 0; tid < DP_MAX_TIDS - 1; tid++) {
3274 		rx_tid = &mld_peer->rx_tid[tid];
3275 		ba_window_size = rx_tid->ba_status == DP_RX_BA_ACTIVE ?
3276 						rx_tid->ba_win_size : 1;
3277 		status = dp_peer_rx_reorder_queue_setup(soc, peer,
3278 							tid, ba_window_size);
3279 		if (QDF_IS_STATUS_ERROR(status)) {
3280 			dp_info("peer %pK " QDF_MAC_ADDR_FMT " type %d failed to setup tid %d ba_win_size %d",
3281 				peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3282 				peer->peer_type, tid, ba_window_size);
3283 			/* Do not return, continue for other tids. */
3284 		}
3285 	}
3286 }
3287 
3288 void dp_peer_rx_init_wrapper(struct dp_pdev *pdev, struct dp_peer *peer,
3289 			     struct cdp_peer_setup_info *setup_info)
3290 {
3291 	if (setup_info && !setup_info->is_first_link)
3292 		dp_peer_rx_init_reorder_queue(pdev, peer);
3293 	else
3294 		dp_peer_rx_init(pdev, peer);
3295 }
3296 #else
3297 void dp_peer_rx_init_wrapper(struct dp_pdev *pdev, struct dp_peer *peer,
3298 			     struct cdp_peer_setup_info *setup_info)
3299 {
3300 	dp_peer_rx_init(pdev, peer);
3301 }
3302 #endif
3303 
3304 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3305 {
3306 	enum wlan_op_mode vdev_opmode;
3307 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3308 	struct dp_pdev *pdev = vdev->pdev;
3309 	struct dp_soc *soc = pdev->soc;
3310 
3311 	/* save vdev-related members in case the vdev is freed */
3312 	vdev_opmode = vdev->opmode;
3313 
3314 	if (!IS_MLO_DP_MLD_PEER(peer))
3315 		dp_monitor_peer_tx_cleanup(vdev, peer);
3316 
3317 	/* cleanup the Rx reorder queues for this peer */
3318 	if (vdev_opmode != wlan_op_mode_monitor)
3319 		dp_peer_rx_cleanup(vdev, peer);
3320 
3321 	dp_peer_rx_tids_destroy(peer);
3322 
3323 	if (IS_MLO_DP_LINK_PEER(peer))
3324 		dp_link_peer_del_mld_peer(peer);
3325 	if (IS_MLO_DP_MLD_PEER(peer))
3326 		dp_mld_peer_deinit_link_peers_info(peer);
3327 
3328 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3329 		     QDF_MAC_ADDR_SIZE);
3330 
3331 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3332 		soc->cdp_soc.ol_ops->peer_unref_delete(
3333 				soc->ctrl_psoc,
3334 				vdev->pdev->pdev_id,
3335 				peer->mac_addr.raw, vdev_mac_addr,
3336 				vdev_opmode);
3337 }
3338 
3339 QDF_STATUS
3340 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3341 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3342 			  bool is_unicast)
3343 {
3344 	struct dp_peer *peer =
3345 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3346 						       peer_mac, 0, vdev_id,
3347 						       DP_MOD_ID_CDP);
3348 	int sec_index;
3349 
3350 	if (!peer) {
3351 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3352 		return QDF_STATUS_E_FAILURE;
3353 	}
3354 
3355 	if (!peer->txrx_peer) {
3356 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3357 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
3358 		return QDF_STATUS_E_FAILURE;
3359 	}
3360 
3361 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3362 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3363 		     is_unicast ? "ucast" : "mcast", sec_type);
3364 
3365 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3366 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3367 
3368 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3369 
3370 	return QDF_STATUS_SUCCESS;
3371 }
3372 
3373 void
3374 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3375 		      enum cdp_sec_type sec_type, int is_unicast,
3376 		      u_int32_t *michael_key,
3377 		      u_int32_t *rx_pn)
3378 {
3379 	struct dp_peer *peer;
3380 	struct dp_txrx_peer *txrx_peer;
3381 	int sec_index;
3382 
3383 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3384 	if (!peer) {
3385 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3386 			    peer_id);
3387 		return;
3388 	}
3389 	txrx_peer = dp_get_txrx_peer(peer);
3390 	if (!txrx_peer) {
3391 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
3392 			    peer_id);
3393 		return;
3394 	}
3395 
3396 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3397 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3398 		     is_unicast ? "ucast" : "mcast", sec_type);
3399 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3400 
3401 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3402 #ifdef notyet /* TODO: See if this is required for defrag support */
3403 	/* michael key only valid for TKIP, but for simplicity,
3404 	 * copy it anyway
3405 	 */
3406 	qdf_mem_copy(
3407 		&peer->txrx_peer->security[sec_index].michael_key[0],
3408 		michael_key,
3409 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
3410 #ifdef BIG_ENDIAN_HOST
3411 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
3412 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
3413 #endif /* BIG_ENDIAN_HOST */
3414 #endif
3415 
3416 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3417 	if (sec_type != cdp_sec_type_wapi) {
3418 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3419 	} else {
3420 		for (i = 0; i < DP_MAX_TIDS; i++) {
3421 			/*
3422 			 * Setting PN valid bit for WAPI sec_type,
3423 			 * since WAPI PN has to be started with predefined value
3424 			 */
3425 			peer->tids_last_pn_valid[i] = 1;
3426 			qdf_mem_copy(
3427 				(u_int8_t *) &peer->tids_last_pn[i],
3428 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3429 			peer->tids_last_pn[i].pn128[1] =
3430 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3431 			peer->tids_last_pn[i].pn128[0] =
3432 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3433 		}
3434 	}
3435 #endif
3436 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3437 	 * all security types and last pn for WAPI) once REO command API
3438 	 * is available
3439 	 */
3440 
3441 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3442 }
3443 
3444 #ifdef QCA_PEER_EXT_STATS
3445 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
3446 					 struct dp_txrx_peer *txrx_peer)
3447 {
3448 	uint8_t tid, ctx_id;
3449 
3450 	if (!soc || !txrx_peer) {
3451 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
3452 		return QDF_STATUS_E_INVAL;
3453 	}
3454 
3455 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3456 		return QDF_STATUS_SUCCESS;
3457 
3458 	/*
3459 	 * Allocate memory for peer extended stats.
3460 	 */
3461 	txrx_peer->delay_stats =
3462 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
3463 	if (!txrx_peer->delay_stats) {
3464 		dp_err("Peer extended stats obj alloc failed!!");
3465 		return QDF_STATUS_E_NOMEM;
3466 	}
3467 
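	/* Initialize the per-TID, per-context delay histograms: SW enqueue
	 * and HW completion delay for Tx, reap-to-stack delay for Rx.
	 */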
3468 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3469 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3470 			struct cdp_delay_tx_stats *tx_delay =
3471 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
3472 			struct cdp_delay_rx_stats *rx_delay =
3473 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
3474 
3475 			dp_hist_init(&tx_delay->tx_swq_delay,
3476 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3477 			dp_hist_init(&tx_delay->hwtx_delay,
3478 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3479 			dp_hist_init(&rx_delay->to_stack_delay,
3480 				     CDP_HIST_TYPE_REAP_STACK);
3481 		}
3482 	}
3483 
3484 	return QDF_STATUS_SUCCESS;
3485 }
3486 
3487 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
3488 				     struct dp_txrx_peer *txrx_peer)
3489 {
3490 	if (!txrx_peer) {
3491 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3492 		return;
3493 	}
3494 
3495 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3496 		return;
3497 
3498 	if (!txrx_peer->delay_stats)
3499 		return;
3500 
3501 	qdf_mem_free(txrx_peer->delay_stats);
3502 	txrx_peer->delay_stats = NULL;
3503 }
3504 
3505 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3506 {
3507 	if (txrx_peer->delay_stats)
3508 		qdf_mem_zero(txrx_peer->delay_stats,
3509 			     sizeof(struct dp_peer_delay_stats));
3510 }
3511 #endif
3512 
3513 #ifdef WLAN_PEER_JITTER
3514 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
3515 					  struct dp_txrx_peer *txrx_peer)
3516 {
3517 	if (!pdev || !txrx_peer) {
3518 		dp_warn("Null pdev or peer");
3519 		return QDF_STATUS_E_INVAL;
3520 	}
3521 
3522 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3523 		return QDF_STATUS_SUCCESS;
3524 
3525 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3526 		/*
3527 		 * Allocate memory on per tid basis when nss is enabled
3528 		 */
3529 		txrx_peer->jitter_stats =
3530 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3531 					* DP_MAX_TIDS);
3532 	} else {
3533 		/*
3534 		 * Allocate memory on per tid per ring basis
3535 		 */
3536 		txrx_peer->jitter_stats =
3537 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3538 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3539 	}
3540 
3541 	if (!txrx_peer->jitter_stats) {
3542 		dp_warn("Jitter stats obj alloc failed!!");
3543 		return QDF_STATUS_E_NOMEM;
3544 	}
3545 
3546 	return QDF_STATUS_SUCCESS;
3547 }
3548 
3549 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
3550 				      struct dp_txrx_peer *txrx_peer)
3551 {
3552 	if (!pdev || !txrx_peer) {
3553 		dp_warn("Null pdev or peer");
3554 		return;
3555 	}
3556 
3557 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3558 		return;
3559 
3560 	if (txrx_peer->jitter_stats) {
3561 		qdf_mem_free(txrx_peer->jitter_stats);
3562 		txrx_peer->jitter_stats = NULL;
3563 	}
3564 }
3565 
3566 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3567 {
3568 	struct cdp_peer_tid_stats *jitter_stats = NULL;
3569 
3570 	if (!txrx_peer) {
3571 		dp_warn("Null peer");
3572 		return;
3573 	}
3574 
3575 	if (!wlan_cfg_is_peer_jitter_stats_enabled(
3576 				txrx_peer->vdev->pdev->soc->wlan_cfg_ctx))
3578 		return;
3579 
3580 	jitter_stats = txrx_peer->jitter_stats;
3581 	if (!jitter_stats)
3582 		return;
3583 
3584 	if (wlan_cfg_get_dp_pdev_nss_enabled(
3585 				txrx_peer->vdev->pdev->wlan_cfg_ctx))
3586 		qdf_mem_zero(jitter_stats,
3587 			     sizeof(struct cdp_peer_tid_stats) *
3588 			     DP_MAX_TIDS);
3590 	else
3591 		qdf_mem_zero(jitter_stats,
3592 			     sizeof(struct cdp_peer_tid_stats) *
3593 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3595 }
3596 #endif
3597 
3598 #ifdef DP_PEER_EXTENDED_API
/**
 * dp_peer_set_bw() - Set bandwidth and MPDU retry count threshold for a peer
 * @soc: DP soc handle
 * @txrx_peer: Core txrx_peer handle
 * @set_bw: bandwidth to be set for this peer connection, from enum cdp_peer_bw
 *
 * Return: None
 */
3607 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
3608 			   enum cdp_peer_bw set_bw)
3609 {
3610 	if (!txrx_peer)
3611 		return;
3612 
3613 	txrx_peer->bw = set_bw;
3614 
3615 	switch (set_bw) {
3616 	case CDP_160_MHZ:
3617 	case CDP_320_MHZ:
3618 		txrx_peer->mpdu_retry_threshold =
3619 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
3620 		break;
3621 	case CDP_20_MHZ:
3622 	case CDP_40_MHZ:
3623 	case CDP_80_MHZ:
3624 	default:
3625 		txrx_peer->mpdu_retry_threshold =
3626 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
3627 		break;
3628 	}
3629 
	dp_info("Peer id %u: BW %u, mpdu retry threshold %u",
3631 		txrx_peer->peer_id, txrx_peer->bw,
3632 		txrx_peer->mpdu_retry_threshold);
3633 }
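
/*
 * Example (editorial): a 160/320 MHz association selects
 * mpdu_retry_threshold_2, while 20/40/80 MHz and unknown widths fall back
 * to mpdu_retry_threshold_1:
 *
 *	dp_peer_set_bw(soc, peer->txrx_peer, CDP_320_MHZ);
 */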
3634 
3635 #ifdef WLAN_FEATURE_11BE_MLO
3636 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3637 			    struct ol_txrx_desc_type *sta_desc)
3638 {
3639 	struct dp_peer *peer;
3640 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3641 
3642 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3643 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3644 
3645 	if (!peer)
3646 		return QDF_STATUS_E_FAULT;
3647 
3648 	qdf_spin_lock_bh(&peer->peer_info_lock);
3649 	peer->state = OL_TXRX_PEER_STATE_CONN;
3650 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3651 
3652 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3653 
3654 	dp_rx_flush_rx_cached(peer, false);
3655 
3656 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
3658 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
3659 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
3660 		peer->mld_peer->state = peer->state;
3661 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
3662 		dp_rx_flush_rx_cached(peer->mld_peer, false);
3663 	}
3664 
3665 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3666 
3667 	return QDF_STATUS_SUCCESS;
3668 }
3669 
3670 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3671 				enum ol_txrx_peer_state state)
3672 {
3673 	struct dp_peer *peer;
3674 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3675 
	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
3678 	if (!peer) {
3679 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
3680 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3681 		return QDF_STATUS_E_FAILURE;
3682 	}
3683 	peer->state = state;
3684 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3685 
3686 	if (peer->txrx_peer)
3687 		peer->txrx_peer->authorize = peer->authorize;
3688 
3689 	dp_peer_info("peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
3690 		     peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3691 		     peer->state);
3692 
3693 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
		peer->mld_peer->state = peer->state;
		if (peer->mld_peer->txrx_peer)
			peer->mld_peer->txrx_peer->authorize = peer->authorize;
3696 		dp_peer_info("mld peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
3697 			     peer->mld_peer,
3698 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
3699 			     peer->mld_peer->state);
3700 	}
3701 
3702 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3703 	 * Decrement it here.
3704 	 */
3705 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3706 
3707 	return QDF_STATUS_SUCCESS;
3708 }
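
/*
 * Illustrative call (editorial): authorizing a peer from the control
 * path; on the first MLO link peer, the state and authorize flags are
 * mirrored to the MLD peer as shown above.
 *
 *	dp_peer_state_update(soc_hdl, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */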
3709 #else
3710 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3711 			    struct ol_txrx_desc_type *sta_desc)
3712 {
3713 	struct dp_peer *peer;
3714 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3715 
3716 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3717 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3718 
3719 	if (!peer)
3720 		return QDF_STATUS_E_FAULT;
3721 
3722 	qdf_spin_lock_bh(&peer->peer_info_lock);
3723 	peer->state = OL_TXRX_PEER_STATE_CONN;
3724 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3725 
3726 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3727 
3728 	dp_rx_flush_rx_cached(peer, false);
3729 
3730 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3731 
3732 	return QDF_STATUS_SUCCESS;
3733 }
3734 
3735 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3736 				enum ol_txrx_peer_state state)
3737 {
3738 	struct dp_peer *peer;
3739 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3740 
	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
3743 	if (!peer) {
		dp_peer_err("%pK: Failed to find peer [" QDF_MAC_ADDR_FMT "]",
3745 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3746 		return QDF_STATUS_E_FAILURE;
3747 	}
3748 	peer->state = state;
3749 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3750 
3751 	if (peer->txrx_peer)
3752 		peer->txrx_peer->authorize = peer->authorize;
3753 
3754 	dp_info("peer %pK state %d", peer, peer->state);
3755 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3756 	 * Decrement it here.
3757 	 */
3758 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3759 
3760 	return QDF_STATUS_SUCCESS;
3761 }
3762 #endif
3763 
3764 QDF_STATUS
3765 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3766 	      struct qdf_mac_addr peer_addr)
3767 {
3768 	struct dp_peer *peer;
3769 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3770 
3771 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
3772 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3773 
3774 	if (!peer)
3775 		return QDF_STATUS_E_FAULT;
3776 	if (!peer->valid) {
3777 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3778 		return QDF_STATUS_E_FAULT;
3779 	}
3780 
3781 	dp_clear_peer_internal(soc, peer);
3782 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3783 	return QDF_STATUS_SUCCESS;
3784 }
3785 
3786 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3787 			 uint8_t *vdev_id)
3788 {
3789 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3790 	struct dp_peer *peer =
3791 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3792 				       DP_MOD_ID_CDP);
3793 
3794 	if (!peer)
3795 		return QDF_STATUS_E_FAILURE;
3796 
3797 	dp_info("peer %pK vdev %pK vdev id %d",
3798 		peer, peer->vdev, peer->vdev->vdev_id);
3799 	*vdev_id = peer->vdev->vdev_id;
3800 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3801 	 * Decrement it here.
3802 	 */
3803 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3804 
3805 	return QDF_STATUS_SUCCESS;
3806 }
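
/*
 * Illustrative usage (editorial):
 *
 *	uint8_t vdev_id;
 *
 *	if (dp_get_vdevid(soc_hdl, peer_mac, &vdev_id) ==
 *	    QDF_STATUS_SUCCESS)
 *		use_vdev(vdev_id);	// use_vdev() is hypothetical
 */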
3807 
3808 struct cdp_vdev *
3809 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3810 			 struct qdf_mac_addr peer_addr)
3811 {
3812 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3813 	struct dp_peer *peer = NULL;
3814 	struct cdp_vdev *vdev = NULL;
3815 
3816 	if (!pdev) {
3817 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
3818 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
3819 		return NULL;
3820 	}
3821 
3822 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
3823 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
			  QDF_MAC_ADDR_REF(peer_addr.bytes));
		return NULL;
	}
3830 
3831 	vdev = (struct cdp_vdev *)peer->vdev;
3832 
3833 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3834 	return vdev;
3835 }
3836 
3837 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3838 {
3839 	struct dp_peer *peer = peer_handle;
3840 
3841 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3842 	return (struct cdp_vdev *)peer->vdev;
3843 }
3844 
3845 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3846 {
3847 	struct dp_peer *peer = peer_handle;
3848 	uint8_t *mac;
3849 
	mac = peer->mac_addr.raw;
	dp_info("peer %pK mac " QDF_MAC_ADDR_FMT,
		peer, QDF_MAC_ADDR_REF(mac));
	return mac;
3854 }
3855 
3856 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3857 		      uint8_t *peer_mac)
3858 {
3859 	enum ol_txrx_peer_state peer_state;
3860 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3861 	struct cdp_peer_info peer_info = { 0 };
3862 	struct dp_peer *peer;
3863 	struct dp_peer *tgt_peer;
3864 
3865 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
3866 				 false, CDP_WILD_PEER_TYPE);
3867 
3868 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
3870 	if (!peer)
3871 		return OL_TXRX_PEER_STATE_INVALID;
3872 
3873 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
3874 	peer_state = tgt_peer->state;
3875 
	dp_peer_debug("peer %pK tgt_peer: %pK peer MAC "
		      QDF_MAC_ADDR_FMT " tgt peer MAC "
		      QDF_MAC_ADDR_FMT " tgt peer state %d",
		      peer, tgt_peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		      QDF_MAC_ADDR_REF(tgt_peer->mac_addr.raw),
		      tgt_peer->state);
3882 
3883 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3884 
3885 	return peer_state;
3886 }
3887 
3888 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3889 {
3890 	int i;
3891 
3892 	/* point the freelist to the first ID */
3893 	pdev->local_peer_ids.freelist = 0;
3894 
3895 	/* link each ID to the next one */
3896 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3897 		pdev->local_peer_ids.pool[i] = i + 1;
3898 		pdev->local_peer_ids.map[i] = NULL;
3899 	}
3900 
3901 	/* link the last ID to itself, to mark the end of the list */
3902 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3903 	pdev->local_peer_ids.pool[i] = i;
3904 
3905 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3906 	dp_info("Peer pool init");
3907 }
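
/*
 * Freelist layout after init (editorial illustration, assuming
 * OL_TXRX_NUM_LOCAL_PEER_IDS == 4): freelist = 0 and
 * pool[] = {1, 2, 3, 4, 4}. Each slot holds the index of the next free
 * ID, and the extra trailing slot points to itself as the end-of-list
 * marker, which is why pool[] needs OL_TXRX_NUM_LOCAL_PEER_IDS + 1
 * entries.
 */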
3908 
3909 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3910 {
3911 	int i;
3912 
3913 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3914 	i = pdev->local_peer_ids.freelist;
3915 	if (pdev->local_peer_ids.pool[i] == i) {
3916 		/* the list is empty, except for the list-end marker */
3917 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3918 	} else {
3919 		/* take the head ID and advance the freelist */
3920 		peer->local_id = i;
3921 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3922 		pdev->local_peer_ids.map[i] = peer;
3923 	}
3924 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3925 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3926 }
3927 
3928 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3929 {
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS))
		return;
3935 
3936 	/* put this ID on the head of the freelist */
3937 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3938 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3939 	pdev->local_peer_ids.freelist = i;
3940 	pdev->local_peer_ids.map[i] = NULL;
3941 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3942 }
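
/*
 * Worked example (editorial), continuing the illustration above: two
 * allocations hand out IDs 0 and 1 and leave freelist = 2; freeing ID 0
 * then sets pool[0] = 2 and freelist = 0, so ID 0 is reused first.
 */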
3943 
3944 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3945 				uint8_t vdev_id, uint8_t *peer_addr)
3946 {
3947 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3948 	struct dp_peer *peer = NULL;
3949 
3950 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
3951 				      DP_MOD_ID_CDP);
3952 	if (!peer)
3953 		return false;
3954 
3955 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3956 
3957 	return true;
3958 }
3959 
3960 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3961 				      uint8_t vdev_id, uint8_t *peer_addr,
3962 				      uint16_t max_bssid)
3963 {
3964 	int i;
3965 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3966 	struct dp_peer *peer = NULL;
3967 
3968 	for (i = 0; i < max_bssid; i++) {
3969 		/* Need to check vdevs other than the vdev_id */
3970 		if (vdev_id == i)
3971 			continue;
3972 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
3973 					      DP_MOD_ID_CDP);
3974 		if (peer) {
			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
3976 			       QDF_MAC_ADDR_REF(peer_addr), i);
3977 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3978 			return true;
3979 		}
3980 	}
3981 
3982 	return false;
3983 }
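
/*
 * Illustrative usage (editorial): reject an association when the MAC is
 * already a peer on some other vdev (the error code choice here is
 * hypothetical):
 *
 *	if (dp_find_peer_exist_on_other_vdev(soc_hdl, vdev_id, peer_addr,
 *					     max_bssid))
 *		return QDF_STATUS_E_EXISTS;
 */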
3984 
3985 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3986 			      uint8_t *peer_mac, bool val)
3987 {
3988 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3989 	struct dp_peer *peer = NULL;
3990 
3991 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3992 				      DP_MOD_ID_CDP);
3993 	if (!peer) {
		dp_err("Failed to find peer for: " QDF_MAC_ADDR_FMT,
3995 		       QDF_MAC_ADDR_REF(peer_mac));
3996 		return;
3997 	}
3998 
	dp_info("Set tdls flag %d for peer: " QDF_MAC_ADDR_FMT,
4000 		val, QDF_MAC_ADDR_REF(peer_mac));
4001 	peer->is_tdls_peer = val;
4002 
4003 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4004 }
4005 #endif
4006 
4007 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4008 			uint8_t *peer_addr)
4009 {
4010 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4011 	struct dp_peer *peer = NULL;
4012 
4013 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
4014 				      DP_MOD_ID_CDP);
4015 	if (peer) {
4016 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4017 		return true;
4018 	}
4019 
4020 	return false;
4021 }
4022 
4023 QDF_STATUS
4024 dp_set_michael_key(struct cdp_soc_t *soc,
4025 		   uint8_t vdev_id,
4026 		   uint8_t *peer_mac,
4027 		   bool is_unicast, uint32_t *key)
4028 {
4029 	uint8_t sec_index = is_unicast ? 1 : 0;
4030 	struct dp_peer *peer =
4031 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
4032 						       peer_mac, 0, vdev_id,
4033 						       DP_MOD_ID_CDP);
4034 
4035 	if (!peer) {
4036 		dp_peer_err("%pK: peer not found ", soc);
4037 		return QDF_STATUS_E_FAILURE;
4038 	}
4039 
4040 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
4041 		     key, IEEE80211_WEP_MICLEN);
4042 
4043 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4044 
4045 	return QDF_STATUS_SUCCESS;
4046 }
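
/*
 * Illustrative usage (editorial): installing the unicast Michael key.
 * The key buffer must hold IEEE80211_WEP_MICLEN (8) bytes, i.e. two
 * uint32_t words here.
 *
 *	uint32_t mic_key[2] = { 0x01020304, 0x05060708 };	// example data
 *
 *	dp_set_michael_key(soc, vdev_id, peer_mac, true, mic_key);
 */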
4047 
4049 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
4050 					   struct dp_vdev *vdev,
4051 					   enum dp_mod_id mod_id)
4052 {
4053 	struct dp_peer *peer = NULL;
4054 
4055 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4056 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4057 		if (peer->bss_peer)
4058 			break;
4059 	}
4060 
4061 	if (!peer) {
4062 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4063 		return NULL;
4064 	}
4065 
4066 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4067 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4068 		return peer;
4069 	}
4070 
	/* Could not take a reference (peer is being deleted); do not hand
	 * back an unreferenced pointer.
	 */
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	return NULL;
4073 }
4074 
4075 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
4076 						struct dp_vdev *vdev,
4077 						enum dp_mod_id mod_id)
4078 {
4079 	struct dp_peer *peer;
4080 
4081 	if (vdev->opmode != wlan_op_mode_sta)
4082 		return NULL;
4083 
4084 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4085 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4086 		if (peer->sta_self_peer)
4087 			break;
4088 	}
4089 
4090 	if (!peer) {
4091 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4092 		return NULL;
4093 	}
4094 
4095 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4096 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4097 		return peer;
4098 	}
4099 
	/* Could not take a reference (peer is being deleted); do not hand
	 * back an unreferenced pointer.
	 */
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	return NULL;
4102 }
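
/*
 * Editorial note on the two lookup helpers above: a non-NULL return means
 * the caller holds a mod_id reference on the peer and must release it with
 * dp_peer_unref_delete(peer, mod_id) when done.
 */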
4103 
4104 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4105 			 uint8_t *peer_mac)
4106 {
4107 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4108 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4109 							      vdev_id,
4110 							      DP_MOD_ID_CDP);
4111 	struct dp_txrx_peer *txrx_peer;
4112 	uint8_t tid;
4113 	struct dp_rx_tid_defrag *defrag_rx_tid;
4114 
4115 	if (!peer)
4116 		return;
4117 
4118 	if (!peer->txrx_peer)
4119 		goto fail;
4120 
4121 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
4122 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4123 
4124 	txrx_peer = peer->txrx_peer;
4125 
4126 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4127 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
4128 
4129 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4130 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
4131 		dp_rx_reorder_flush_frag(txrx_peer, tid);
4132 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4133 	}
4134 fail:
4135 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4136 }
4137 
4138 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
4139 {
4140 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
4141 						     DP_MOD_ID_HTT);
4142 
4143 	if (peer) {
4144 		/*
4145 		 * Decrement the peer ref which is taken as part of
4146 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
4147 		 */
4148 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4149 
4150 		return true;
4151 	}
4152 
4153 	return false;
4154 }
4155 
4156 qdf_export_symbol(dp_peer_find_by_id_valid);
4157 
4158 #ifdef QCA_MULTIPASS_SUPPORT
4159 void dp_peer_multipass_list_remove(struct dp_peer *peer)
4160 {
4161 	struct dp_vdev *vdev = peer->vdev;
4162 	struct dp_txrx_peer *tpeer = NULL;
	bool found = false;
4164 
4165 	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
4166 	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
4167 		if (tpeer == peer->txrx_peer) {
			found = true;
4169 			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
4170 				     mpass_peer_list_elem);
4171 			break;
4172 		}
4173 	}
4174 
4175 	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
4176 
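	/* Drop the reference taken when the peer was added to the
	 * multipass list in dp_peer_multipass_list_add().
	 */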
4177 	if (found)
4178 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4179 }
4180 
/**
 * dp_peer_multipass_list_add() - add peer to the vdev multipass list
 * @soc: soc handle
 * @peer_mac: MAC address of the peer
 * @vdev_id: vdev id for the peer
 * @vlan_id: VLAN ID assigned to the peer
 *
 * Return: void
 */
4190 static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
4191 				       uint8_t vdev_id, uint16_t vlan_id)
4192 {
4193 	struct dp_peer *peer =
4194 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4195 						       vdev_id,
4196 						       DP_MOD_ID_TX_MULTIPASS);
4197 
4198 	if (qdf_unlikely(!peer)) {
4199 		qdf_err("NULL peer");
4200 		return;
4201 	}
4202 
4203 	if (qdf_unlikely(!peer->txrx_peer))
4204 		goto fail;
4205 
4206 	/* If peer already exists in vdev multipass list, do not add it.
4207 	 * This may happen if key install comes twice or re-key
4208 	 * happens for a peer.
4209 	 */
4210 	if (peer->txrx_peer->vlan_id) {
		dp_debug("peer already added to vdev multipass list "
			 "MAC: " QDF_MAC_ADDR_FMT " vlan: %d",
4213 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4214 			 peer->txrx_peer->vlan_id);
4215 		goto fail;
4216 	}
4217 
	/*
	 * The reference taken above via dp_peer_get_tgt_peer_hash_find()
	 * is held while the peer stays on the multipass list and is
	 * dropped in dp_peer_multipass_list_remove().
	 */
4222 	peer->txrx_peer->vlan_id = vlan_id;
4223 	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4224 	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
4225 			  peer->txrx_peer,
4226 			  mpass_peer_list_elem);
4227 	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4228 	return;
4229 
4230 fail:
4231 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4232 }
4233 
4234 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
4235 			 uint8_t vdev_id, uint8_t *peer_mac,
4236 			 uint16_t vlan_id)
4237 {
4238 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4239 	struct dp_vdev *vdev =
4240 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
4241 				      DP_MOD_ID_TX_MULTIPASS);
4242 
4243 	dp_info("vdev_id %d, vdev %pK, multipass_en %d, peer_mac " QDF_MAC_ADDR_FMT " vlan %d",
4244 		vdev_id, vdev, vdev ? vdev->multipass_en : 0,
4245 		QDF_MAC_ADDR_REF(peer_mac), vlan_id);
4246 	if (vdev && vdev->multipass_en) {
4247 		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
4248 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
4249 	}
4250 }
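
/*
 * Illustrative flow (editorial): on key install for a multipass-enabled
 * vdev, the control path tags the peer with its VLAN, e.g.:
 *
 *	dp_peer_set_vlan_id(cdp_soc, vdev_id, peer_mac, vlan_id);
 *
 * The list reference taken in dp_peer_multipass_list_add() is dropped in
 * dp_peer_multipass_list_remove() when the peer is deleted.
 */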
4251 #endif /* QCA_MULTIPASS_SUPPORT */
4252