xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 2dab6c754cd486221fe9943c1fe9baf5923f4d82)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef FEATURE_AST
48 #ifdef BYPASS_OL_OPS
49 /**
50  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
51  * @soc: DP soc structure pointer
52  * @peer: dp peer structure
53  * @dest_macaddr: MAC address of ast node
54  * @flags: wds or hmwds
55  * @type: type from enum cdp_txrx_ast_entry_type
56  *
57  * This API is used by WDS source port learning function to
58  * add a new AST entry in the fw.
59  *
60  * Return: 0 on success, error code otherwise.
61  */
62 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
63 				    struct dp_peer *peer,
64 				    const uint8_t *dest_macaddr,
65 				    uint32_t flags,
66 				    uint8_t type)
67 {
68 	QDF_STATUS status;
69 
70 	status = target_if_add_wds_entry(soc->ctrl_psoc,
71 					 peer->vdev->vdev_id,
72 					 peer->mac_addr.raw,
73 					 dest_macaddr,
74 					 WMI_HOST_WDS_FLAG_STATIC,
75 					 type);
76 
77 	return qdf_status_to_os_return(status);
78 }
79 
80 /**
81  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
82  * @soc: DP soc structure pointer
83  * @peer: dp peer structure
84  * @dest_macaddr: MAC address of ast node
85  * @flags: wds or hmwds
86  *
87  * This API is used by update the peer mac address for the ast
88  * in the fw.
89  *
90  * Return: 0 on success, error code otherwise.
91  */
92 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
93 				       struct dp_peer *peer,
94 				       uint8_t *dest_macaddr,
95 				       uint32_t flags)
96 {
97 	QDF_STATUS status;
98 
99 	status = target_if_update_wds_entry(soc->ctrl_psoc,
100 					    peer->vdev->vdev_id,
101 					    dest_macaddr,
102 					    peer->mac_addr.raw,
103 					    WMI_HOST_WDS_FLAG_STATIC);
104 
105 	return qdf_status_to_os_return(status);
106 }
107 
108 /**
109  * dp_del_wds_entry_wrapper() - delete a WSD AST entry
110  * @soc: DP soc structure pointer
111  * @vdev_id: vdev_id
112  * @wds_macaddr: MAC address of ast node
113  * @type: type from enum cdp_txrx_ast_entry_type
114  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
115  *
116  * This API is used to delete an AST entry from fw
117  *
118  * Return: None
119  */
120 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
121 			      uint8_t vdev_id,
122 			      uint8_t *wds_macaddr,
123 			      uint8_t type,
124 			      uint8_t delete_in_fw)
125 {
126 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
127 				wds_macaddr, type, delete_in_fw);
128 }
129 #else
130 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
131 				    struct dp_peer *peer,
132 				    const uint8_t *dest_macaddr,
133 				    uint32_t flags,
134 				    uint8_t type)
135 {
136 	int status;
137 
138 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
139 					soc->ctrl_psoc,
140 					peer->vdev->vdev_id,
141 					peer->mac_addr.raw,
142 					peer->peer_id,
143 					dest_macaddr,
144 					peer->mac_addr.raw,
145 					flags,
146 					type);
147 
148 	return status;
149 }
150 
151 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
152 				       struct dp_peer *peer,
153 				       uint8_t *dest_macaddr,
154 				       uint32_t flags)
155 {
156 	int status;
157 
158 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
159 				soc->ctrl_psoc,
160 				peer->vdev->vdev_id,
161 				dest_macaddr,
162 				peer->mac_addr.raw,
163 				flags);
164 
165 	return status;
166 }
167 
168 void dp_del_wds_entry_wrapper(struct dp_soc *soc,
169 			      uint8_t vdev_id,
170 			      uint8_t *wds_macaddr,
171 			      uint8_t type,
172 			      uint8_t delete_in_fw)
173 {
174 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
175 						vdev_id,
176 						wds_macaddr,
177 						type,
178 						delete_in_fw);
179 }
180 #endif /* BYPASS_OL_OPS */
181 #else
/* FEATURE_AST disabled: deleting WDS AST entries is a no-op stub */
void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
}
189 #endif /* FEATURE_AST */
190 
191 #ifdef FEATURE_WDS
192 static inline bool
193 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
194 				    struct dp_ast_entry *ast_entry)
195 {
196 	/* if peer map v2 is enabled we are not freeing ast entry
197 	 * here and it is supposed to be freed in unmap event (after
198 	 * we receive delete confirmation from target)
199 	 *
200 	 * if peer_id is invalid we did not get the peer map event
201 	 * for the peer free ast entry from here only in this case
202 	 */
203 
204 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
205 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
206 		return true;
207 
208 	return false;
209 }
210 #else
/* FEATURE_WDS disabled: report the free-in-unmap path as unsupported */
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	return false;
}

/* FEATURE_WDS disabled: soc-level WDS attach is a no-op */
void dp_soc_wds_attach(struct dp_soc *soc)
{
}

/* FEATURE_WDS disabled: soc-level WDS detach is a no-op */
void dp_soc_wds_detach(struct dp_soc *soc)
{
}
225 #endif
226 
227 #ifdef QCA_SUPPORT_WDS_EXTENDED
228 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
229 {
230 	struct dp_vdev *vdev = peer->vdev;
231 	struct dp_txrx_peer *txrx_peer;
232 
233 	if (!vdev->wds_ext_enabled)
234 		return false;
235 
236 	txrx_peer = dp_get_txrx_peer(peer);
237 	if (!txrx_peer)
238 		return false;
239 
240 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
241 				&txrx_peer->wds_ext.init))
242 		return true;
243 
244 	return false;
245 }
246 #else
/* QCA_SUPPORT_WDS_EXTENDED disabled: no peer is a wds-ext peer */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	return false;
}
251 #endif
252 
253 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
254 {
255 	uint32_t max_ast_index;
256 
257 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
258 	/* allocate ast_table for ast entry to ast_index map */
259 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
260 	soc->ast_table = qdf_mem_malloc(max_ast_index *
261 					sizeof(struct dp_ast_entry *));
262 	if (!soc->ast_table) {
263 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
264 		return QDF_STATUS_E_NOMEM;
265 	}
266 	return QDF_STATUS_SUCCESS; /* success */
267 }
268 
269 /**
270  * dp_find_peer_by_macaddr() - Finding the peer from mac address provided.
271  * @soc: soc handle
272  * @mac_addr: MAC address to be used to find peer
273  * @vdev_id: VDEV id
274  * @mod_id: MODULE ID
275  *
276  * Return: struct dp_peer
277  */
278 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
279 					uint8_t vdev_id, enum dp_mod_id mod_id)
280 {
281 	bool ast_ind_disable = wlan_cfg_get_ast_indication_disable(
282 							    soc->wlan_cfg_ctx);
283 	struct cdp_peer_info peer_info = {0};
284 
285 	if ((!soc->ast_offload_support) || (!ast_ind_disable)) {
286 		struct dp_ast_entry *ast_entry = NULL;
287 		uint16_t peer_id;
288 
289 		qdf_spin_lock_bh(&soc->ast_lock);
290 		ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
291 							    vdev_id);
292 
293 		if (!ast_entry) {
294 			qdf_spin_unlock_bh(&soc->ast_lock);
295 			dp_err("NULL ast entry");
296 			return NULL;
297 		}
298 
299 		peer_id = ast_entry->peer_id;
300 		qdf_spin_unlock_bh(&soc->ast_lock);
301 
302 		if (peer_id == HTT_INVALID_PEER)
303 			return NULL;
304 
305 		return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
306 	}
307 
308 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac_addr, false,
309 				 CDP_WILD_PEER_TYPE);
310 	return dp_peer_hash_find_wrapper(soc, &peer_info, mod_id);
311 }
312 
313 /**
314  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
315  * @soc: soc handle
316  *
317  * return: QDF_STATUS
318  */
319 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
320 {
321 	uint32_t max_peers, peer_map_size;
322 
323 	max_peers = soc->max_peer_id;
324 	/* allocate the peer ID -> peer object map */
325 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
326 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
327 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
328 	if (!soc->peer_id_to_obj_map) {
329 		dp_peer_err("%pK: peer map memory allocation failed", soc);
330 		return QDF_STATUS_E_NOMEM;
331 	}
332 
333 	/*
334 	 * The peer_id_to_obj_map doesn't really need to be initialized,
335 	 * since elements are only used after they have been individually
336 	 * initialized.
337 	 * However, it is convenient for debugging to have all elements
338 	 * that are not in use set to 0.
339 	 */
340 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
341 
342 	qdf_spinlock_create(&soc->peer_map_lock);
343 	return QDF_STATUS_SUCCESS; /* success */
344 }
345 
346 #define DP_AST_HASH_LOAD_MULT  2
347 #define DP_AST_HASH_LOAD_SHIFT 0
348 
349 static inline uint32_t
350 dp_peer_find_hash_index(struct dp_soc *soc,
351 			union dp_align_mac_addr *mac_addr)
352 {
353 	uint32_t index;
354 
355 	index =
356 		mac_addr->align2.bytes_ab ^
357 		mac_addr->align2.bytes_cd ^
358 		mac_addr->align2.bytes_ef;
359 
360 	index ^= index >> soc->peer_hash.idx_bits;
361 	index &= soc->peer_hash.mask;
362 	return index;
363 }
364 
365 struct dp_peer *dp_peer_find_hash_find(
366 				struct dp_soc *soc, uint8_t *peer_mac_addr,
367 				int mac_addr_is_aligned, uint8_t vdev_id,
368 				enum dp_mod_id mod_id)
369 {
370 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
371 	uint32_t index;
372 	struct dp_peer *peer;
373 
374 	if (!soc->peer_hash.bins)
375 		return NULL;
376 
377 	if (mac_addr_is_aligned) {
378 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
379 	} else {
380 		qdf_mem_copy(
381 			&local_mac_addr_aligned.raw[0],
382 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
383 		mac_addr = &local_mac_addr_aligned;
384 	}
385 	index = dp_peer_find_hash_index(soc, mac_addr);
386 	qdf_spin_lock_bh(&soc->peer_hash_lock);
387 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
388 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
389 		    ((peer->vdev->vdev_id == vdev_id) ||
390 		     (vdev_id == DP_VDEV_ALL))) {
391 			/* take peer reference before returning */
392 			if (dp_peer_get_ref(soc, peer, mod_id) !=
393 						QDF_STATUS_SUCCESS)
394 				peer = NULL;
395 
396 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
397 			return peer;
398 		}
399 	}
400 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
401 	return NULL; /* failure */
402 }
403 
404 qdf_export_symbol(dp_peer_find_hash_find);
405 
406 #ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
 * @soc: soc handle
 *
 * Frees the link-peer hash bins (and destroys their lock), then asks the
 * arch layer to tear down the MLD peer hash when the op is provided.
 *
 * return: none
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}

	if (soc->arch_ops.mlo_peer_find_hash_detach)
		soc->arch_ops.mlo_peer_find_hash_detach(soc);
}
424 
425 /**
426  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
427  * @soc: soc handle
428  *
429  * return: QDF_STATUS
430  */
431 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
432 {
433 	int i, hash_elems, log2;
434 
435 	/* allocate the peer MAC address -> peer object hash table */
436 	hash_elems = soc->max_peers;
437 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
438 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
439 	log2 = dp_log2_ceil(hash_elems);
440 	hash_elems = 1 << log2;
441 
442 	soc->peer_hash.mask = hash_elems - 1;
443 	soc->peer_hash.idx_bits = log2;
444 	/* allocate an array of TAILQ peer object lists */
445 	soc->peer_hash.bins = qdf_mem_malloc(
446 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
447 	if (!soc->peer_hash.bins)
448 		return QDF_STATUS_E_NOMEM;
449 
450 	for (i = 0; i < hash_elems; i++)
451 		TAILQ_INIT(&soc->peer_hash.bins[i]);
452 
453 	qdf_spinlock_create(&soc->peer_hash_lock);
454 
455 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
456 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
457 			QDF_STATUS_SUCCESS)) {
458 		dp_peer_find_hash_detach(soc);
459 		return QDF_STATUS_E_NOMEM;
460 	}
461 	return QDF_STATUS_SUCCESS;
462 }
463 
464 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
465 {
466 	unsigned index;
467 
468 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
469 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
470 		qdf_spin_lock_bh(&soc->peer_hash_lock);
471 
472 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
473 							DP_MOD_ID_CONFIG))) {
474 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
475 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
476 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
477 			return;
478 		}
479 
480 		/*
481 		 * It is important to add the new peer at the tail of
482 		 * peer list with the bin index. Together with having
483 		 * the hash_find function search from head to tail,
484 		 * this ensures that if two entries with the same MAC address
485 		 * are stored, the one added first will be found first.
486 		 */
487 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
488 				  hash_list_elem);
489 
490 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
491 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
492 		if (soc->arch_ops.mlo_peer_find_hash_add)
493 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
494 	} else {
495 		dp_err("unknown peer type %d", peer->peer_type);
496 	}
497 }
498 
499 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
500 {
501 	unsigned index;
502 	struct dp_peer *tmppeer = NULL;
503 	int found = 0;
504 
505 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
506 
507 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
508 		/* Check if tail is not empty before delete*/
509 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
510 
511 		qdf_spin_lock_bh(&soc->peer_hash_lock);
512 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
513 			      hash_list_elem) {
514 			if (tmppeer == peer) {
515 				found = 1;
516 				break;
517 			}
518 		}
519 		QDF_ASSERT(found);
520 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
521 			     hash_list_elem);
522 
523 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
524 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
525 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
526 		if (soc->arch_ops.mlo_peer_find_hash_remove)
527 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
528 	} else {
529 		dp_err("unknown peer type %d", peer->peer_type);
530 	}
531 }
532 #else
533 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
534 {
535 	int i, hash_elems, log2;
536 
537 	/* allocate the peer MAC address -> peer object hash table */
538 	hash_elems = soc->max_peers;
539 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
540 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
541 	log2 = dp_log2_ceil(hash_elems);
542 	hash_elems = 1 << log2;
543 
544 	soc->peer_hash.mask = hash_elems - 1;
545 	soc->peer_hash.idx_bits = log2;
546 	/* allocate an array of TAILQ peer object lists */
547 	soc->peer_hash.bins = qdf_mem_malloc(
548 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
549 	if (!soc->peer_hash.bins)
550 		return QDF_STATUS_E_NOMEM;
551 
552 	for (i = 0; i < hash_elems; i++)
553 		TAILQ_INIT(&soc->peer_hash.bins[i]);
554 
555 	qdf_spinlock_create(&soc->peer_hash_lock);
556 	return QDF_STATUS_SUCCESS;
557 }
558 
559 static void dp_peer_find_hash_detach(struct dp_soc *soc)
560 {
561 	if (soc->peer_hash.bins) {
562 		qdf_mem_free(soc->peer_hash.bins);
563 		soc->peer_hash.bins = NULL;
564 		qdf_spinlock_destroy(&soc->peer_hash_lock);
565 	}
566 }
567 
568 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
569 {
570 	unsigned index;
571 
572 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
573 	qdf_spin_lock_bh(&soc->peer_hash_lock);
574 
575 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
576 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
577 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
578 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
579 		return;
580 	}
581 
582 	/*
583 	 * It is important to add the new peer at the tail of the peer list
584 	 * with the bin index.  Together with having the hash_find function
585 	 * search from head to tail, this ensures that if two entries with
586 	 * the same MAC address are stored, the one added first will be
587 	 * found first.
588 	 */
589 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
590 
591 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
592 }
593 
594 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
595 {
596 	unsigned index;
597 	struct dp_peer *tmppeer = NULL;
598 	int found = 0;
599 
600 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
601 	/* Check if tail is not empty before delete*/
602 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
603 
604 	qdf_spin_lock_bh(&soc->peer_hash_lock);
605 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
606 		if (tmppeer == peer) {
607 			found = 1;
608 			break;
609 		}
610 	}
611 	QDF_ASSERT(found);
612 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
613 
614 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
615 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
616 }
617 
618 
619 #endif/* WLAN_FEATURE_11BE_MLO */
620 
621 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
622 			   struct dp_peer *peer)
623 {
624 	/* only link peer will be added to vdev peer list */
625 	if (IS_MLO_DP_MLD_PEER(peer))
626 		return;
627 
628 	qdf_spin_lock_bh(&vdev->peer_list_lock);
629 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
630 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
631 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
632 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
633 		return;
634 	}
635 
636 	/* add this peer into the vdev's list */
637 	if (wlan_op_mode_sta == vdev->opmode)
638 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
639 	else
640 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
641 
642 	vdev->num_peers++;
643 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
644 }
645 
646 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
647 			      struct dp_peer *peer)
648 {
649 	uint8_t found = 0;
650 	struct dp_peer *tmppeer = NULL;
651 
652 	/* only link peer will be added to vdev peer list */
653 	if (IS_MLO_DP_MLD_PEER(peer))
654 		return;
655 
656 	qdf_spin_lock_bh(&vdev->peer_list_lock);
657 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
658 		if (tmppeer == peer) {
659 			found = 1;
660 			break;
661 		}
662 	}
663 
664 	if (found) {
665 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
666 			     peer_list_elem);
667 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
668 		vdev->num_peers--;
669 	} else {
670 		/*Ignoring the remove operation as peer not found*/
671 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
672 			      , soc, peer, vdev, &peer->vdev->peer_list);
673 	}
674 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
675 }
676 
677 void dp_txrx_peer_attach_add(struct dp_soc *soc,
678 			     struct dp_peer *peer,
679 			     struct dp_txrx_peer *txrx_peer)
680 {
681 	qdf_spin_lock_bh(&soc->peer_map_lock);
682 
683 	peer->txrx_peer = txrx_peer;
684 	txrx_peer->bss_peer = peer->bss_peer;
685 
686 	if (peer->peer_id == HTT_INVALID_PEER) {
687 		qdf_spin_unlock_bh(&soc->peer_map_lock);
688 		return;
689 	}
690 
691 	txrx_peer->peer_id = peer->peer_id;
692 
693 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
694 
695 	qdf_spin_unlock_bh(&soc->peer_map_lock);
696 }
697 
/**
 * dp_peer_find_id_to_obj_add() - record @peer in the peer_id -> object map
 * @soc: soc handle
 * @peer: peer to map
 * @peer_id: peer id to map it under
 *
 * Takes a DP_MOD_ID_CONFIG reference on @peer on behalf of the map; the
 * reference is released in dp_peer_find_id_to_obj_remove(). Asserts
 * fatally if @peer_id is already mapped to another peer.
 */
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	/* publish the id on the peer before exposing it via the map */
	peer->peer_id = peer_id;

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
		/* keep the txrx peer's cached id in sync */
		if (peer->txrx_peer)
			peer->txrx_peer->peer_id = peer_id;
	} else {
		/* Peer map event came for peer_id which
		 * is already mapped, this is not expected
		 */
		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
		       soc->peer_id_to_obj_map[peer_id]);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_assert_always(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
731 
732 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
733 				   uint16_t peer_id)
734 {
735 	struct dp_peer *peer = NULL;
736 	QDF_ASSERT(peer_id <= soc->max_peer_id);
737 
738 	qdf_spin_lock_bh(&soc->peer_map_lock);
739 	peer = soc->peer_id_to_obj_map[peer_id];
740 	if (!peer) {
741 		dp_err("unable to get peer during peer id obj map remove");
742 		qdf_spin_unlock_bh(&soc->peer_map_lock);
743 		return;
744 	}
745 	peer->peer_id = HTT_INVALID_PEER;
746 	if (peer->txrx_peer)
747 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
748 	soc->peer_id_to_obj_map[peer_id] = NULL;
749 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
750 	qdf_spin_unlock_bh(&soc->peer_map_lock);
751 }
752 
753 #ifdef FEATURE_MEC
754 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
755 {
756 	int log2, hash_elems, i;
757 
758 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
759 	hash_elems = 1 << log2;
760 
761 	soc->mec_hash.mask = hash_elems - 1;
762 	soc->mec_hash.idx_bits = log2;
763 
764 	dp_peer_info("%pK: max mec index: %d",
765 		     soc, DP_PEER_MAX_MEC_IDX);
766 
767 	/* allocate an array of TAILQ mec object lists */
768 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
769 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
770 							      dp_mec_entry)));
771 
772 	if (!soc->mec_hash.bins)
773 		return QDF_STATUS_E_NOMEM;
774 
775 	for (i = 0; i < hash_elems; i++)
776 		TAILQ_INIT(&soc->mec_hash.bins[i]);
777 
778 	return QDF_STATUS_SUCCESS;
779 }
780 
781 /**
782  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
783  * @soc: SoC handle
784  * @mac_addr: MAC address
785  *
786  * Return: MEC hash
787  */
788 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
789 					      union dp_align_mac_addr *mac_addr)
790 {
791 	uint32_t index;
792 
793 	index =
794 		mac_addr->align2.bytes_ab ^
795 		mac_addr->align2.bytes_cd ^
796 		mac_addr->align2.bytes_ef;
797 	index ^= index >> soc->mec_hash.idx_bits;
798 	index &= soc->mec_hash.mask;
799 	return index;
800 }
801 
/**
 * dp_peer_mec_hash_find_by_pdevid() - look up a MEC entry by pdev id + MAC
 * @soc: SoC handle
 * @pdev_id: pdev id the entry must belong to
 * @mec_mac_addr: (possibly unaligned) MAC address to match
 *
 * Walks the MEC hash bin for @mec_mac_addr and returns the entry whose
 * pdev id and MAC address both match.
 *
 * NOTE(review): the bin is traversed without taking soc->mec_lock; the
 * caller appears to be expected to hold it (dp_peer_mec_add_entry does)
 * — confirm for any new call site.
 *
 * Return: matching MEC entry, or NULL when none is found
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	/* copy into an aligned union so the align2 fields are usable */
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}
823 
824 /**
825  * dp_peer_mec_hash_add() - Add MEC entry into hash table
826  * @soc: SoC handle
827  * @mecentry: MEC entry
828  *
829  * This function adds the MEC entry into SoC MEC hash table
830  *
831  * Return: None
832  */
833 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
834 					struct dp_mec_entry *mecentry)
835 {
836 	uint32_t index;
837 
838 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
839 	qdf_spin_lock_bh(&soc->mec_lock);
840 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
841 	qdf_spin_unlock_bh(&soc->mec_lock);
842 }
843 
/**
 * dp_peer_mec_add_entry() - create (or refresh) a MEC entry for @mac_addr
 * @soc: SoC handle
 * @vdev: vdev the MEC frame was seen on
 * @mac_addr: source MAC of the MEC frame
 *
 * If an entry already exists for this pdev/MAC it is only marked active;
 * otherwise a new entry is allocated and inserted into the MEC hash.
 *
 * NOTE(review): the existence check and the insert are done under two
 * separate mec_lock critical sections; a concurrent caller could insert
 * the same MAC in the window in between — confirm callers serialize this.
 *
 * Return: QDF_STATUS_SUCCESS on insert, QDF_STATUS_E_ALREADY when the
 * entry existed, QDF_STATUS_E_INVAL/E_NOMEM on error.
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	/* cap the total number of MEC entries per soc */
	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
					 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		/* entry already present: just refresh its active flag */
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	/* dp_peer_mec_hash_add() takes mec_lock internally */
	dp_peer_mec_hash_add(soc, mecentry);

	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}
901 
/**
 * dp_peer_mec_detach_entry() - unlink a MEC entry and stage it for freeing
 * @soc: SoC handle
 * @mecentry: entry to detach
 * @ptr: TAILQ head of the caller's free list
 *
 * Removes @mecentry from the soc MEC hash and appends it to the caller's
 * free list; the memory is released later via dp_peer_mec_free_list().
 *
 * NOTE(review): no locking here — caller presumably holds soc->mec_lock;
 * confirm at call sites.
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}
913 
914 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
915 {
916 	struct dp_mec_entry *mecentry, *mecentry_next;
917 
918 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
919 
920 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
921 			   mecentry_next) {
922 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
923 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
924 		qdf_mem_free(mecentry);
925 		qdf_atomic_dec(&soc->mec_cnt);
926 		DP_STATS_INC(soc, mec.deleted, 1);
927 	}
928 }
929 
/* Flush all remaining MEC entries, then free the hash bin array. */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}
936 
/* Destroy the soc-level MEC lock (teardown counterpart of create). */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

/* Create the soc-level MEC lock guarding the MEC hash. */
void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
946 #else
/* FEATURE_MEC disabled: MEC hash attach is a successful no-op */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* FEATURE_MEC disabled: nothing to detach */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
955 #endif
956 
957 #ifdef FEATURE_AST
958 #ifdef WLAN_FEATURE_11BE_MLO
959 /**
960  * dp_peer_exist_on_pdev() - check if peer with mac address exist on pdev
961  *
962  * @soc: Datapath SOC handle
963  * @peer_mac_addr: peer mac address
964  * @mac_addr_is_aligned: is mac address aligned
965  * @pdev: Datapath PDEV handle
966  *
967  * Return: true if peer found else return false
968  */
969 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
970 				  uint8_t *peer_mac_addr,
971 				  int mac_addr_is_aligned,
972 				  struct dp_pdev *pdev)
973 {
974 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
975 	unsigned int index;
976 	struct dp_peer *peer;
977 	bool found = false;
978 
979 	if (mac_addr_is_aligned) {
980 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
981 	} else {
982 		qdf_mem_copy(
983 			&local_mac_addr_aligned.raw[0],
984 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
985 		mac_addr = &local_mac_addr_aligned;
986 	}
987 	index = dp_peer_find_hash_index(soc, mac_addr);
988 	qdf_spin_lock_bh(&soc->peer_hash_lock);
989 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
990 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
991 		    (peer->vdev->pdev == pdev)) {
992 			found = true;
993 			break;
994 		}
995 	}
996 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
997 
998 	if (found)
999 		return found;
1000 
1001 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1002 					  mac_addr_is_aligned, DP_VDEV_ALL,
1003 					  DP_MOD_ID_CDP);
1004 	if (peer) {
1005 		if (peer->vdev->pdev == pdev)
1006 			found = true;
1007 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1008 	}
1009 
1010 	return found;
1011 }
1012 #else
1013 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1014 				  uint8_t *peer_mac_addr,
1015 				  int mac_addr_is_aligned,
1016 				  struct dp_pdev *pdev)
1017 {
1018 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1019 	unsigned int index;
1020 	struct dp_peer *peer;
1021 	bool found = false;
1022 
1023 	if (mac_addr_is_aligned) {
1024 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1025 	} else {
1026 		qdf_mem_copy(
1027 			&local_mac_addr_aligned.raw[0],
1028 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1029 		mac_addr = &local_mac_addr_aligned;
1030 	}
1031 	index = dp_peer_find_hash_index(soc, mac_addr);
1032 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1033 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1034 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1035 		    (peer->vdev->pdev == pdev)) {
1036 			found = true;
1037 			break;
1038 		}
1039 	}
1040 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1041 	return found;
1042 }
1043 #endif /* WLAN_FEATURE_11BE_MLO */
1044 
1045 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1046 {
1047 	int i, hash_elems, log2;
1048 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1049 
1050 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1051 		DP_AST_HASH_LOAD_SHIFT);
1052 
1053 	log2 = dp_log2_ceil(hash_elems);
1054 	hash_elems = 1 << log2;
1055 
1056 	soc->ast_hash.mask = hash_elems - 1;
1057 	soc->ast_hash.idx_bits = log2;
1058 
1059 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1060 		     soc, hash_elems, max_ast_idx);
1061 
1062 	/* allocate an array of TAILQ peer object lists */
1063 	soc->ast_hash.bins = qdf_mem_malloc(
1064 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1065 				dp_ast_entry)));
1066 
1067 	if (!soc->ast_hash.bins)
1068 		return QDF_STATUS_E_NOMEM;
1069 
1070 	for (i = 0; i < hash_elems; i++)
1071 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1072 
1073 	return QDF_STATUS_SUCCESS;
1074 }
1075 
1076 /**
1077  * dp_peer_ast_cleanup() - cleanup the references
1078  * @soc: SoC handle
1079  * @ast: ast entry
1080  *
1081  * Return: None
1082  */
1083 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1084 				       struct dp_ast_entry *ast)
1085 {
1086 	txrx_ast_free_cb cb = ast->callback;
1087 	void *cookie = ast->cookie;
1088 
1089 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1090 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1091 
1092 	/* Call the callbacks to free up the cookie */
1093 	if (cb) {
1094 		ast->callback = NULL;
1095 		ast->cookie = NULL;
1096 		cb(soc->ctrl_psoc,
1097 		   dp_soc_to_cdp_soc(soc),
1098 		   cookie,
1099 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1100 	}
1101 }
1102 
1103 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1104 {
1105 	unsigned int index;
1106 	struct dp_ast_entry *ast, *ast_next;
1107 
1108 	if (!soc->ast_hash.mask)
1109 		return;
1110 
1111 	if (!soc->ast_hash.bins)
1112 		return;
1113 
1114 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1115 
1116 	qdf_spin_lock_bh(&soc->ast_lock);
1117 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1118 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1119 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1120 					   hash_list_elem, ast_next) {
1121 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1122 					     hash_list_elem);
1123 				dp_peer_ast_cleanup(soc, ast);
1124 				soc->num_ast_entries--;
1125 				qdf_mem_free(ast);
1126 			}
1127 		}
1128 	}
1129 	qdf_spin_unlock_bh(&soc->ast_lock);
1130 
1131 	qdf_mem_free(soc->ast_hash.bins);
1132 	soc->ast_hash.bins = NULL;
1133 }
1134 
1135 /**
1136  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1137  * @soc: SoC handle
1138  * @mac_addr: MAC address
1139  *
1140  * Return: AST hash
1141  */
1142 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1143 					      union dp_align_mac_addr *mac_addr)
1144 {
1145 	uint32_t index;
1146 
1147 	index =
1148 		mac_addr->align2.bytes_ab ^
1149 		mac_addr->align2.bytes_cd ^
1150 		mac_addr->align2.bytes_ef;
1151 	index ^= index >> soc->ast_hash.idx_bits;
1152 	index &= soc->ast_hash.mask;
1153 	return index;
1154 }
1155 
1156 /**
1157  * dp_peer_ast_hash_add() - Add AST entry into hash table
1158  * @soc: SoC handle
1159  * @ase: AST entry
1160  *
1161  * This function adds the AST entry into SoC AST hash table
1162  * It assumes caller has taken the ast lock to protect the access to this table
1163  *
1164  * Return: None
1165  */
1166 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1167 					struct dp_ast_entry *ase)
1168 {
1169 	uint32_t index;
1170 
1171 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1172 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1173 }
1174 
1175 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1176 			     struct dp_ast_entry *ase)
1177 {
1178 	unsigned index;
1179 	struct dp_ast_entry *tmpase;
1180 	int found = 0;
1181 
1182 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1183 		return;
1184 
1185 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1186 	/* Check if tail is not empty before delete*/
1187 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1188 
1189 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1190 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1191 
1192 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1193 		if (tmpase == ase) {
1194 			found = 1;
1195 			break;
1196 		}
1197 	}
1198 
1199 	QDF_ASSERT(found);
1200 
1201 	if (found)
1202 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1203 }
1204 
1205 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1206 						     uint8_t *ast_mac_addr,
1207 						     uint8_t vdev_id)
1208 {
1209 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1210 	uint32_t index;
1211 	struct dp_ast_entry *ase;
1212 
1213 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1214 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1215 	mac_addr = &local_mac_addr_aligned;
1216 
1217 	index = dp_peer_ast_hash_index(soc, mac_addr);
1218 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1219 		if ((vdev_id == ase->vdev_id) &&
1220 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1221 			return ase;
1222 		}
1223 	}
1224 
1225 	return NULL;
1226 }
1227 
1228 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1229 						     uint8_t *ast_mac_addr,
1230 						     uint8_t pdev_id)
1231 {
1232 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1233 	uint32_t index;
1234 	struct dp_ast_entry *ase;
1235 
1236 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1237 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1238 	mac_addr = &local_mac_addr_aligned;
1239 
1240 	index = dp_peer_ast_hash_index(soc, mac_addr);
1241 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1242 		if ((pdev_id == ase->pdev_id) &&
1243 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1244 			return ase;
1245 		}
1246 	}
1247 
1248 	return NULL;
1249 }
1250 
1251 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1252 					       uint8_t *ast_mac_addr)
1253 {
1254 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1255 	unsigned index;
1256 	struct dp_ast_entry *ase;
1257 
1258 	if (!soc->ast_hash.bins)
1259 		return NULL;
1260 
1261 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1262 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1263 	mac_addr = &local_mac_addr_aligned;
1264 
1265 	index = dp_peer_ast_hash_index(soc, mac_addr);
1266 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1267 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1268 			return ase;
1269 		}
1270 	}
1271 
1272 	return NULL;
1273 }
1274 
1275 /**
1276  * dp_peer_map_ipa_evt() - Send peer map event to IPA
1277  * @soc: SoC handle
1278  * @peer: peer to which ast node belongs
1279  * @ast_entry: AST entry
1280  * @mac_addr: MAC address of ast node
1281  *
1282  * Return: None
1283  */
1284 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
1285 static inline
1286 void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
1287 			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
1288 {
1289 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1290 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1291 			soc->cdp_soc.ol_ops->peer_map_event(
1292 			soc->ctrl_psoc, ast_entry->peer_id,
1293 			ast_entry->ast_idx, ast_entry->vdev_id,
1294 			mac_addr, ast_entry->type, ast_entry->ast_hash_value);
1295 		}
1296 	} else {
1297 		dp_peer_info("%pK: AST entry not found", soc);
1298 	}
1299 }
1300 
1301 /**
1302  * dp_peer_unmap_ipa_evt() - Send peer unmap event to IPA
1303  * @soc: SoC handle
1304  * @peer_id: Peerid
1305  * @vdev_id: Vdev id
1306  * @mac_addr: Peer mac address
1307  *
1308  * Return: None
1309  */
1310 static inline
1311 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
1312 			   uint8_t vdev_id, uint8_t *mac_addr)
1313 {
1314 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1315 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1316 						      peer_id, vdev_id,
1317 						      mac_addr);
1318 	}
1319 }
1320 #else
/* Stub: no-op when IPA offload with LL Tx flow control is not compiled in */
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
}
1326 
/* Stub: no-op when IPA offload with LL Tx flow control is not compiled in */
static inline
void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
{
}
1332 #endif
1333 
1334 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1335 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1336 				    uint8_t vdev_id, uint16_t ast_hash,
1337 				    uint8_t is_wds)
1338 {
1339 	struct dp_vdev *vdev;
1340 	struct dp_ast_entry *ast_entry;
1341 	enum cdp_txrx_ast_entry_type type;
1342 	struct dp_peer *peer;
1343 	struct dp_peer *old_peer;
1344 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1345 
1346 	if (is_wds)
1347 		type = CDP_TXRX_AST_TYPE_WDS;
1348 	else
1349 		type = CDP_TXRX_AST_TYPE_STATIC;
1350 
1351 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1352 	if (!peer) {
1353 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1354 			     soc, peer_id,
1355 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1356 		return QDF_STATUS_E_INVAL;
1357 	}
1358 
1359 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1360 		type = CDP_TXRX_AST_TYPE_MLD;
1361 
1362 	vdev = peer->vdev;
1363 	if (!vdev) {
1364 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1365 		status = QDF_STATUS_E_INVAL;
1366 		goto fail;
1367 	}
1368 
1369 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1370 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1371 		    type != CDP_TXRX_AST_TYPE_MLD &&
1372 		    type != CDP_TXRX_AST_TYPE_SELF) {
1373 			status = QDF_STATUS_E_BUSY;
1374 			goto fail;
1375 		}
1376 	}
1377 
1378 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1379 		      soc, vdev->vdev_id, type,
1380 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1381 		      QDF_MAC_ADDR_REF(mac_addr));
1382 
1383 	/*
1384 	 * In MLO scenario, there is possibility for same mac address
1385 	 * on both link mac address and MLD mac address.
1386 	 * Duplicate AST map needs to be handled for non-mld type.
1387 	 */
1388 	qdf_spin_lock_bh(&soc->ast_lock);
1389 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1390 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1391 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1392 			      hw_peer_id, vdev_id,
1393 			      QDF_MAC_ADDR_REF(mac_addr));
1394 
1395 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1396 						   DP_MOD_ID_AST);
1397 		if (!old_peer) {
1398 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1399 				     soc, ast_entry->peer_id,
1400 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1401 			qdf_spin_unlock_bh(&soc->ast_lock);
1402 			status = QDF_STATUS_E_INVAL;
1403 			goto fail;
1404 		}
1405 
1406 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1407 		dp_peer_free_ast_entry(soc, ast_entry);
1408 		if (old_peer)
1409 			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1410 	}
1411 
1412 	ast_entry = (struct dp_ast_entry *)
1413 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1414 	if (!ast_entry) {
1415 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1416 		qdf_spin_unlock_bh(&soc->ast_lock);
1417 		QDF_ASSERT(0);
1418 		status = QDF_STATUS_E_NOMEM;
1419 		goto fail;
1420 	}
1421 
1422 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1423 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1424 	ast_entry->is_mapped = false;
1425 	ast_entry->delete_in_progress = false;
1426 	ast_entry->next_hop = 0;
1427 	ast_entry->vdev_id = vdev->vdev_id;
1428 	ast_entry->type = type;
1429 
1430 	switch (type) {
1431 	case CDP_TXRX_AST_TYPE_STATIC:
1432 		if (peer->vdev->opmode == wlan_op_mode_sta)
1433 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1434 		break;
1435 	case CDP_TXRX_AST_TYPE_WDS:
1436 		ast_entry->next_hop = 1;
1437 		break;
1438 	case CDP_TXRX_AST_TYPE_MLD:
1439 		break;
1440 	default:
1441 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1442 	}
1443 
1444 	ast_entry->is_active = TRUE;
1445 	DP_STATS_INC(soc, ast.added, 1);
1446 	soc->num_ast_entries++;
1447 	dp_peer_ast_hash_add(soc, ast_entry);
1448 
1449 	ast_entry->ast_idx = hw_peer_id;
1450 	ast_entry->ast_hash_value = ast_hash;
1451 	ast_entry->peer_id = peer_id;
1452 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1453 			  ase_list_elem);
1454 
1455 	dp_peer_map_ipa_evt(soc, peer, ast_entry, mac_addr);
1456 
1457 	qdf_spin_unlock_bh(&soc->ast_lock);
1458 fail:
1459 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1460 
1461 	return status;
1462 }
1463 
1464 /**
1465  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1466  * @soc: SoC handle
1467  * @peer: peer to which ast node belongs
1468  * @mac_addr: MAC address of ast node
1469  * @hw_peer_id: HW AST Index returned by target in peer map event
1470  * @vdev_id: vdev id for VAP to which the peer belongs to
1471  * @ast_hash: ast hash value in HW
1472  * @is_wds: flag to indicate peer map event for WDS ast entry
1473  *
1474  * Return: QDF_STATUS code
1475  */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	/* With AST offload the firmware owns the AST table; no host map */
	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	if (!peer) {
		return QDF_STATUS_E_INVAL;
	}

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases like Auth attack on a repeater
		 * can result in the number of ast_entries falling
		 * in the same hash bucket to exceed the max_skid
		 * length supported by HW in root AP. In these cases
		 * the FW will return the hw_peer_id (ast_index) as
		 * 0xffff indicating HW could not add the entry in
		 * its table. Host has to delete the entry from its
		 * table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				/* Save callback/cookie: the free callback
				 * must run after the ast_lock is dropped.
				 */
				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			/* Notify HMWDS add failure (lock already released) */
			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	/* Normal map path: bind the host entry to the HW AST index and
	 * link it into the peer's AST list.
	 */
	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		/* A map event must not arrive for an already-mapped entry */
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	/* Forward the map event to the control plane; proxy-STA vdevs are
	 * reported even without a host AST entry (parameters here come
	 * from the event arguments, not from ast_entry).
	 */
	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_id,
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}
1589 
1590 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1591 			   struct cdp_soc *dp_soc,
1592 			   void *cookie,
1593 			   enum cdp_ast_free_status status)
1594 {
1595 	struct dp_ast_free_cb_params *param =
1596 		(struct dp_ast_free_cb_params *)cookie;
1597 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1598 	struct dp_peer *peer = NULL;
1599 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1600 
1601 	if (status != CDP_TXRX_AST_DELETED) {
1602 		qdf_mem_free(cookie);
1603 		return;
1604 	}
1605 
1606 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1607 				      0, param->vdev_id, DP_MOD_ID_AST);
1608 	if (peer) {
1609 		err = dp_peer_add_ast(soc, peer,
1610 				      &param->mac_addr.raw[0],
1611 				      param->type,
1612 				      param->flags);
1613 
1614 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1615 					param->type, err, false);
1616 
1617 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1618 	}
1619 	qdf_mem_free(cookie);
1620 }
1621 
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;
	int status = 0;

	/* With AST offload the firmware owns AST management */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	/* Checked before taking ast_lock: peer hash uses its own lock */
	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* Only STATIC/SELF entries may be created while peer is inactive */
	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If AST entry already exists , just return from here
	 * ast entry with same mac address can exist on different radios
	 * if ast_override support is enabled use search by pdev in this
	 * case
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* For HWMWDS_SEC entries can be added for same mac address
		 * do not check for existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}

			/* Add for HMWDS entry we cannot be ignored if there
			 * is AST entry with same mac address
			 *
			 * if ast entry exists with the requested mac address
			 * send a delete command and register callback which
			 * can take care of adding HMWDS ast entry on delete
			 * confirmation from target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return QDF_STATUS_E_NOMEM;
				}

				/* param is consumed by dp_peer_free_hmwds_cb
				 * when the delete confirmation arrives
				 */
				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				/* Call the saved callback*/
				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
				}
				return QDF_STATUS_E_AGAIN;
			}

			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	/* peer_id stays invalid until the firmware map event arrives */
	ast_entry->peer_id = HTT_INVALID_PEER;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		/* On a STA vdev the static entry is the BSS peer's */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		/* HM_SEC entries are never mapped by FW; link to the peer
		 * immediately instead of waiting for a map event
		 */
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
		break;
	case CDP_TXRX_AST_TYPE_DA:
		/* DA entries are owned by the VAP BSS peer */
		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
							  DP_MOD_ID_AST);
		if (!vap_bss_peer) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			qdf_mem_free(ast_entry);
			return QDF_STATUS_E_FAILURE;
		}
		peer = vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		dp_peer_err("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	/* Push WDS-learned entries to the target; static/self/bss/hm-sec
	 * types are host-only here
	 */
	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		status = dp_add_wds_entry_wrapper(soc,
						  peer,
						  mac_addr,
						  flags,
						  ast_entry->type);

	if (vap_bss_peer)
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);

	qdf_spin_unlock_bh(&soc->ast_lock);
	return qdf_status_from_os_return(status);
}
1858 
1859 qdf_export_symbol(dp_peer_add_ast);
1860 
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	/* Clear callback/cookie BEFORE dp_peer_ast_cleanup() runs so the
	 * free callback is NOT invoked on this path (cleanup only calls
	 * the callback if one is still attached).
	 */
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}
1881 
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
	if (!peer) {
		dp_info_rl("NULL peer");
		return;
	}

	/* An entry with an invalid peer_id was never linked to a peer
	 * (no map event received), so there is nothing to unlink.
	 */
	if (ast_entry->peer_id == HTT_INVALID_PEER) {
		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */

	/* The entry must belong to the peer it is being unlinked from */
	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* Mark the entry unowned so a later unlink becomes a no-op */
	ast_entry->peer_id = HTT_INVALID_PEER;
}
1917 
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = NULL;

	/* With AST offload the firmware owns AST deletion */
	if (soc->ast_offload_support)
		return;

	if (!ast_entry) {
		dp_info_rl("NULL AST entry");
		return;
	}

	/* Idempotence guard: a delete already in flight is not repeated */
	if (ast_entry->delete_in_progress) {
		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}

	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));

	ast_entry->delete_in_progress = true;

	/* In teardown del ast is called after setting logical delete state
	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
	 * state
	 */
	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				       DP_MOD_ID_AST);

	dp_peer_ast_send_wds_del(soc, ast_entry, peer);

	/* Remove SELF and STATIC entries in teardown itself */
	if (!ast_entry->next_hop)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* if peer map v2 is enabled we are not freeing ast entry
	 * here and it is supposed to be freed in unmap event (after
	 * we receive delete confirmation from target)
	 *
	 * if peer_id is invalid we did not get the peer map event
	 * for the peer free ast entry from here only in this case
	 */
	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
		goto end;

	/* for WDS secondary entry ast_entry->next_hop would be set so
	 * unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry unmap notification from
	 * FW will not come. Hence unlinkling is done right here.
	 */

	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

end:
	/* Drop the reference taken above (peer may be NULL if the map
	 * event never arrived)
	 */
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}
1984 
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	/* NOTE(review): this function mixes return conventions — it
	 * returns -1/0/wrapper-status as an int but QDF_STATUS_E_INVAL
	 * here; confirm callers only test for zero/non-zero.
	 */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
		      peer->vdev->vdev_id, flags,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	/* Do not send AST update in below cases
	 *  1) Ast entry delete has already triggered
	 *  2) Peer delete is already triggered
	 *  3) We did not get the HTT map for create event
	 */
	if (ast_entry->delete_in_progress ||
	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
	    !ast_entry->is_mapped)
		return ret;

	/* Static/self/bss/hm-sec entries never roam; nothing to update */
	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	/*
	 * Avoids flood of WMI update messages sent to FW for same peer.
	 */
	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
	    (ast_entry->is_active))
		return 0;

	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
					 DP_MOD_ID_AST);
	if (!old_peer)
		return 0;

	/* Move the entry from the old peer's list to the new peer's */
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);

	ast_entry->peer_id = peer->peer_id;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	/* Propagate the new owner to the target */
	ret = dp_update_wds_entry_wrapper(soc,
					  peer,
					  ast_entry->mac_addr.raw,
					  flags);

	return ret;
}
2048 
/* Return the pdev id the AST entry was created on */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}
2054 
/* Return the AST entry's next_hop flag (non-zero for WDS entries) */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}
2060 
/* Overwrite the AST entry's type with the given cdp_txrx_ast_entry_type */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
2067 
2068 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2069 			      struct dp_ast_entry *ast_entry,
2070 			      struct dp_peer *peer)
2071 {
2072 	bool delete_in_fw = false;
2073 
2074 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2075 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2076 		  __func__, ast_entry->type, ast_entry->pdev_id,
2077 		  ast_entry->vdev_id,
2078 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2079 		  ast_entry->next_hop, ast_entry->peer_id);
2080 
2081 	/*
2082 	 * If peer state is logical delete, the peer is about to get
2083 	 * teared down with a peer delete command to firmware,
2084 	 * which will cleanup all the wds ast entries.
2085 	 * So, no need to send explicit wds ast delete to firmware.
2086 	 */
2087 	if (ast_entry->next_hop) {
2088 		if (peer && dp_peer_state_cmp(peer,
2089 					      DP_PEER_STATE_LOGICAL_DELETE))
2090 			delete_in_fw = false;
2091 		else
2092 			delete_in_fw = true;
2093 
2094 		dp_del_wds_entry_wrapper(soc,
2095 					 ast_entry->vdev_id,
2096 					 ast_entry->mac_addr.raw,
2097 					 ast_entry->type,
2098 					 delete_in_fw);
2099 	}
2100 }
2101 #else
/* No-op stub for builds without FEATURE_AST */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
}
2106 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
}
2112 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
}
2117 
/* FEATURE_AST disabled: AST lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	return NULL;
}
2124 
/* FEATURE_AST disabled: adding AST entries always fails */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	return QDF_STATUS_E_FAILURE;
}
2133 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}
2137 
/* FEATURE_AST disabled: nothing to update; returns non-zero (no-op) */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}
2143 
/* FEATURE_AST disabled: soc-wide AST lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}
2149 
/* FEATURE_AST disabled: host AST DB population is a successful no-op */
static inline
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	return QDF_STATUS_SUCCESS;
}
2158 
/* FEATURE_AST disabled: per-pdev AST lookups always miss */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}
2165 
/* FEATURE_AST disabled: no hash table to allocate; always succeeds */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
2170 
/* FEATURE_AST disabled: AST mapping is a successful no-op */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	return QDF_STATUS_SUCCESS;
}
2181 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}
2185 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
}
2191 
/* FEATURE_AST disabled: report an invalid pdev id (0xff) */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
2197 
/* FEATURE_AST disabled: report an invalid next_hop value (0xff) */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
2203 
/* No-op stub for builds without FEATURE_AST */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
}
2209 
/* No-op stub for builds without FEATURE_AST */
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
}
2215 #endif
2216 
2217 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2218 void dp_peer_ast_send_multi_wds_del(
2219 		struct dp_soc *soc, uint8_t vdev_id,
2220 		struct peer_del_multi_wds_entries *wds_list)
2221 {
2222 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2223 
2224 	if (cdp_soc && cdp_soc->ol_ops &&
2225 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2226 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2227 							  vdev_id, wds_list);
2228 }
2229 #endif
2230 
2231 #ifdef FEATURE_WDS
2232 /**
2233  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2234  * @soc: soc handle
2235  * @peer: peer handle
2236  *
2237  * Free all the wds ast entries associated with peer
2238  *
2239  * Return: Number of wds ast entries freed
2240  */
static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
					     struct dp_peer *peer)
{
	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
	struct dp_ast_entry *ast_entry, *temp_ast_entry;
	uint32_t num_ast = 0;

	TAILQ_INIT(&ast_local_list);
	qdf_spin_lock_bh(&soc->ast_lock);

	/* Phase 1 (under ast_lock): detach every AST entry from the peer,
	 * the hash table and the hw index table, collecting them on a
	 * local list so destruction callbacks can run without the lock.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
		/* Only next-hop (wds) entries count toward the total that
		 * is compared against the firmware-reported free count.
		 */
		if (ast_entry->next_hop)
			num_ast++;

		if (ast_entry->is_mapped)
			soc->ast_table[ast_entry->ast_idx] = NULL;

		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
		DP_STATS_INC(soc, ast.deleted, 1);
		dp_peer_ast_hash_remove(soc, ast_entry);
		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
				  ase_list_elem);
		soc->num_ast_entries--;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Phase 2 (lock dropped): invoke registered delete callbacks and
	 * release the memory.
	 */
	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
			   temp_ast_entry) {
		if (ast_entry->callback)
			ast_entry->callback(soc->ctrl_psoc,
					    dp_soc_to_cdp_soc(soc),
					    ast_entry->cookie,
					    CDP_TXRX_AST_DELETED);

		qdf_mem_free(ast_entry);
	}

	return num_ast;
}
2281 /**
2282  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2283  * @soc: soc handle
2284  * @peer: peer handle
2285  * @free_wds_count: number of wds entries freed by FW with peer delete
2286  *
2287  * Free all the wds ast entries associated with peer and compare with
2288  * the value received from firmware
2289  *
2290  * Return: Number of wds ast entries freed
2291  */
2292 static void
2293 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2294 			  uint32_t free_wds_count)
2295 {
2296 	uint32_t wds_deleted = 0;
2297 
2298 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2299 		return;
2300 
2301 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2302 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2303 	    (free_wds_count != wds_deleted)) {
2304 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2305 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT")number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d",
2306 			 peer, peer->mac_addr.raw, free_wds_count,
2307 			 wds_deleted);
2308 	}
2309 }
2310 
2311 #else
2312 static void
2313 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2314 			  uint32_t free_wds_count)
2315 {
2316 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2317 
2318 	qdf_spin_lock_bh(&soc->ast_lock);
2319 
2320 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2321 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2322 
2323 		if (ast_entry->is_mapped)
2324 			soc->ast_table[ast_entry->ast_idx] = NULL;
2325 
2326 		dp_peer_free_ast_entry(soc, ast_entry);
2327 	}
2328 
2329 	peer->self_ast_entry = NULL;
2330 	qdf_spin_unlock_bh(&soc->ast_lock);
2331 }
2332 #endif
2333 
2334 /**
2335  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2336  * @soc: soc handle
2337  * @peer: peer handle
2338  * @vdev_id: vdev_id
2339  * @mac_addr: mac address of the AST entry to searc and delete
2340  *
2341  * find the ast entry from the peer list using the mac address and free
2342  * the entry.
2343  *
2344  * Return: SUCCESS or NOENT
2345  */
/* NOTE(review): declared int but returns QDF_STATUS values; callers rely
 * on QDF_STATUS_SUCCESS being 0 — consider changing the return type.
 */
static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t vdev_id,
					 uint8_t *mac_addr)
{
	struct dp_ast_entry *ast_entry;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_NOENT;
	} else if (ast_entry->is_mapped) {
		soc->ast_table[ast_entry->ast_idx] = NULL;
	}

	/* Snapshot the callback/cookie before the entry memory is freed,
	 * so the callback can be invoked after dropping ast_lock.
	 */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;


	dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Notify the registrant outside the lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}

	return QDF_STATUS_SUCCESS;
}
2389 
2390 void dp_peer_find_hash_erase(struct dp_soc *soc)
2391 {
2392 	int i;
2393 
2394 	/*
2395 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2396 	 * it's known that the soc is no longer in use.
2397 	 */
2398 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2399 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2400 			struct dp_peer *peer, *peer_next;
2401 
2402 			/*
2403 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2404 			 * memory access violation after peer is freed
2405 			 */
2406 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2407 				hash_list_elem, peer_next) {
2408 				/*
2409 				 * Don't remove the peer from the hash table -
2410 				 * that would modify the list we are currently
2411 				 * traversing, and it's not necessary anyway.
2412 				 */
2413 				/*
2414 				 * Artificially adjust the peer's ref count to
2415 				 * 1, so it will get deleted by
2416 				 * dp_peer_unref_delete.
2417 				 */
2418 				/* set to zero */
2419 				qdf_atomic_init(&peer->ref_cnt);
2420 				for (i = 0; i < DP_MOD_ID_MAX; i++)
2421 					qdf_atomic_init(&peer->mod_refs[i]);
2422 				/* incr to one */
2423 				qdf_atomic_inc(&peer->ref_cnt);
2424 				qdf_atomic_inc(&peer->mod_refs
2425 						[DP_MOD_ID_CONFIG]);
2426 				dp_peer_unref_delete(peer,
2427 						     DP_MOD_ID_CONFIG);
2428 			}
2429 		}
2430 	}
2431 }
2432 
2433 void dp_peer_ast_table_detach(struct dp_soc *soc)
2434 {
2435 	if (soc->ast_table) {
2436 		qdf_mem_free(soc->ast_table);
2437 		soc->ast_table = NULL;
2438 	}
2439 }
2440 
2441 void dp_peer_find_map_detach(struct dp_soc *soc)
2442 {
2443 	struct dp_peer *peer = NULL;
2444 	uint32_t i = 0;
2445 
2446 	if (soc->peer_id_to_obj_map) {
2447 		for (i = 0; i < soc->max_peer_id; i++) {
2448 			peer = soc->peer_id_to_obj_map[i];
2449 			if (peer)
2450 				dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2451 		}
2452 		qdf_mem_free(soc->peer_id_to_obj_map);
2453 		soc->peer_id_to_obj_map = NULL;
2454 		qdf_spinlock_destroy(&soc->peer_map_lock);
2455 	}
2456 }
2457 
2458 #ifndef AST_OFFLOAD_ENABLE
/**
 * dp_peer_find_attach() - Allocate all peer/AST lookup structures.
 * @soc: soc handle
 *
 * Attach order: peer-id map -> peer hash -> AST table -> AST hash ->
 * MEC hash (+ WDS attach on full success). On any failure, everything
 * already attached is torn down via the goto unwind ladder below.
 *
 * Return: QDF_STATUS_SUCCESS, or the failing sub-attach status
 */
QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
{
	QDF_STATUS status;

	status = dp_peer_find_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_find_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto map_detach;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		dp_soc_wds_attach(soc);
		return status;
	}

	/* mec hash attach failed: unwind in reverse attach order */
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);
hash_detach:
	dp_peer_find_hash_detach(soc);
map_detach:
	dp_peer_find_map_detach(soc);

	return status;
}
2495 #else
2496 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2497 {
2498 	QDF_STATUS status;
2499 
2500 	status = dp_peer_find_map_attach(soc);
2501 	if (!QDF_IS_STATUS_SUCCESS(status))
2502 		return status;
2503 
2504 	status = dp_peer_find_hash_attach(soc);
2505 	if (!QDF_IS_STATUS_SUCCESS(status))
2506 		goto map_detach;
2507 
2508 	return status;
2509 map_detach:
2510 	dp_peer_find_map_detach(soc);
2511 
2512 	return status;
2513 }
2514 #endif
2515 
2516 #ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - Clear the REO shared queue-address
 *	LUT entries of a peer and log the deletion in the qaddr-del history.
 * @soc: soc handle
 * @peer: peer whose queue addresses are being removed
 *
 * Skipped for MLO link peers — presumably their REO queues are owned by
 * the MLD peer (TODO confirm).
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer)
{
	uint8_t tid;
	uint16_t peer_id;
	uint32_t max_list_size;

	max_list_size = soc->wlan_cfg_ctx->qref_control_size;

	peer_id = peer->peer_id;

	if (peer_id > soc->max_peer_id)
		return;
	if (IS_MLO_DP_LINK_PEER(peer))
		return;

	/* Record the deletion in the circular qaddr-delete history when
	 * history tracking is configured (max_list_size non-zero).
	 */
	if (max_list_size) {
		unsigned long curr_ts = qdf_get_system_timestamp();
		struct dp_peer *primary_peer = peer;
		uint16_t chip_id = 0xFFFF;
		uint32_t qref_index;

		qref_index = soc->shared_qaddr_del_idx;

		soc->list_shared_qaddr_del[qref_index].peer_id =
							  primary_peer->peer_id;
		soc->list_shared_qaddr_del[qref_index].ts_qaddr_del = curr_ts;
		soc->list_shared_qaddr_del[qref_index].chip_id = chip_id;
		soc->shared_qaddr_del_idx++;

		/* Wrap the circular history index */
		if (soc->shared_qaddr_del_idx == max_list_size)
			soc->shared_qaddr_del_idx = 0;
	}

	/* Zero the LUT entry of every TID for this peer id */
	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
			hal_reo_shared_qaddr_write(soc->hal_soc,
						   peer_id, tid, 0);
		}
	}
}
2558 #endif
2559 
2560 /**
2561  * dp_peer_find_add_id() - map peer_id with peer
2562  * @soc: soc handle
2563  * @peer_mac_addr: peer mac address
2564  * @peer_id: peer id to be mapped
2565  * @hw_peer_id: HW ast index
2566  * @vdev_id: vdev_id
2567  * @peer_type: peer type (link or MLD)
2568  *
2569  * return: peer in success
2570  *         NULL in failure
2571  */
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct cdp_peer_info peer_info = { 0 };

	QDF_ASSERT(peer_id <= soc->max_peer_id);
	/* check if there's already a peer object with this MAC address */
	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
				 false, peer_type);
	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
	dp_peer_debug("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		      soc, peer, peer_id, vdev_id,
		      QDF_MAC_ADDR_REF(peer_mac_addr));

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		dp_peer_info("%pK: ref_cnt: %d", soc,
			     qdf_atomic_read(&peer->ref_cnt));

		/*
		 * if peer is in logical delete CP triggered delete before map
		 * is received ignore this event
		 */
		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
				 vdev_id);
			return NULL;
		}

		/* A peer that already holds a valid peer_id getting another
		 * map is unexpected: drop the hash reference and assert.
		 */
		if (peer->peer_id == HTT_INVALID_PEER) {
			if (!IS_MLO_DP_MLD_PEER(peer))
				dp_monitor_peer_tid_peer_id_update(soc, peer,
								   peer_id);
		} else {
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
			QDF_ASSERT(0);
			return NULL;
		}
		/* Publish the peer_id -> peer mapping; the hash reference
		 * taken above is retained by the id map / returned peer.
		 */
		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
		if (soc->arch_ops.dp_partner_chips_map)
			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);

		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
		return peer;
	}

	return NULL;
}
2626 
2627 #ifdef WLAN_FEATURE_11BE_MLO
2628 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2629 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2630 {
2631 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2632 }
2633 #else
2634 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2635 {
2636 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2637 }
2638 #endif
2639 
/**
 * dp_rx_mlo_peer_map_handler() - Handle the MLO peer map event from target.
 * @soc: soc on which the map event was received (assoc link soc)
 * @peer_id: link peer id from firmware (converted to an ML peer id here)
 * @peer_mac_addr: MLD peer MAC address
 * @mlo_flow_info: per-flow AST index / cache-set info from the event
 * @mlo_link_info: per-link chip/vdev info from the event
 *
 * Maps the generated ml_peer_id to the MLD peer object, records bss AST
 * info on STA vdevs, restores a missing self AST entry, and performs the
 * deferred REO qref LUT writes on the primary soc.
 *
 * Return: QDF_STATUS from dp_peer_map_ast()
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info,
			   struct dp_mlo_link_info *mlo_link_info)
{
	struct dp_peer *peer = NULL;
	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
	uint8_t vdev_id = 0;
	uint8_t is_wds = 0;
	int i;
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
	QDF_STATUS err = QDF_STATUS_SUCCESS;
	struct dp_soc *primary_soc = NULL;

	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
					       NULL, peer_mac_addr,
					       1, peer_id, ml_peer_id, 0,
					       vdev_id);

	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
		soc, peer_id, ml_peer_id,
		QDF_MAC_ADDR_REF(peer_mac_addr));

	/* Get corresponding vdev ID for the peer based
	 * on chip ID obtained from mlo peer_map event
	 */
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
			vdev_id = mlo_link_info[i].vdev_id;
			break;
		}
	}

	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
	if (peer) {
		/* On a STA vdev, a peer whose MAC differs from the vdev's
		 * MLD MAC is the BSS (AP) peer.
		 */
		if (wlan_op_mode_sta == peer->vdev->opmode &&
		    qdf_mem_cmp(peer->mac_addr.raw,
				peer->vdev->mld_mac_addr.raw,
				QDF_MAC_ADDR_SIZE) != 0) {
			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
			peer->bss_peer = 1;
			if (peer->txrx_peer)
				peer->txrx_peer->bss_peer = 1;
		}

		if (peer->vdev->opmode == wlan_op_mode_sta) {
			peer->vdev->bss_ast_hash = ast_hash;
			peer->vdev->bss_ast_idx = hw_peer_id;
		}

		/* Add ast entry incase self ast entry is
		 * deleted due to DP CP sync issue
		 *
		 * self_ast_entry is modified in peer create
		 * and peer unmap path which cannot run in
		 * parllel with peer map, no lock need before
		 * referring it
		 */
		if (!peer->self_ast_entry) {
			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
				QDF_MAC_ADDR_REF(peer_mac_addr));
			dp_peer_add_ast(soc, peer,
					peer_mac_addr,
					type, 0);
		}
		/* If peer setup and hence rx_tid setup got called
		 * before htt peer map then Qref write to LUT did not
		 * happen in rx_tid setup as peer_id was invalid.
		 * So defer Qref write to peer map handler. Check if
		 * rx_tid qdesc for tid 0 is already setup and perform
		 * qref write to LUT for Tid 0 and 16.
		 *
		 * Peer map could be obtained on assoc link, hence
		 * change to primary link's soc.
		 */
		primary_soc = peer->vdev->pdev->soc;
		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   0,
						   peer->rx_tid[0].hw_qdesc_paddr);
			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
						   ml_peer_id,
						   DP_NON_QOS_TID,
						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
		}
	}

	/* No peer found (or no primary link resolved): fall back to the
	 * soc the event arrived on for the host AST DB update below.
	 */
	if (!primary_soc)
		primary_soc = soc;

	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
			      vdev_id, ast_hash, is_wds);

	/*
	 * If AST offload and host AST DB is enabled, populate AST entries on
	 * host based on mlo peer map event from FW
	 */
	if (peer && soc->ast_offload_support && soc->host_ast_db_enable) {
		dp_peer_host_add_map_ast(primary_soc, ml_peer_id, peer_mac_addr,
					 hw_peer_id, vdev_id,
					 ast_hash, is_wds);
	}

	return err;
}
2751 #endif
2752 
2753 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2754 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2755 			      uint8_t *peer_mac_addr)
2756 {
2757 	struct dp_vdev *vdev = NULL;
2758 
2759 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2760 	if (vdev) {
2761 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2762 				QDF_MAC_ADDR_SIZE) == 0) {
2763 			vdev->roaming_peer_status =
2764 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2765 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
2766 				     QDF_MAC_ADDR_SIZE);
2767 		}
2768 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2769 	}
2770 }
2771 #endif
2772 
2773 #ifdef WLAN_SUPPORT_PPEDS
2774 static void
2775 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2776 				     bool peer_map)
2777 {
2778 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
2779 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2780 								   peer_map);
2781 }
2782 #else
/* No-op stub for builds without WLAN_SUPPORT_PPEDS */
static void
dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
				     bool peer_map)
{
}
2788 #endif
2789 
2790 QDF_STATUS
2791 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2792 		       uint16_t hw_peer_id, uint8_t vdev_id,
2793 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2794 		       uint8_t is_wds)
2795 {
2796 	struct dp_peer *peer = NULL;
2797 	struct dp_vdev *vdev = NULL;
2798 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2799 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2800 
2801 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2802 					       NULL, peer_mac_addr, 1, peer_id,
2803 					       0, 0, vdev_id);
2804 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2805 		soc, peer_id, hw_peer_id,
2806 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2807 
2808 	/* Peer map event for WDS ast entry get the peer from
2809 	 * obj map
2810 	 */
2811 	if (is_wds) {
2812 		if (!soc->ast_offload_support) {
2813 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2814 						     DP_MOD_ID_HTT);
2815 
2816 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2817 					      hw_peer_id,
2818 					      vdev_id, ast_hash, is_wds);
2819 			if (peer)
2820 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2821 		}
2822 	} else {
2823 		/*
2824 		 * It's the responsibility of the CP and FW to ensure
2825 		 * that peer is created successfully. Ideally DP should
2826 		 * not hit the below condition for directly associated
2827 		 * peers.
2828 		 */
2829 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
2830 		    (hw_peer_id >=
2831 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
2832 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2833 			qdf_assert_always(0);
2834 		}
2835 
2836 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2837 					   hw_peer_id, vdev_id,
2838 					   CDP_LINK_PEER_TYPE);
2839 
2840 		if (peer) {
2841 			bool peer_map = true;
2842 
2843 			/* Updating ast_hash and ast_idx in peer level */
2844 			peer->ast_hash = ast_hash;
2845 			peer->ast_idx = hw_peer_id;
2846 			vdev = peer->vdev;
2847 			/* Only check for STA Vdev and peer is not for TDLS */
2848 			if (wlan_op_mode_sta == vdev->opmode &&
2849 			    !peer->is_tdls_peer) {
2850 				if (qdf_mem_cmp(peer->mac_addr.raw,
2851 						vdev->mac_addr.raw,
2852 						QDF_MAC_ADDR_SIZE) != 0) {
2853 					dp_info("%pK: STA vdev bss_peer", soc);
2854 					peer->bss_peer = 1;
2855 					if (peer->txrx_peer)
2856 						peer->txrx_peer->bss_peer = 1;
2857 				}
2858 
2859 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2860 					ast_hash, hw_peer_id);
2861 				vdev->bss_ast_hash = ast_hash;
2862 				vdev->bss_ast_idx = hw_peer_id;
2863 
2864 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2865 								     peer_map);
2866 			}
2867 
2868 			/* Add ast entry incase self ast entry is
2869 			 * deleted due to DP CP sync issue
2870 			 *
2871 			 * self_ast_entry is modified in peer create
2872 			 * and peer unmap path which cannot run in
2873 			 * parllel with peer map, no lock need before
2874 			 * referring it
2875 			 */
2876 			if (!soc->ast_offload_support &&
2877 				!peer->self_ast_entry) {
2878 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2879 					QDF_MAC_ADDR_REF(peer_mac_addr));
2880 				dp_peer_add_ast(soc, peer,
2881 						peer_mac_addr,
2882 						type, 0);
2883 			}
2884 
2885 			/* If peer setup and hence rx_tid setup got called
2886 			 * before htt peer map then Qref write to LUT did
2887 			 * not happen in rx_tid setup as peer_id was invalid.
2888 			 * So defer Qref write to peer map handler. Check if
2889 			 * rx_tid qdesc for tid 0 is already setup perform qref
2890 			 * write to LUT for Tid 0 and 16.
2891 			 */
2892 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2893 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2894 			    !IS_MLO_DP_LINK_PEER(peer)) {
2895 				add_entry_write_list(soc, peer, 0);
2896 				hal_reo_shared_qaddr_write(soc->hal_soc,
2897 							   peer_id,
2898 							   0,
2899 							   peer->rx_tid[0].hw_qdesc_paddr);
2900 				add_entry_write_list(soc, peer, DP_NON_QOS_TID);
2901 				hal_reo_shared_qaddr_write(soc->hal_soc,
2902 							   peer_id,
2903 							   DP_NON_QOS_TID,
2904 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2905 			}
2906 		}
2907 
2908 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2909 				      vdev_id, ast_hash, is_wds);
2910 	}
2911 
2912 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2913 
2914 	/*
2915 	 * If AST offload and host AST DB is enabled, populate AST entries on
2916 	 * host based on peer map event from FW
2917 	 */
2918 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2919 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
2920 					 hw_peer_id, vdev_id,
2921 					 ast_hash, is_wds);
2922 	}
2923 
2924 	return err;
2925 }
2926 
/**
 * dp_rx_peer_unmap_handler() - Handle the HTT peer unmap event from target.
 * @soc: soc on which the unmap event was received
 * @peer_id: peer id being unmapped
 * @vdev_id: vdev id
 * @mac_addr: MAC address of the peer or wds entry
 * @is_wds: set when the unmap is for a WDS ast entry, not a direct peer
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * For a wds unmap, only the matching AST entry is freed. For a peer
 * unmap, the peer's wds entries and Qref LUT entries are cleaned, the
 * peer_id mapping is removed, and the references held by the map and by
 * this handler are dropped (possibly freeing the peer).
 */
void
dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			 uint8_t vdev_id, uint8_t *mac_addr,
			 uint8_t is_wds, uint32_t free_wds_count)
{
	struct dp_peer *peer;
	struct dp_vdev *vdev = NULL;

	/*
	 * If FW AST offload is enabled and host AST DB is enabled,
	 * the AST entries are created during peer map from FW.
	 */
	if (soc->ast_offload_support && is_wds) {
		if (!soc->host_ast_db_enable)
			return;
	}

	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		dp_err("Received unmap event for invalid peer_id %u",
		       peer_id);
		return;
	}

	vdev = peer->vdev;

	if (peer->txrx_peer) {
		struct cdp_txrx_peer_params_update params = {0};

		params.vdev_id = vdev->vdev_id;
		params.peer_mac = peer->mac_addr.raw;
		params.chip_id = dp_mlo_get_chip_id(soc);
		params.pdev_id = vdev->pdev->pdev_id;

		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
				     (void *)&params, peer_id,
				     WDI_NO_VAL, vdev->pdev->pdev_id);
	}

	/*
	 * In scenario where assoc peer soc id is different from
	 * primary soc id, reset the soc to point to primary psoc.
	 * Since map is received on primary soc, the unmap should
	 * also delete ast on primary soc.
	 */
	soc = peer->vdev->pdev->soc;

	/* If V2 Peer map messages are enabled AST entry has to be
	 * freed here
	 */
	if (is_wds) {
		/* wds unmap: free only the matching AST entry, drop the
		 * reference taken above, and return without touching the
		 * peer object itself.
		 */
		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
						   mac_addr)) {
			dp_peer_unmap_ipa_evt(soc, peer_id, vdev_id, mac_addr);
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
			return;
		}

		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
			  peer, peer->peer_id,
			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
			  is_wds);

		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
		return;
	}

	dp_peer_clean_wds_entries(soc, peer, free_wds_count);

	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
					       peer, mac_addr, 0, peer_id,
					       0, 0, vdev_id);
	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	/* Clear entries in Qref LUT */
	/* TODO: Check if this is to be called from
	 * dp_peer_delete for MLO case if there is race between
	 * new peer id assignment and still not having received
	 * peer unmap for MLD peer with same peer id.
	 */
	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);

	vdev = peer->vdev;

	/* only if peer is in STA mode and not tdls peer */
	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
		bool peer_map = false;

		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
	}

	dp_peer_find_id_to_obj_remove(soc, peer_id);

	if (soc->arch_ops.dp_partner_chips_unmap)
		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);

	peer->peer_id = HTT_INVALID_PEER;

	/*
	 *	 Reset ast flow mapping table
	 */
	if (!soc->ast_offload_support)
		dp_peer_reset_flowq_map(peer);

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
				peer_id, vdev_id, mac_addr);
	}

	dp_update_vdev_stats_on_peer_unmap(vdev, peer);

	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
	/* Drop the reference taken by this handler (HTT) ... */
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
}
3054 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
QDF_STATUS
dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info)
{
	struct cdp_peer_info pinfo = { 0 };
	struct dp_peer *peer;

	QDF_ASSERT(info->peer_id <= soc->max_peer_id);

	/* Look up the link peer the event refers to */
	DP_PEER_INFO_PARAMS_INIT(&pinfo, info->vdev_id, info->peer_mac_addr,
				 false, CDP_LINK_PEER_TYPE);
	peer = dp_peer_hash_find_wrapper(soc, &pinfo, DP_MOD_ID_CONFIG);
	if (!peer) {
		dp_err("peer NULL, id %u, MAC " QDF_MAC_ADDR_FMT ", vdev_id %u",
		       info->peer_id, QDF_MAC_ADDR_REF(info->peer_mac_addr),
		       info->vdev_id);

		return QDF_STATUS_E_FAILURE;
	}

	/* Record the link id reported by the event on the link peer */
	peer->link_id = info->link_id;
	peer->link_id_valid = info->link_id_valid;

	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef WLAN_FEATURE_11BE_MLO
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
{
	uint8_t zero_mac[QDF_MAC_ADDR_SIZE] = {0};
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
	uint8_t vdev_id = DP_VDEV_ALL;
	uint8_t wds = 0;

	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
					       NULL, zero_mac, 0, peer_id,
					       0, 0, vdev_id);
	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
		soc, peer_id);

	/* Forward to the common unmap path using the generated ML peer id */
	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
				 zero_mac, wds,
				 DP_PEER_WDS_COUNT_INVALID);
}
#endif
3102 
#ifndef AST_OFFLOAD_ENABLE
/*
 * dp_peer_find_detach() - tear down peer lookup infrastructure
 * @soc: DP soc handle
 *
 * Host-managed AST build: detach WDS state, the peer id map, the peer
 * hash table, and the host-resident AST/MEC tables.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_mec_hash_detach(soc);
}
#else
/*
 * AST offload build: no host AST/MEC tables to free, so only the peer
 * id map and peer hash table are detached.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
}
#endif
3122 
3123 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3124 {
3125 	dp_peer_rx_tid_setup(peer);
3126 
3127 	peer->active_ba_session_cnt = 0;
3128 	peer->hw_buffer_size = 0;
3129 	peer->kill_256_sessions = 0;
3130 
3131 	/*
3132 	 * Set security defaults: no PN check, no security. The target may
3133 	 * send a HTT SEC_IND message to overwrite these defaults.
3134 	 */
3135 	if (peer->txrx_peer)
3136 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
3137 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
3138 				cdp_sec_type_none;
3139 }
3140 
/*
 * dp_peer_cleanup() - clean up peer state ahead of peer deletion
 * @vdev: vdev the peer is attached to
 * @peer: peer being torn down
 *
 * Flushes monitor tx state, rx reorder queues and rx tid state, breaks
 * any MLO link/MLD association, then notifies the control path via the
 * peer_unref_delete ol_ops callback.
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	enum wlan_op_mode vdev_opmode;
	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* save vdev related member in case vdev freed */
	vdev_opmode = vdev->opmode;

	/* MLD peer carries no monitor tx state of its own; skip it */
	if (!IS_MLO_DP_MLD_PEER(peer))
		dp_monitor_peer_tx_cleanup(vdev, peer);

	if (vdev_opmode != wlan_op_mode_monitor)
	/* cleanup the Rx reorder queues for this peer */
		dp_peer_rx_cleanup(vdev, peer);

	dp_peer_rx_tids_destroy(peer);

	/* Break MLO association before the peer object goes away */
	if (IS_MLO_DP_LINK_PEER(peer))
		dp_link_peer_del_mld_peer(peer);
	if (IS_MLO_DP_MLD_PEER(peer))
		dp_mld_peer_deinit_link_peers_info(peer);

	/* copy the mac now: the callback below may release the vdev */
	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	if (soc->cdp_soc.ol_ops->peer_unref_delete)
		soc->cdp_soc.ol_ops->peer_unref_delete(
				soc->ctrl_psoc,
				vdev->pdev->pdev_id,
				peer->mac_addr.raw, vdev_mac_addr,
				vdev_opmode);
}
3175 
3176 QDF_STATUS
3177 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3178 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3179 			  bool is_unicast)
3180 {
3181 	struct dp_peer *peer =
3182 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3183 						       peer_mac, 0, vdev_id,
3184 						       DP_MOD_ID_CDP);
3185 	int sec_index;
3186 
3187 	if (!peer) {
3188 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3189 		return QDF_STATUS_E_FAILURE;
3190 	}
3191 
3192 	if (!peer->txrx_peer) {
3193 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3194 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
3195 		return QDF_STATUS_E_FAILURE;
3196 	}
3197 
3198 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3199 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3200 		     is_unicast ? "ucast" : "mcast", sec_type);
3201 
3202 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3203 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3204 
3205 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3206 
3207 	return QDF_STATUS_SUCCESS;
3208 }
3209 
3210 void
3211 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3212 		      enum cdp_sec_type sec_type, int is_unicast,
3213 		      u_int32_t *michael_key,
3214 		      u_int32_t *rx_pn)
3215 {
3216 	struct dp_peer *peer;
3217 	struct dp_txrx_peer *txrx_peer;
3218 	int sec_index;
3219 
3220 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3221 	if (!peer) {
3222 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3223 			    peer_id);
3224 		return;
3225 	}
3226 	txrx_peer = dp_get_txrx_peer(peer);
3227 	if (!txrx_peer) {
3228 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
3229 			    peer_id);
3230 		return;
3231 	}
3232 
3233 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3234 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3235 			  is_unicast ? "ucast" : "mcast", sec_type);
3236 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3237 
3238 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
3239 #ifdef notyet /* TODO: See if this is required for defrag support */
3240 	/* michael key only valid for TKIP, but for simplicity,
3241 	 * copy it anyway
3242 	 */
3243 	qdf_mem_copy(
3244 		&peer->txrx_peer->security[sec_index].michael_key[0],
3245 		michael_key,
3246 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
3247 #ifdef BIG_ENDIAN_HOST
3248 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
3249 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
3250 #endif /* BIG_ENDIAN_HOST */
3251 #endif
3252 
3253 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3254 	if (sec_type != cdp_sec_type_wapi) {
3255 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3256 	} else {
3257 		for (i = 0; i < DP_MAX_TIDS; i++) {
3258 			/*
3259 			 * Setting PN valid bit for WAPI sec_type,
3260 			 * since WAPI PN has to be started with predefined value
3261 			 */
3262 			peer->tids_last_pn_valid[i] = 1;
3263 			qdf_mem_copy(
3264 				(u_int8_t *) &peer->tids_last_pn[i],
3265 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3266 			peer->tids_last_pn[i].pn128[1] =
3267 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3268 			peer->tids_last_pn[i].pn128[0] =
3269 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3270 		}
3271 	}
3272 #endif
3273 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3274 	 * all security types and last pn for WAPI) once REO command API
3275 	 * is available
3276 	 */
3277 
3278 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3279 }
3280 
3281 #ifdef QCA_PEER_EXT_STATS
3282 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
3283 					 struct dp_txrx_peer *txrx_peer)
3284 {
3285 	uint8_t tid, ctx_id;
3286 
3287 	if (!soc || !txrx_peer) {
3288 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
3289 		return QDF_STATUS_E_INVAL;
3290 	}
3291 
3292 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3293 		return QDF_STATUS_SUCCESS;
3294 
3295 	/*
3296 	 * Allocate memory for peer extended stats.
3297 	 */
3298 	txrx_peer->delay_stats =
3299 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
3300 	if (!txrx_peer->delay_stats) {
3301 		dp_err("Peer extended stats obj alloc failed!!");
3302 		return QDF_STATUS_E_NOMEM;
3303 	}
3304 
3305 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3306 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3307 			struct cdp_delay_tx_stats *tx_delay =
3308 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
3309 			struct cdp_delay_rx_stats *rx_delay =
3310 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
3311 
3312 			dp_hist_init(&tx_delay->tx_swq_delay,
3313 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3314 			dp_hist_init(&tx_delay->hwtx_delay,
3315 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3316 			dp_hist_init(&rx_delay->to_stack_delay,
3317 				     CDP_HIST_TYPE_REAP_STACK);
3318 		}
3319 	}
3320 
3321 	return QDF_STATUS_SUCCESS;
3322 }
3323 
3324 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
3325 				     struct dp_txrx_peer *txrx_peer)
3326 {
3327 	if (!txrx_peer) {
3328 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3329 		return;
3330 	}
3331 
3332 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3333 		return;
3334 
3335 	if (!txrx_peer->delay_stats)
3336 		return;
3337 
3338 	qdf_mem_free(txrx_peer->delay_stats);
3339 	txrx_peer->delay_stats = NULL;
3340 }
3341 
3342 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3343 {
3344 	if (txrx_peer->delay_stats)
3345 		qdf_mem_zero(txrx_peer->delay_stats,
3346 			     sizeof(struct dp_peer_delay_stats));
3347 }
3348 #endif
3349 
3350 #ifdef WLAN_PEER_JITTER
3351 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
3352 					  struct dp_txrx_peer *txrx_peer)
3353 {
3354 	if (!pdev || !txrx_peer) {
3355 		dp_warn("Null pdev or peer");
3356 		return QDF_STATUS_E_INVAL;
3357 	}
3358 
3359 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3360 		return QDF_STATUS_SUCCESS;
3361 
3362 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3363 		/*
3364 		 * Allocate memory on per tid basis when nss is enabled
3365 		 */
3366 		txrx_peer->jitter_stats =
3367 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3368 					* DP_MAX_TIDS);
3369 	} else {
3370 		/*
3371 		 * Allocate memory on per tid per ring basis
3372 		 */
3373 		txrx_peer->jitter_stats =
3374 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
3375 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3376 	}
3377 
3378 	if (!txrx_peer->jitter_stats) {
3379 		dp_warn("Jitter stats obj alloc failed!!");
3380 		return QDF_STATUS_E_NOMEM;
3381 	}
3382 
3383 	return QDF_STATUS_SUCCESS;
3384 }
3385 
3386 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
3387 				      struct dp_txrx_peer *txrx_peer)
3388 {
3389 	if (!pdev || !txrx_peer) {
3390 		dp_warn("Null pdev or peer");
3391 		return;
3392 	}
3393 
3394 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
3395 		return;
3396 
3397 	if (txrx_peer->jitter_stats) {
3398 		qdf_mem_free(txrx_peer->jitter_stats);
3399 		txrx_peer->jitter_stats = NULL;
3400 	}
3401 }
3402 
3403 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
3404 {
3405 	struct cdp_peer_tid_stats *jitter_stats = NULL;
3406 
3407 	if (!txrx_peer) {
3408 		dp_warn("Null peer");
3409 		return;
3410 	}
3411 
3412 	if (!wlan_cfg_is_peer_jitter_stats_enabled(txrx_peer->
3413 						   vdev->
3414 						   pdev->soc->wlan_cfg_ctx))
3415 		return;
3416 
3417 	jitter_stats = txrx_peer->jitter_stats;
3418 	if (!jitter_stats)
3419 		return;
3420 
3421 	if (wlan_cfg_get_dp_pdev_nss_enabled(txrx_peer->
3422 					     vdev->pdev->wlan_cfg_ctx))
3423 		qdf_mem_zero(jitter_stats,
3424 			     sizeof(struct cdp_peer_tid_stats) *
3425 			     DP_MAX_TIDS);
3426 
3427 	else
3428 		qdf_mem_zero(jitter_stats,
3429 			     sizeof(struct cdp_peer_tid_stats) *
3430 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
3431 
3432 }
3433 #endif
3434 
3435 #ifdef DP_PEER_EXTENDED_API
3436 /**
3437  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
3438  * @soc: DP soc handle
3439  * @txrx_peer: Core txrx_peer handle
3440  * @set_bw: enum of bandwidth to be set for this peer connection
3441  *
3442  * Return: None
3443  */
3444 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
3445 			   enum cdp_peer_bw set_bw)
3446 {
3447 	if (!txrx_peer)
3448 		return;
3449 
3450 	txrx_peer->bw = set_bw;
3451 
3452 	switch (set_bw) {
3453 	case CDP_160_MHZ:
3454 	case CDP_320_MHZ:
3455 		txrx_peer->mpdu_retry_threshold =
3456 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
3457 		break;
3458 	case CDP_20_MHZ:
3459 	case CDP_40_MHZ:
3460 	case CDP_80_MHZ:
3461 	default:
3462 		txrx_peer->mpdu_retry_threshold =
3463 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
3464 		break;
3465 	}
3466 
3467 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
3468 		txrx_peer->peer_id, txrx_peer->bw,
3469 		txrx_peer->mpdu_retry_threshold);
3470 }
3471 
3472 #ifdef WLAN_FEATURE_11BE_MLO
3473 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3474 			    struct ol_txrx_desc_type *sta_desc)
3475 {
3476 	struct dp_peer *peer;
3477 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3478 
3479 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3480 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3481 
3482 	if (!peer)
3483 		return QDF_STATUS_E_FAULT;
3484 
3485 	qdf_spin_lock_bh(&peer->peer_info_lock);
3486 	peer->state = OL_TXRX_PEER_STATE_CONN;
3487 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3488 
3489 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3490 
3491 	dp_rx_flush_rx_cached(peer, false);
3492 
3493 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3494 		dp_peer_info("register for mld peer" QDF_MAC_ADDR_FMT,
3495 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
3496 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
3497 		peer->mld_peer->state = peer->state;
3498 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
3499 		dp_rx_flush_rx_cached(peer->mld_peer, false);
3500 	}
3501 
3502 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3503 
3504 	return QDF_STATUS_SUCCESS;
3505 }
3506 
3507 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3508 				enum ol_txrx_peer_state state)
3509 {
3510 	struct dp_peer *peer;
3511 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3512 
3513 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3514 				       DP_MOD_ID_CDP);
3515 	if (!peer) {
3516 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
3517 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3518 		return QDF_STATUS_E_FAILURE;
3519 	}
3520 	peer->state = state;
3521 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3522 
3523 	if (peer->txrx_peer)
3524 		peer->txrx_peer->authorize = peer->authorize;
3525 
3526 	dp_peer_info("peer" QDF_MAC_ADDR_FMT "state %d",
3527 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3528 		     peer->state);
3529 
3530 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
3531 		peer->mld_peer->state = peer->state;
3532 		peer->mld_peer->txrx_peer->authorize = peer->authorize;
3533 		dp_peer_info("mld peer" QDF_MAC_ADDR_FMT "state %d",
3534 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
3535 			     peer->mld_peer->state);
3536 	}
3537 
3538 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3539 	 * Decrement it here.
3540 	 */
3541 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3542 
3543 	return QDF_STATUS_SUCCESS;
3544 }
3545 #else
3546 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3547 			    struct ol_txrx_desc_type *sta_desc)
3548 {
3549 	struct dp_peer *peer;
3550 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3551 
3552 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3553 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3554 
3555 	if (!peer)
3556 		return QDF_STATUS_E_FAULT;
3557 
3558 	qdf_spin_lock_bh(&peer->peer_info_lock);
3559 	peer->state = OL_TXRX_PEER_STATE_CONN;
3560 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3561 
3562 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
3563 
3564 	dp_rx_flush_rx_cached(peer, false);
3565 
3566 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3567 
3568 	return QDF_STATUS_SUCCESS;
3569 }
3570 
3571 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3572 				enum ol_txrx_peer_state state)
3573 {
3574 	struct dp_peer *peer;
3575 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3576 
3577 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3578 				       DP_MOD_ID_CDP);
3579 	if (!peer) {
3580 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
3581 			    soc, QDF_MAC_ADDR_REF(peer_mac));
3582 		return QDF_STATUS_E_FAILURE;
3583 	}
3584 	peer->state = state;
3585 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
3586 
3587 	if (peer->txrx_peer)
3588 		peer->txrx_peer->authorize = peer->authorize;
3589 
3590 	dp_info("peer %pK state %d", peer, peer->state);
3591 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3592 	 * Decrement it here.
3593 	 */
3594 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3595 
3596 	return QDF_STATUS_SUCCESS;
3597 }
3598 #endif
3599 
3600 QDF_STATUS
3601 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3602 	      struct qdf_mac_addr peer_addr)
3603 {
3604 	struct dp_peer *peer;
3605 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3606 
3607 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
3608 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3609 
3610 	if (!peer)
3611 		return QDF_STATUS_E_FAULT;
3612 	if (!peer->valid) {
3613 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3614 		return QDF_STATUS_E_FAULT;
3615 	}
3616 
3617 	dp_clear_peer_internal(soc, peer);
3618 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3619 	return QDF_STATUS_SUCCESS;
3620 }
3621 
3622 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3623 			 uint8_t *vdev_id)
3624 {
3625 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3626 	struct dp_peer *peer =
3627 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3628 				       DP_MOD_ID_CDP);
3629 
3630 	if (!peer)
3631 		return QDF_STATUS_E_FAILURE;
3632 
3633 	dp_info("peer %pK vdev %pK vdev id %d",
3634 		peer, peer->vdev, peer->vdev->vdev_id);
3635 	*vdev_id = peer->vdev->vdev_id;
3636 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3637 	 * Decrement it here.
3638 	 */
3639 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3640 
3641 	return QDF_STATUS_SUCCESS;
3642 }
3643 
3644 struct cdp_vdev *
3645 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3646 			 struct qdf_mac_addr peer_addr)
3647 {
3648 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3649 	struct dp_peer *peer = NULL;
3650 	struct cdp_vdev *vdev = NULL;
3651 
3652 	if (!pdev) {
3653 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
3654 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
3655 		return NULL;
3656 	}
3657 
3658 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
3659 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
3660 	if (!peer) {
3661 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3662 			  "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT,
3663 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
3664 		return NULL;
3665 	}
3666 
3667 	vdev = (struct cdp_vdev *)peer->vdev;
3668 
3669 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3670 	return vdev;
3671 }
3672 
3673 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3674 {
3675 	struct dp_peer *peer = peer_handle;
3676 
3677 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3678 	return (struct cdp_vdev *)peer->vdev;
3679 }
3680 
3681 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3682 {
3683 	struct dp_peer *peer = peer_handle;
3684 	uint8_t *mac;
3685 
3686 	mac = peer->mac_addr.raw;
3687 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3688 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3689 	return peer->mac_addr.raw;
3690 }
3691 
3692 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3693 		      uint8_t *peer_mac)
3694 {
3695 	enum ol_txrx_peer_state peer_state;
3696 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3697 	struct cdp_peer_info peer_info = { 0 };
3698 	struct dp_peer *peer;
3699 	struct dp_peer *tgt_peer;
3700 
3701 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
3702 				 false, CDP_WILD_PEER_TYPE);
3703 
3704 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
3705 
3706 	if (!peer)
3707 		return OL_TXRX_PEER_STATE_INVALID;
3708 
3709 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
3710 
3711 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
3712 	peer_state = tgt_peer->state;
3713 
3714 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3715 
3716 	return peer_state;
3717 }
3718 
3719 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3720 {
3721 	int i;
3722 
3723 	/* point the freelist to the first ID */
3724 	pdev->local_peer_ids.freelist = 0;
3725 
3726 	/* link each ID to the next one */
3727 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3728 		pdev->local_peer_ids.pool[i] = i + 1;
3729 		pdev->local_peer_ids.map[i] = NULL;
3730 	}
3731 
3732 	/* link the last ID to itself, to mark the end of the list */
3733 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3734 	pdev->local_peer_ids.pool[i] = i;
3735 
3736 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3737 	dp_info("Peer pool init");
3738 }
3739 
3740 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3741 {
3742 	int i;
3743 
3744 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3745 	i = pdev->local_peer_ids.freelist;
3746 	if (pdev->local_peer_ids.pool[i] == i) {
3747 		/* the list is empty, except for the list-end marker */
3748 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3749 	} else {
3750 		/* take the head ID and advance the freelist */
3751 		peer->local_id = i;
3752 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3753 		pdev->local_peer_ids.map[i] = peer;
3754 	}
3755 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3756 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3757 }
3758 
3759 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3760 {
3761 	int i = peer->local_id;
3762 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3763 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3764 		return;
3765 	}
3766 
3767 	/* put this ID on the head of the freelist */
3768 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3769 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3770 	pdev->local_peer_ids.freelist = i;
3771 	pdev->local_peer_ids.map[i] = NULL;
3772 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3773 }
3774 
3775 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3776 				uint8_t vdev_id, uint8_t *peer_addr)
3777 {
3778 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3779 	struct dp_peer *peer = NULL;
3780 
3781 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
3782 				      DP_MOD_ID_CDP);
3783 	if (!peer)
3784 		return false;
3785 
3786 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3787 
3788 	return true;
3789 }
3790 
3791 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3792 				      uint8_t vdev_id, uint8_t *peer_addr,
3793 				      uint16_t max_bssid)
3794 {
3795 	int i;
3796 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3797 	struct dp_peer *peer = NULL;
3798 
3799 	for (i = 0; i < max_bssid; i++) {
3800 		/* Need to check vdevs other than the vdev_id */
3801 		if (vdev_id == i)
3802 			continue;
3803 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
3804 					      DP_MOD_ID_CDP);
3805 		if (peer) {
3806 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
3807 			       QDF_MAC_ADDR_REF(peer_addr), i);
3808 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3809 			return true;
3810 		}
3811 	}
3812 
3813 	return false;
3814 }
3815 
3816 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3817 			      uint8_t *peer_mac, bool val)
3818 {
3819 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3820 	struct dp_peer *peer = NULL;
3821 
3822 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3823 				      DP_MOD_ID_CDP);
3824 	if (!peer) {
3825 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
3826 		       QDF_MAC_ADDR_REF(peer_mac));
3827 		return;
3828 	}
3829 
3830 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
3831 		val, QDF_MAC_ADDR_REF(peer_mac));
3832 	peer->is_tdls_peer = val;
3833 
3834 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3835 }
3836 #endif
3837 
3838 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3839 			uint8_t *peer_addr)
3840 {
3841 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3842 	struct dp_peer *peer = NULL;
3843 
3844 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
3845 				      DP_MOD_ID_CDP);
3846 	if (peer) {
3847 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3848 		return true;
3849 	}
3850 
3851 	return false;
3852 }
3853 
3854 QDF_STATUS
3855 dp_set_michael_key(struct cdp_soc_t *soc,
3856 		   uint8_t vdev_id,
3857 		   uint8_t *peer_mac,
3858 		   bool is_unicast, uint32_t *key)
3859 {
3860 	uint8_t sec_index = is_unicast ? 1 : 0;
3861 	struct dp_peer *peer =
3862 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3863 						       peer_mac, 0, vdev_id,
3864 						       DP_MOD_ID_CDP);
3865 
3866 	if (!peer) {
3867 		dp_peer_err("%pK: peer not found ", soc);
3868 		return QDF_STATUS_E_FAILURE;
3869 	}
3870 
3871 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
3872 		     key, IEEE80211_WEP_MICLEN);
3873 
3874 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3875 
3876 	return QDF_STATUS_SUCCESS;
3877 }
3878 
3879 
3880 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
3881 					   struct dp_vdev *vdev,
3882 					   enum dp_mod_id mod_id)
3883 {
3884 	struct dp_peer *peer = NULL;
3885 
3886 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3887 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3888 		if (peer->bss_peer)
3889 			break;
3890 	}
3891 
3892 	if (!peer) {
3893 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3894 		return NULL;
3895 	}
3896 
3897 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
3898 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3899 		return peer;
3900 	}
3901 
3902 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3903 	return peer;
3904 }
3905 
3906 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
3907 						struct dp_vdev *vdev,
3908 						enum dp_mod_id mod_id)
3909 {
3910 	struct dp_peer *peer;
3911 
3912 	if (vdev->opmode != wlan_op_mode_sta)
3913 		return NULL;
3914 
3915 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3916 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3917 		if (peer->sta_self_peer)
3918 			break;
3919 	}
3920 
3921 	if (!peer) {
3922 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3923 		return NULL;
3924 	}
3925 
3926 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
3927 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
3928 		return peer;
3929 	}
3930 
3931 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3932 	return peer;
3933 }
3934 
3935 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3936 			 uint8_t *peer_mac)
3937 {
3938 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3939 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
3940 							      vdev_id,
3941 							      DP_MOD_ID_CDP);
3942 	struct dp_txrx_peer *txrx_peer;
3943 	uint8_t tid;
3944 	struct dp_rx_tid_defrag *defrag_rx_tid;
3945 
3946 	if (!peer)
3947 		return;
3948 
3949 	if (!peer->txrx_peer)
3950 		goto fail;
3951 
3952 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
3953 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3954 
3955 	txrx_peer = peer->txrx_peer;
3956 
3957 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3958 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
3959 
3960 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
3961 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
3962 		dp_rx_reorder_flush_frag(txrx_peer, tid);
3963 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
3964 	}
3965 fail:
3966 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3967 }
3968 
3969 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3970 {
3971 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
3972 						     DP_MOD_ID_HTT);
3973 
3974 	if (peer) {
3975 		/*
3976 		 * Decrement the peer ref which is taken as part of
3977 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
3978 		 */
3979 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3980 
3981 		return true;
3982 	}
3983 
3984 	return false;
3985 }
3986 
3987 qdf_export_symbol(dp_peer_find_by_id_valid);
3988 
3989 #ifdef QCA_MULTIPASS_SUPPORT
3990 void dp_peer_multipass_list_remove(struct dp_peer *peer)
3991 {
3992 	struct dp_vdev *vdev = peer->vdev;
3993 	struct dp_txrx_peer *tpeer = NULL;
3994 	bool found = 0;
3995 
3996 	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
3997 	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
3998 		if (tpeer == peer->txrx_peer) {
3999 			found = 1;
4000 			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
4001 				     mpass_peer_list_elem);
4002 			break;
4003 		}
4004 	}
4005 
4006 	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
4007 
4008 	if (found)
4009 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4010 }
4011 
4012 /**
4013  * dp_peer_multipass_list_add() - add to new multipass list
4014  * @soc: soc handle
4015  * @peer_mac: mac address
4016  * @vdev_id: vdev id for peer
4017  * @vlan_id: vlan_id
4018  *
4019  * return: void
4020  */
4021 static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
4022 				       uint8_t vdev_id, uint16_t vlan_id)
4023 {
4024 	struct dp_peer *peer =
4025 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
4026 						       vdev_id,
4027 						       DP_MOD_ID_TX_MULTIPASS);
4028 
4029 	if (qdf_unlikely(!peer)) {
4030 		qdf_err("NULL peer");
4031 		return;
4032 	}
4033 
4034 	if (qdf_unlikely(!peer->txrx_peer))
4035 		goto fail;
4036 
4037 	/* If peer already exists in vdev multipass list, do not add it.
4038 	 * This may happen if key install comes twice or re-key
4039 	 * happens for a peer.
4040 	 */
4041 	if (peer->txrx_peer->vlan_id) {
4042 		dp_debug("peer already added to vdev multipass list"
4043 			 "MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
4044 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4045 			 peer->txrx_peer->vlan_id);
4046 		goto fail;
4047 	}
4048 
4049 	/*
4050 	 * Ref_cnt is incremented inside dp_peer_find_hash_find().
4051 	 * Decrement it when element is deleted from the list.
4052 	 */
4053 	peer->txrx_peer->vlan_id = vlan_id;
4054 	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4055 	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
4056 			  peer->txrx_peer,
4057 			  mpass_peer_list_elem);
4058 	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
4059 	return;
4060 
4061 fail:
4062 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
4063 }
4064 
4065 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
4066 			 uint8_t vdev_id, uint8_t *peer_mac,
4067 			 uint16_t vlan_id)
4068 {
4069 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4070 	struct dp_vdev *vdev =
4071 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
4072 				      DP_MOD_ID_TX_MULTIPASS);
4073 
4074 	dp_info("vdev_id %d, vdev %pK, multipass_en %d, peer_mac " QDF_MAC_ADDR_FMT " vlan %d",
4075 		vdev_id, vdev, vdev ? vdev->multipass_en : 0,
4076 		QDF_MAC_ADDR_REF(peer_mac), vlan_id);
4077 	if (vdev && vdev->multipass_en) {
4078 		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
4079 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
4080 	}
4081 }
4082 #endif /* QCA_MULTIPASS_SUPPORT */
4083