/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <qdf_module.h>
#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif
#ifdef BYPASS_OL_OPS
#include <target_if_dp.h>
#endif

#ifdef REO_QDESC_HISTORY
#define REO_QDESC_HISTORY_SIZE 512
uint64_t reo_qdesc_history_idx;
struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
#endif

#ifdef FEATURE_AST
#ifdef BYPASS_OL_OPS
/*
 * dp_add_wds_entry_wrapper() - Add a new AST entry for the WDS station
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 * @type: type from enum cdp_txrx_ast_entry_type
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry in the fw.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	QDF_STATUS status;

	status = target_if_add_wds_entry(soc->ctrl_psoc,
					 peer->vdev->vdev_id,
					 peer->mac_addr.raw,
					 dest_macaddr,
					 WMI_HOST_WDS_FLAG_STATIC,
					 type);

	return qdf_status_to_os_return(status);
}

/*
 * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 *
 * This API is used to update the peer mac address for the AST
 * entry in the fw.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	QDF_STATUS status;

	status = target_if_update_wds_entry(soc->ctrl_psoc,
					    peer->vdev->vdev_id,
					    dest_macaddr,
					    peer->mac_addr.raw,
					    WMI_HOST_WDS_FLAG_STATIC);

	return qdf_status_to_os_return(status);
}

/*
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from the fw.
 *
 * Return: None
 */
static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
				     uint8_t vdev_id,
				     uint8_t *wds_macaddr,
				     uint8_t type,
				     uint8_t delete_in_fw)
{
	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
				wds_macaddr, type, delete_in_fw);
}
#else
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
					soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					peer->peer_id,
					dest_macaddr,
					peer->mac_addr.raw,
					flags,
					type);

	return status;
}

static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				dest_macaddr,
				peer->mac_addr.raw,
				flags);

	return status;
}

static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
				     uint8_t vdev_id,
				     uint8_t *wds_macaddr,
				     uint8_t type,
				     uint8_t delete_in_fw)
{
	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						vdev_id,
						wds_macaddr,
						type,
						delete_in_fw);
}
#endif
#endif
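
/*
 * Illustrative sketch (not part of the driver build): the wrappers above
 * pick, at compile time, between a direct target_if call (BYPASS_OL_OPS)
 * and an indirect call through the registered cdp ol_ops table. The model
 * below shows the same pattern with hypothetical names; the
 * DP_PEER_DOC_EXAMPLES guard is likewise hypothetical and never defined.
 */
#ifdef DP_PEER_DOC_EXAMPLES
int doc_target_add_entry(const uint8_t *mac, uint32_t flags);

struct doc_ops {
	int (*add_entry)(const uint8_t *mac, uint32_t flags);
};

#ifdef BYPASS_OL_OPS
/* direct path: no callback-table indirection */
static int doc_add_entry(struct doc_ops *ops, const uint8_t *mac,
			 uint32_t flags)
{
	return doc_target_add_entry(mac, flags);
}
#else
/* control-plane path: dispatch through the registered callback */
static int doc_add_entry(struct doc_ops *ops, const uint8_t *mac,
			 uint32_t flags)
{
	return ops->add_entry(mac, flags);
}
#endif
#endif /* DP_PEER_DOC_EXAMPLES */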

#ifdef FEATURE_WDS
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	/* If peer map v2 is enabled, the ast entry is not freed here; it
	 * is supposed to be freed in the unmap event (after we receive
	 * delete confirmation from the target).
	 *
	 * If peer_id is invalid, we did not get the peer map event for
	 * the peer; only in this case free the ast entry from here.
	 */

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
		return true;

	return false;
}
#else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	return false;
}

void dp_soc_wds_attach(struct dp_soc *soc)
{
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

#ifdef REO_QDESC_HISTORY
static inline void
dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
			    enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}
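
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * history-ring indexing used above: a monotonically increasing counter is
 * masked with (size - 1), which only works because REO_QDESC_HISTORY_SIZE
 * is a power of two; the oldest slots are silently overwritten on wrap.
 */
#ifdef DP_PEER_DOC_EXAMPLES
#define DOC_HIST_SIZE 512	/* must be a power of two */

static uint64_t doc_hist_counter;
static uint32_t doc_hist_ring[DOC_HIST_SIZE];

static void doc_hist_add(uint32_t value)
{
	uint32_t idx;

	/* equivalent to (counter % DOC_HIST_SIZE), but cheaper */
	doc_hist_counter++;
	idx = (uint32_t)(doc_hist_counter & (DOC_HIST_SIZE - 1));

	doc_hist_ring[idx] = value;
}
#endif /* DP_PEER_DOC_EXAMPLES */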

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
static inline void
dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
				 enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = desc->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
#endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)

#else
#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc)

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
#endif

static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
					uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	dp_peer_debug("Setting SSN valid bit to %d",
		      valid);
}
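
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * update-flag pattern above: each writable field in a REO queue-update
 * command is paired with an "update_*" bit, so the command only touches
 * the fields the caller explicitly marked.
 */
#ifdef DP_PEER_DOC_EXAMPLES
struct doc_upd_params {
	uint8_t update_svld;	/* 1: apply the svld field below */
	uint8_t svld;		/* new SSN-valid value */
};

static void doc_set_ssn_valid(struct doc_upd_params *p, uint8_t valid)
{
	p->update_svld = 1;	/* mark the field as to-be-updated */
	p->svld = valid;
}
#endif /* DP_PEER_DOC_EXAMPLES */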

QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
{
	uint32_t max_ast_index;

	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/* allocate ast_table for ast entry to ast_index map */
	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
	soc->ast_table = qdf_mem_malloc(max_ast_index *
					sizeof(struct dp_ast_entry *));
	if (!soc->ast_table) {
		dp_peer_err("%pK: ast_table memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS; /* success */
}
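
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * flat table allocated above: hardware hands back an ast_index, and the
 * host resolves it with a single array lookup rather than a hash walk.
 */
#ifdef DP_PEER_DOC_EXAMPLES
struct doc_ast;

static struct doc_ast **doc_ast_table;	/* doc_ast_table[ast_idx] */

static struct doc_ast *doc_ast_lookup(uint32_t ast_idx, uint32_t max_idx)
{
	if (ast_idx >= max_idx)
		return NULL;	/* an index from HW must be range-checked */

	return doc_ast_table[ast_idx];
}
#endif /* DP_PEER_DOC_EXAMPLES */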

/*
 * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peer_id;
	/* allocate the peer ID -> peer object map */
	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		dp_peer_err("%pK: peer map memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);

	qdf_spinlock_create(&soc->peer_map_lock);
	return QDF_STATUS_SUCCESS; /* success */
}

#define DP_AST_HASH_LOAD_MULT  2
#define DP_AST_HASH_LOAD_SHIFT 0

static inline uint32_t
dp_peer_find_hash_index(struct dp_soc *soc,
			union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;

	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
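
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * hash above: the 6-byte MAC is viewed as three aligned 16-bit words which
 * are XOR-folded, the high bits are folded into the low bits, and the
 * result is masked to a power-of-two table size.
 */
#ifdef DP_PEER_DOC_EXAMPLES
static uint32_t doc_mac_hash(const uint16_t mac_words[3],
			     uint32_t idx_bits, uint32_t mask)
{
	uint32_t index = mac_words[0] ^ mac_words[1] ^ mac_words[2];

	index ^= index >> idx_bits;	/* fold high bits into low bits */
	return index & mask;		/* mask == (1 << idx_bits) - 1 */
}
#endif /* DP_PEER_DOC_EXAMPLES */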

/*
 * dp_peer_find_hash_find() - returns legacy or mlo link peer from
 *			      peer_hash_table matching vdev_id and mac_address
 * @soc: soc handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * return: peer on success
 *         NULL on failure
 */
struct dp_peer *dp_peer_find_hash_find(
				struct dp_soc *soc, uint8_t *peer_mac_addr,
				int mac_addr_is_aligned, uint8_t vdev_id,
				enum dp_mod_id mod_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_peer *peer;

	if (!soc->peer_hash.bins)
		return NULL;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
			/* take peer reference before returning */
			if (dp_peer_get_ref(soc, peer, mod_id) !=
						QDF_STATUS_SUCCESS)
				peer = NULL;

			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return NULL; /* failure */
}

qdf_export_symbol(dp_peer_find_hash_find);
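
/*
 * Usage sketch (hypothetical caller, guarded out of the build): a
 * successful dp_peer_find_hash_find() returns a referenced peer, so every
 * hit must be paired with dp_peer_unref_delete() under the same mod_id.
 */
#ifdef DP_PEER_DOC_EXAMPLES
static void doc_peer_lookup_usage(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(soc, mac, 0 /* not aligned */,
				      DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer)
		return;

	/* ... safe to use the peer while the reference is held ... */

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#endif /* DP_PEER_DOC_EXAMPLES */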

#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
 * @soc: soc handle
 *
 * return: none
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}

	if (soc->arch_ops.mlo_peer_find_hash_detach)
		soc->arch_ops.mlo_peer_find_hash_detach(soc);
}

/*
 * dp_peer_find_hash_attach() - allocate memory for peer_hash table
 * @soc: soc handle
 *
 * return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);

	if (soc->arch_ops.mlo_peer_find_hash_attach &&
	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
			QDF_STATUS_SUCCESS)) {
		dp_peer_find_hash_detach(soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
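
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * sizing math above: scale max_peers by the load multiplier/shift, then
 * round up to the next power of two so the lookup can use "& mask" instead
 * of a modulo.
 */
#ifdef DP_PEER_DOC_EXAMPLES
static uint32_t doc_log2_ceil(uint32_t val)
{
	uint32_t log2 = 0;

	while ((1U << log2) < val)
		log2++;
	return log2;
}

static void doc_hash_dims(uint32_t max_peers, uint32_t mult, uint32_t shift,
			  uint32_t *mask, uint32_t *idx_bits)
{
	uint32_t hash_elems = (max_peers * mult) >> shift;
	uint32_t log2 = doc_log2_ceil(hash_elems);

	*idx_bits = log2;
	*mask = (1U << log2) - 1;	/* == hash_elems - 1 after rounding */
}
#endif /* DP_PEER_DOC_EXAMPLES */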

/*
 * dp_peer_find_hash_add() - add peer to peer_hash_table
 * @soc: soc handle
 * @peer: peer handle
 *
 * return: none
 */
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		qdf_spin_lock_bh(&soc->peer_hash_lock);

		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
							DP_MOD_ID_CONFIG))) {
			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return;
		}

		/*
		 * It is important to add the new peer at the tail of
		 * peer list with the bin index. Together with having
		 * the hash_find function search from head to tail,
		 * this ensures that if two entries with the same MAC address
		 * are stored, the one added first will be found first.
		 */
		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
				  hash_list_elem);

		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_add)
			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}

/*
 * dp_peer_find_hash_remove() - remove peer from peer_hash_table
 * @soc: soc handle
 * @peer: peer handle
 *
 * return: none
 */
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);

	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		/* Check if tail is not empty before delete */
		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

		qdf_spin_lock_bh(&soc->peer_hash_lock);
		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
			      hash_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		QDF_ASSERT(found);
		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
			     hash_list_elem);

		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_remove)
			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}
#else
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);
	return QDF_STATUS_SUCCESS;
}

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}
}

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
		return;
	}

	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index.  Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);

	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);

	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

#endif /* WLAN_FEATURE_11BE_MLO */

/*
 * dp_peer_vdev_list_add() - add peer into vdev's peer list
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer: peer handle
 *
 * return: none
 */
void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	/* only link peer will be added to vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return;
	}

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	vdev->num_peers++;
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
 * @soc: SoC handle
 * @vdev: VDEV handle
 * @peer: peer handle
 *
 * Return: none
 */
void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer)
{
	uint8_t found = 0;
	struct dp_peer *tmppeer = NULL;

	/* only link peer will be added to vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
			     peer_list_elem);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		vdev->num_peers--;
	} else {
		/* Ignoring the remove operation as peer not found */
		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
			      , soc, peer, vdev, &peer->vdev->peer_list);
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
 * @soc: SoC handle
 * @peer: peer handle
 * @txrx_peer: txrx peer handle
 *
 * Return: None
 */
void dp_txrx_peer_attach_add(struct dp_soc *soc,
			     struct dp_peer *peer,
			     struct dp_txrx_peer *txrx_peer)
{
	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->txrx_peer = txrx_peer;
	txrx_peer->bss_peer = peer->bss_peer;

	if (peer->peer_id == HTT_INVALID_PEER) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	txrx_peer->peer_id = peer->peer_id;

	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);

	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
 * @soc: SoC handle
 * @peer: peer handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->peer_id = peer_id;

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
		if (peer->txrx_peer)
			peer->txrx_peer->peer_id = peer_id;
	} else {
		/* Peer map event came for peer_id which
		 * is already mapped, this is not expected
		 */
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		QDF_ASSERT(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
 * @soc: SoC handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id)
{
	struct dp_peer *peer = NULL;
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = soc->peer_id_to_obj_map[peer_id];
	peer->peer_id = HTT_INVALID_PEER;
	if (peer->txrx_peer)
		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
	soc->peer_id_to_obj_map[peer_id] = NULL;
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
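
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * reference discipline above: the peer_id map takes one reference when the
 * pointer is published and drops that same reference when the slot is
 * cleared, so the map always owns exactly one reference per mapped peer.
 * Locking is omitted for brevity.
 */
#ifdef DP_PEER_DOC_EXAMPLES
void doc_obj_get(void *obj);
void doc_obj_put(void *obj);

static void doc_id_map_add(void **map, uint16_t id, void *obj)
{
	doc_obj_get(obj);	/* the map owns one reference */
	map[id] = obj;
}

static void doc_id_map_remove(void **map, uint16_t id)
{
	void *obj = map[id];

	map[id] = NULL;
	doc_obj_put(obj);	/* drop the map's reference */
}
#endif /* DP_PEER_DOC_EXAMPLES */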

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	int log2, hash_elems, i;

	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
	hash_elems = 1 << log2;

	soc->mec_hash.mask = hash_elems - 1;
	soc->mec_hash.idx_bits = log2;

	dp_peer_info("%pK: max mec index: %d",
		     soc, DP_PEER_MAX_MEC_IDX);

	/* allocate an array of TAILQ mec object lists */
	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
					    sizeof(TAILQ_HEAD(anonymous_tail_q,
							      dp_mec_entry)));

	if (!soc->mec_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->mec_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: MEC hash
 */
static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->mec_hash.idx_bits;
	index &= soc->mec_hash.mask;
	return index;
}

struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}

/**
 * dp_peer_mec_hash_add() - Add MEC entry into hash table
 * @soc: SoC handle
 * @mecentry: MEC entry
 *
 * This function adds the MEC entry into SoC MEC hash table
 *
 * Return: None
 */
static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
					struct dp_mec_entry *mecentry)
{
	uint32_t index;

	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
	qdf_spin_lock_bh(&soc->mec_lock);
	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
	qdf_spin_unlock_bh(&soc->mec_lock);
}

QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
					 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	dp_peer_mec_hash_add(soc, mecentry);

	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}
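
/*
 * Usage sketch (hypothetical caller, guarded out of the build): for
 * dp_peer_mec_add_entry(), QDF_STATUS_E_ALREADY means the entry existed
 * and was merely re-marked active, which callers can treat as success.
 */
#ifdef DP_PEER_DOC_EXAMPLES
static bool doc_mec_learn(struct dp_soc *soc, struct dp_vdev *vdev,
			  uint8_t *mac_addr)
{
	QDF_STATUS status = dp_peer_mec_add_entry(soc, vdev, mac_addr);

	/* new entry added or existing one refreshed */
	if (QDF_IS_STATUS_SUCCESS(status) || status == QDF_STATUS_E_ALREADY)
		return true;

	/* table limit reached or allocation failure: skip MEC learning */
	return false;
}
#endif /* DP_PEER_DOC_EXAMPLES */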

void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}

void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
{
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
			   mecentry_next) {
		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
		qdf_mem_free(mecentry);
		qdf_atomic_dec(&soc->mec_cnt);
		DP_STATS_INC(soc, mec.deleted, 1);
	}
}

/**
 * dp_peer_mec_hash_detach() - Free MEC Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}

void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
#else
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
#endif

#ifdef FEATURE_AST
#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_peer_exist_on_pdev - check if a peer with the mac address exists on pdev
 *
 * @soc: Datapath SOC handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac address aligned
 * @pdev: Datapath PDEV handle
 *
 * Return: true if peer found else return false
 */
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);

	if (found)
		return found;

	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
					  mac_addr_is_aligned, DP_VDEV_ALL,
					  DP_MOD_ID_CDP);
	if (peer) {
		if (peer->vdev->pdev == pdev)
			found = true;
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return found;
}
#else
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return found;
}
#endif /* WLAN_FEATURE_11BE_MLO */

/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
		     soc, hash_elems, max_ast_idx);

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);

	/* Call the callbacks to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	if (!soc->ast_hash.bins)
		return;

	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				soc->num_ast_entries--;
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
	soc->ast_hash.bins = NULL;
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into SoC AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function removes the AST entry from soc AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	if (soc->ast_offload_support && !soc->host_ast_db_enable)
		return;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);

	if (found)
		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @vdev_id: vdev id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((vdev_id == ase->vdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @pdev_id: pdev id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	if (!soc->ast_hash.bins)
		return NULL;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type type;
	struct dp_peer *peer;
	struct dp_peer *old_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (is_wds)
		type = CDP_TXRX_AST_TYPE_WDS;
	else
		type = CDP_TXRX_AST_TYPE_STATIC;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
	if (!peer) {
		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
			     soc, peer_id,
			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
		type = CDP_TXRX_AST_TYPE_MLD;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if (type != CDP_TXRX_AST_TYPE_STATIC &&
		    type != CDP_TXRX_AST_TYPE_MLD &&
		    type != CDP_TXRX_AST_TYPE_SELF) {
			status = QDF_STATUS_E_BUSY;
			goto fail;
		}
	}

	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, vdev->vdev_id, type,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));
	/*
	 * In an MLO scenario, the same mac address may be used both as a
	 * link mac address and as an MLD mac address, so a duplicate AST
	 * map needs to be handled for the non-MLD type.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
			      hw_peer_id, vdev_id,
			      QDF_MAC_ADDR_REF(mac_addr));

		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						   DP_MOD_ID_AST);
		if (!old_peer) {
			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
				     soc, ast_entry->peer_id,
				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
			qdf_spin_unlock_bh(&soc->ast_lock);
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
		dp_peer_free_ast_entry(soc, ast_entry);
		if (old_peer)
			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
	}

	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));
	if (!ast_entry) {
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_ASSERT(0);
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->type = type;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		break;
	case CDP_TXRX_AST_TYPE_MLD:
		break;
	default:
		dp_peer_alert("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	ast_entry->ast_idx = hw_peer_id;
	ast_entry->ast_hash_value = ast_hash;
	ast_entry->peer_id = peer_id;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
			  ase_list_elem);

	qdf_spin_unlock_bh(&soc->ast_lock);
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return status;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	if (!peer) {
		return QDF_STATUS_E_INVAL;
	}

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases, like an Auth attack on a repeater,
		 * the number of ast_entries falling in the same hash
		 * bucket can exceed the max_skid length supported by
		 * HW in the root AP. In these cases the FW will return
		 * the hw_peer_id (ast_index) as 0xffff, indicating HW
		 * could not add the entry in its table. The host has to
		 * delete the entry from its table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_id,
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}
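
/*
 * Standalone sketch (hypothetical names, guarded out of the build) of the
 * failure path described above: when the target reports the index as
 * 0xffff, the host must unwind its own copy of the entry instead of
 * mapping it.
 */
#ifdef DP_PEER_DOC_EXAMPLES
#define DOC_INVALID_HW_IDX 0xffff

struct doc_ast {
	uint16_t hw_idx;
};

void doc_ast_delete(struct doc_ast *ast);

static int doc_handle_map_event(struct doc_ast *ast, uint16_t hw_idx)
{
	if (hw_idx == DOC_INVALID_HW_IDX) {
		/* target table full (hash skid exceeded): delete host copy */
		doc_ast_delete(ast);
		return -1;
	}

	ast->hw_idx = hw_idx;	/* normal map: record the target index */
	return 0;
}
#endif /* DP_PEER_DOC_EXAMPLES */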

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id, DP_MOD_ID_AST);
	if (peer) {
		err = dp_peer_add_ast(soc, peer,
				      &param->mac_addr.raw[0],
				      param->type,
				      param->flags);

		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
					param->type, err, false);

		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}
	qdf_mem_free(cookie);
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @flags: ast entry flags
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;
	int status = 0;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If the AST entry already exists, just return from here.
	 * An ast entry with the same mac address can exist on different
	 * radios; if ast_override support is enabled, use search by pdev
	 * in this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* For HMWDS_SEC, entries can be added for the same mac
		 * address; do not check for an existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
			/* An add for an HMWDS entry cannot be ignored if
			 * there is an AST entry with the same mac address:
			 *
			 * if an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which can take care of adding the HMWDS
			 * ast entry on delete confirmation from the target
			 */
1805 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1806 				struct dp_ast_free_cb_params *param = NULL;
1807 
1808 				if (ast_entry->type ==
1809 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1810 					goto add_ast_entry;
1811 
1812 				/* save existing callback */
1813 				if (ast_entry->callback) {
1814 					cb = ast_entry->callback;
1815 					cookie = ast_entry->cookie;
1816 				}
1817 
1818 				param = qdf_mem_malloc(sizeof(*param));
1819 				if (!param) {
1820 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1821 						  QDF_TRACE_LEVEL_ERROR,
1822 						  "Allocation failed");
1823 					qdf_spin_unlock_bh(&soc->ast_lock);
1824 					return QDF_STATUS_E_NOMEM;
1825 				}
1826 
1827 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1828 					     QDF_MAC_ADDR_SIZE);
1829 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1830 					     &peer->mac_addr.raw[0],
1831 					     QDF_MAC_ADDR_SIZE);
1832 				param->type = type;
1833 				param->flags = flags;
1834 				param->vdev_id = vdev->vdev_id;
1835 				ast_entry->callback = dp_peer_free_hmwds_cb;
1836 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1837 				ast_entry->type = type;
1838 				ast_entry->cookie = (void *)param;
1839 				if (!ast_entry->delete_in_progress)
1840 					dp_peer_del_ast(soc, ast_entry);
1841 
1842 				qdf_spin_unlock_bh(&soc->ast_lock);
1843 
				/* Call the saved callback */
1845 				if (cb) {
1846 					cb(soc->ctrl_psoc,
1847 					   dp_soc_to_cdp_soc(soc),
1848 					   cookie,
1849 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1850 				}
1851 				return QDF_STATUS_E_AGAIN;
1852 			}
1853 
1854 			qdf_spin_unlock_bh(&soc->ast_lock);
1855 			return QDF_STATUS_E_ALREADY;
1856 		}
1857 	}
1858 
1859 add_ast_entry:
1860 	ast_entry = (struct dp_ast_entry *)
1861 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1862 
1863 	if (!ast_entry) {
1864 		qdf_spin_unlock_bh(&soc->ast_lock);
1865 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1866 		QDF_ASSERT(0);
1867 		return QDF_STATUS_E_NOMEM;
1868 	}
1869 
1870 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1871 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1872 	ast_entry->is_mapped = false;
1873 	ast_entry->delete_in_progress = false;
1874 	ast_entry->peer_id = HTT_INVALID_PEER;
1875 	ast_entry->next_hop = 0;
1876 	ast_entry->vdev_id = vdev->vdev_id;
1877 
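	/* Classify the new entry; next_hop is set for entries that
	 * represent destinations reachable via another hop (the WDS
	 * variants and DA).
	 */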
1878 	switch (type) {
1879 	case CDP_TXRX_AST_TYPE_STATIC:
1880 		peer->self_ast_entry = ast_entry;
1881 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1882 		if (peer->vdev->opmode == wlan_op_mode_sta)
1883 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1884 		break;
1885 	case CDP_TXRX_AST_TYPE_SELF:
1886 		peer->self_ast_entry = ast_entry;
1887 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1888 		break;
1889 	case CDP_TXRX_AST_TYPE_WDS:
1890 		ast_entry->next_hop = 1;
1891 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1892 		break;
1893 	case CDP_TXRX_AST_TYPE_WDS_HM:
1894 		ast_entry->next_hop = 1;
1895 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1896 		break;
1897 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1898 		ast_entry->next_hop = 1;
1899 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1900 		ast_entry->peer_id = peer->peer_id;
1901 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1902 				  ase_list_elem);
1903 		break;
1904 	case CDP_TXRX_AST_TYPE_DA:
1905 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1906 							  DP_MOD_ID_AST);
1907 		if (!vap_bss_peer) {
1908 			qdf_spin_unlock_bh(&soc->ast_lock);
1909 			qdf_mem_free(ast_entry);
1910 			return QDF_STATUS_E_FAILURE;
1911 		}
1912 		peer = vap_bss_peer;
1913 		ast_entry->next_hop = 1;
1914 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1915 		break;
1916 	default:
1917 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1918 	}
1919 
1920 	ast_entry->is_active = TRUE;
1921 	DP_STATS_INC(soc, ast.added, 1);
1922 	soc->num_ast_entries++;
1923 	dp_peer_ast_hash_add(soc, ast_entry);
1924 
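	/* STATIC, SELF, STA_BSS and WDS_HM_SEC entries are host-only;
	 * every other type is also added to the FW AST table.
	 */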
1925 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1926 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1927 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1928 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1929 		status = dp_add_wds_entry_wrapper(soc,
1930 						  peer,
1931 						  mac_addr,
1932 						  flags,
1933 						  ast_entry->type);
1934 
1935 	if (vap_bss_peer)
1936 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1937 
1938 	qdf_spin_unlock_bh(&soc->ast_lock);
1939 	return qdf_status_from_os_return(status);
1940 }
1941 
1942 qdf_export_symbol(dp_peer_add_ast);
1943 
1944 /*
1945  * dp_peer_free_ast_entry() - Free up the ast entry memory
1946  * @soc: SoC handle
1947  * @ast_entry: Address search entry
1948  *
1949  * This API is used to free up the memory associated with
1950  * AST entry.
1951  *
1952  * Return: None
1953  */
1954 void dp_peer_free_ast_entry(struct dp_soc *soc,
1955 			    struct dp_ast_entry *ast_entry)
1956 {
1957 	/*
1958 	 * NOTE: Ensure that call to this API is done
1959 	 * after soc->ast_lock is taken
1960 	 */
1961 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1962 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1963 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1964 
1965 	ast_entry->callback = NULL;
1966 	ast_entry->cookie = NULL;
1967 
1968 	DP_STATS_INC(soc, ast.deleted, 1);
1969 	dp_peer_ast_hash_remove(soc, ast_entry);
1970 	dp_peer_ast_cleanup(soc, ast_entry);
1971 	qdf_mem_free(ast_entry);
1972 	soc->num_ast_entries--;
1973 }
1974 
1975 /*
 * dp_peer_unlink_ast_entry() - Unlink AST entry from the peer list
 * @soc: SoC handle
 * @ast_entry: Address search entry
 * @peer: peer
 *
 * This API is used to remove/unlink the AST entry from the peer's AST
 * list and, if mapped, clear its slot in the soc AST table.
1983  *
1984  * Return: None
1985  */
1986 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1987 			      struct dp_ast_entry *ast_entry,
1988 			      struct dp_peer *peer)
1989 {
1990 	if (!peer) {
1991 		dp_info_rl("NULL peer");
1992 		return;
1993 	}
1994 
1995 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1996 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1997 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1998 			  ast_entry->type);
1999 		return;
2000 	}
2001 	/*
2002 	 * NOTE: Ensure that call to this API is done
2003 	 * after soc->ast_lock is taken
2004 	 */
2005 
2006 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
2007 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
2008 
2009 	if (ast_entry == peer->self_ast_entry)
2010 		peer->self_ast_entry = NULL;
2011 
2012 	/*
2013 	 * release the reference only if it is mapped
2014 	 * to ast_table
2015 	 */
2016 	if (ast_entry->is_mapped)
2017 		soc->ast_table[ast_entry->ast_idx] = NULL;
2018 
2019 	ast_entry->peer_id = HTT_INVALID_PEER;
2020 }
2021 
2022 /*
2023  * dp_peer_del_ast() - Delete and free AST entry
2024  * @soc: SoC handle
2025  * @ast_entry: AST entry of the node
2026  *
2027  * This function removes the AST entry from peer and soc tables
2028  * It assumes caller has taken the ast lock to protect the access to these
2029  * tables
2030  *
2031  * Return: None
2032  */
2033 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2034 {
2035 	struct dp_peer *peer = NULL;
2036 
2037 	if (soc->ast_offload_support)
2038 		return;
2039 
2040 	if (!ast_entry) {
2041 		dp_info_rl("NULL AST entry");
2042 		return;
2043 	}
2044 
2045 	if (ast_entry->delete_in_progress) {
2046 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2047 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2048 			  ast_entry->type);
2049 		return;
2050 	}
2051 
2052 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2053 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2054 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2055 
2056 	ast_entry->delete_in_progress = true;
2057 
	/* In teardown, del ast is called after the logical delete state is
	 * set; use __dp_peer_get_ref_by_id to get the reference
	 * irrespective of peer state.
	 */
2062 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2063 				       DP_MOD_ID_AST);
2064 
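	/* For next_hop (WDS) entries, issue the WDS delete towards FW
	 * (skipped when the peer itself is already being deleted in FW).
	 */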
2065 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2066 
2067 	/* Remove SELF and STATIC entries in teardown itself */
2068 	if (!ast_entry->next_hop)
2069 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2070 
2071 	if (ast_entry->is_mapped)
2072 		soc->ast_table[ast_entry->ast_idx] = NULL;
2073 
	/* If peer map v2 is enabled, the AST entry is not freed here; it
	 * is supposed to be freed in the unmap event (after delete
	 * confirmation is received from the target).
	 *
	 * If peer_id is invalid, the peer map event was never received
	 * for this peer; only in that case free the AST entry from here.
	 */
2081 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2082 		goto end;
2083 
	/* For a WDS secondary entry, ast_entry->next_hop is set, so
	 * unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry, no unmap notification will
	 * come from FW; hence the unlinking is done right here.
	 */
2089 
2090 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2091 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2092 
2093 	dp_peer_free_ast_entry(soc, ast_entry);
2094 
2095 end:
2096 	if (peer)
2097 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2098 }
2099 
2100 /*
 * dp_peer_update_ast() - Update AST entry for a roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to point to the roamed peer in the
 * peer and soc tables. It assumes the caller has taken the ast lock to
 * protect access to these tables.
2110  *
2111  * Return: 0 if ast entry is updated successfully
2112  *         -1 failure
2113  */
2114 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2115 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2116 {
2117 	int ret = -1;
2118 	struct dp_peer *old_peer;
2119 
2120 	if (soc->ast_offload_support)
2121 		return QDF_STATUS_E_INVAL;
2122 
2123 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2124 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2125 		      peer->vdev->vdev_id, flags,
2126 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2127 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2128 
2129 	/* Do not send AST update in below cases
2130 	 *  1) Ast entry delete has already triggered
2131 	 *  2) Peer delete is already triggered
2132 	 *  3) We did not get the HTT map for create event
2133 	 */
2134 	if (ast_entry->delete_in_progress ||
2135 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2136 	    !ast_entry->is_mapped)
2137 		return ret;
2138 
2139 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2140 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2141 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2142 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2143 		return 0;
2144 
2145 	/*
2146 	 * Avoids flood of WMI update messages sent to FW for same peer.
2147 	 */
2148 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2149 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2150 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2151 	    (ast_entry->is_active))
2152 		return 0;
2153 
2154 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2155 					 DP_MOD_ID_AST);
2156 	if (!old_peer)
2157 		return 0;
2158 
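	/* Detach the entry from the old (roamed-from) peer and attach it
	 * to the new peer before notifying FW of the update.
	 */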
2159 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2160 
2161 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2162 
2163 	ast_entry->peer_id = peer->peer_id;
2164 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2165 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2166 	ast_entry->vdev_id = peer->vdev->vdev_id;
2167 	ast_entry->is_active = TRUE;
2168 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2169 
2170 	ret = dp_update_wds_entry_wrapper(soc,
2171 					  peer,
2172 					  ast_entry->mac_addr.raw,
2173 					  flags);
2174 
2175 	return ret;
2176 }
2177 
2178 /*
2179  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
2180  * @soc: SoC handle
2181  * @ast_entry: AST entry of the node
2182  *
2183  * This function gets the pdev_id from the ast entry.
2184  *
2185  * Return: (uint8_t) pdev_id
2186  */
2187 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2188 				struct dp_ast_entry *ast_entry)
2189 {
2190 	return ast_entry->pdev_id;
2191 }
2192 
2193 /*
2194  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
2195  * @soc: SoC handle
2196  * @ast_entry: AST entry of the node
2197  *
2198  * This function gets the next hop from the ast entry.
2199  *
2200  * Return: (uint8_t) next_hop
2201  */
2202 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2203 				struct dp_ast_entry *ast_entry)
2204 {
2205 	return ast_entry->next_hop;
2206 }
2207 
2208 /*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
2216  */
2217 void dp_peer_ast_set_type(struct dp_soc *soc,
2218 				struct dp_ast_entry *ast_entry,
2219 				enum cdp_txrx_ast_entry_type type)
2220 {
2221 	ast_entry->type = type;
2222 }
2223 
2224 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2225 			      struct dp_ast_entry *ast_entry,
2226 			      struct dp_peer *peer)
2227 {
2228 	bool delete_in_fw = false;
2229 
2230 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
2232 		  __func__, ast_entry->type, ast_entry->pdev_id,
2233 		  ast_entry->vdev_id,
2234 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2235 		  ast_entry->next_hop, ast_entry->peer_id);
2236 
2237 	/*
	 * If peer state is logical delete, the peer is about to be
	 * torn down with a peer delete command to firmware,
2240 	 * which will cleanup all the wds ast entries.
2241 	 * So, no need to send explicit wds ast delete to firmware.
2242 	 */
2243 	if (ast_entry->next_hop) {
2244 		if (peer && dp_peer_state_cmp(peer,
2245 					      DP_PEER_STATE_LOGICAL_DELETE))
2246 			delete_in_fw = false;
2247 		else
2248 			delete_in_fw = true;
2249 
2250 		dp_del_wds_entry_wrapper(soc,
2251 					 ast_entry->vdev_id,
2252 					 ast_entry->mac_addr.raw,
2253 					 ast_entry->type,
2254 					 delete_in_fw);
2255 	}
2256 }
2257 #else
2258 void dp_peer_free_ast_entry(struct dp_soc *soc,
2259 			    struct dp_ast_entry *ast_entry)
2260 {
2261 }
2262 
2263 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2264 			      struct dp_ast_entry *ast_entry,
2265 			      struct dp_peer *peer)
2266 {
2267 }
2268 
2269 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2270 			     struct dp_ast_entry *ase)
2271 {
2272 }
2273 
2274 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2275 						     uint8_t *ast_mac_addr,
2276 						     uint8_t vdev_id)
2277 {
2278 	return NULL;
2279 }
2280 
2281 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2282 			   struct dp_peer *peer,
2283 			   uint8_t *mac_addr,
2284 			   enum cdp_txrx_ast_entry_type type,
2285 			   uint32_t flags)
2286 {
2287 	return QDF_STATUS_E_FAILURE;
2288 }
2289 
2290 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2291 {
2292 }
2293 
2294 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2295 			struct dp_ast_entry *ast_entry, uint32_t flags)
2296 {
2297 	return 1;
2298 }
2299 
2300 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2301 					       uint8_t *ast_mac_addr)
2302 {
2303 	return NULL;
2304 }
2305 
2306 static inline
2307 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2308 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2309 				    uint8_t vdev_id, uint16_t ast_hash,
2310 				    uint8_t is_wds)
2311 {
2312 	return QDF_STATUS_SUCCESS;
2313 }
2314 
2315 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2316 						     uint8_t *ast_mac_addr,
2317 						     uint8_t pdev_id)
2318 {
2319 	return NULL;
2320 }
2321 
2322 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2323 {
2324 	return QDF_STATUS_SUCCESS;
2325 }
2326 
2327 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2328 					 struct dp_peer *peer,
2329 					 uint8_t *mac_addr,
2330 					 uint16_t hw_peer_id,
2331 					 uint8_t vdev_id,
2332 					 uint16_t ast_hash,
2333 					 uint8_t is_wds)
2334 {
2335 	return QDF_STATUS_SUCCESS;
2336 }
2337 
2338 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2339 {
2340 }
2341 
2342 void dp_peer_ast_set_type(struct dp_soc *soc,
2343 				struct dp_ast_entry *ast_entry,
2344 				enum cdp_txrx_ast_entry_type type)
2345 {
2346 }
2347 
2348 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2349 				struct dp_ast_entry *ast_entry)
2350 {
2351 	return 0xff;
2352 }
2353 
2354 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2355 				 struct dp_ast_entry *ast_entry)
2356 {
2357 	return 0xff;
2358 }
2359 
2360 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2361 			      struct dp_ast_entry *ast_entry,
2362 			      struct dp_peer *peer)
2363 {
2364 }
2365 #endif
2366 
2367 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2368 void dp_peer_ast_send_multi_wds_del(
2369 		struct dp_soc *soc, uint8_t vdev_id,
2370 		struct peer_del_multi_wds_entries *wds_list)
2371 {
2372 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2373 
2374 	if (cdp_soc && cdp_soc->ol_ops &&
2375 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2376 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2377 							  vdev_id, wds_list);
2378 }
2379 #endif
2380 
2381 #ifdef FEATURE_WDS
2382 /**
2383  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2384  * @soc: soc handle
2385  * @peer: peer handle
2386  *
2387  * Free all the wds ast entries associated with peer
2388  *
2389  * Return: Number of wds ast entries freed
2390  */
2391 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2392 					     struct dp_peer *peer)
2393 {
2394 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2395 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2396 	uint32_t num_ast = 0;
2397 
2398 	TAILQ_INIT(&ast_local_list);
2399 	qdf_spin_lock_bh(&soc->ast_lock);
2400 
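	/* Unlink all of the peer's AST entries onto a local list under
	 * ast_lock, then run the free callbacks outside the lock.
	 */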
2401 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2402 		if (ast_entry->next_hop)
2403 			num_ast++;
2404 
2405 		if (ast_entry->is_mapped)
2406 			soc->ast_table[ast_entry->ast_idx] = NULL;
2407 
2408 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2409 		DP_STATS_INC(soc, ast.deleted, 1);
2410 		dp_peer_ast_hash_remove(soc, ast_entry);
2411 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2412 				  ase_list_elem);
2413 		soc->num_ast_entries--;
2414 	}
2415 
2416 	qdf_spin_unlock_bh(&soc->ast_lock);
2417 
2418 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2419 			   temp_ast_entry) {
2420 		if (ast_entry->callback)
2421 			ast_entry->callback(soc->ctrl_psoc,
2422 					    dp_soc_to_cdp_soc(soc),
2423 					    ast_entry->cookie,
2424 					    CDP_TXRX_AST_DELETED);
2425 
2426 		qdf_mem_free(ast_entry);
2427 	}
2428 
2429 	return num_ast;
2430 }

/**
 * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
 * @soc: soc handle
 * @peer: peer handle
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * Free all the wds ast entries associated with the peer and compare the
 * count with the value received from firmware.
 *
 * Return: None
 */
2442 static void
2443 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2444 			  uint32_t free_wds_count)
2445 {
2446 	uint32_t wds_deleted = 0;
2447 
2448 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2449 		return;
2450 
2451 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2452 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2453 	    (free_wds_count != wds_deleted)) {
2454 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 free_wds_count, wds_deleted);
2458 	}
2459 }
2460 
2461 #else
2462 static void
2463 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2464 			  uint32_t free_wds_count)
2465 {
2466 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2467 
2468 	qdf_spin_lock_bh(&soc->ast_lock);
2469 
2470 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2471 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2472 
2473 		if (ast_entry->is_mapped)
2474 			soc->ast_table[ast_entry->ast_idx] = NULL;
2475 
2476 		dp_peer_free_ast_entry(soc, ast_entry);
2477 	}
2478 
2479 	peer->self_ast_entry = NULL;
2480 	qdf_spin_unlock_bh(&soc->ast_lock);
2481 }
2482 #endif
2483 
2484 /**
2485  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2486  * @soc: soc handle
2487  * @peer: peer handle
2488  * @vdev_id: vdev_id
 * @mac_addr: mac address of the AST entry to search and delete
 *
 * Find the ast entry from the peer list using the mac address and free
 * the entry.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2495  */
2496 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2497 					 struct dp_peer *peer,
2498 					 uint8_t vdev_id,
2499 					 uint8_t *mac_addr)
2500 {
2501 	struct dp_ast_entry *ast_entry;
2502 	void *cookie = NULL;
2503 	txrx_ast_free_cb cb = NULL;
2504 
	/*
	 * Clear the reference in the ast_table further below only if
	 * the entry is mapped to the ast_table.
	 */
2509 
2510 	qdf_spin_lock_bh(&soc->ast_lock);
2511 
2512 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2513 	if (!ast_entry) {
2514 		qdf_spin_unlock_bh(&soc->ast_lock);
2515 		return QDF_STATUS_E_NOENT;
2516 	} else if (ast_entry->is_mapped) {
2517 		soc->ast_table[ast_entry->ast_idx] = NULL;
2518 	}
2519 
2520 	cb = ast_entry->callback;
2521 	cookie = ast_entry->cookie;
2522 
2523 
2524 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2525 
2526 	dp_peer_free_ast_entry(soc, ast_entry);
2527 
2528 	qdf_spin_unlock_bh(&soc->ast_lock);
2529 
2530 	if (cb) {
2531 		cb(soc->ctrl_psoc,
2532 		   dp_soc_to_cdp_soc(soc),
2533 		   cookie,
2534 		   CDP_TXRX_AST_DELETED);
2535 	}
2536 
2537 	return QDF_STATUS_SUCCESS;
2538 }
2539 
2540 void dp_peer_find_hash_erase(struct dp_soc *soc)
2541 {
	int i, j;
2543 
2544 	/*
2545 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2546 	 * it's known that the soc is no longer in use.
2547 	 */
2548 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2549 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2550 			struct dp_peer *peer, *peer_next;
2551 
2552 			/*
2553 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2554 			 * memory access violation after peer is freed
2555 			 */
2556 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2557 				hash_list_elem, peer_next) {
2558 				/*
2559 				 * Don't remove the peer from the hash table -
2560 				 * that would modify the list we are currently
2561 				 * traversing, and it's not necessary anyway.
2562 				 */
2563 				/*
2564 				 * Artificially adjust the peer's ref count to
2565 				 * 1, so it will get deleted by
2566 				 * dp_peer_unref_delete.
2567 				 */
2568 				/* set to zero */
2569 				qdf_atomic_init(&peer->ref_cnt);
				for (j = 0; j < DP_MOD_ID_MAX; j++)
					qdf_atomic_init(&peer->mod_refs[j]);
2572 				/* incr to one */
2573 				qdf_atomic_inc(&peer->ref_cnt);
2574 				qdf_atomic_inc(&peer->mod_refs
2575 						[DP_MOD_ID_CONFIG]);
2576 				dp_peer_unref_delete(peer,
2577 						     DP_MOD_ID_CONFIG);
2578 			}
2579 		}
2580 	}
2581 }
2582 
2583 void dp_peer_ast_table_detach(struct dp_soc *soc)
2584 {
2585 	if (soc->ast_table) {
2586 		qdf_mem_free(soc->ast_table);
2587 		soc->ast_table = NULL;
2588 	}
2589 }
2590 
2591 /*
2592  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2593  * @soc: soc handle
2594  *
 * Return: None
2596  */
2597 void dp_peer_find_map_detach(struct dp_soc *soc)
2598 {
2599 	if (soc->peer_id_to_obj_map) {
2600 		qdf_mem_free(soc->peer_id_to_obj_map);
2601 		soc->peer_id_to_obj_map = NULL;
2602 		qdf_spinlock_destroy(&soc->peer_map_lock);
2603 	}
2604 }
2605 
2606 #ifndef AST_OFFLOAD_ENABLE
2607 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2608 {
2609 	QDF_STATUS status;
2610 
2611 	status = dp_peer_find_map_attach(soc);
2612 	if (!QDF_IS_STATUS_SUCCESS(status))
2613 		return status;
2614 
2615 	status = dp_peer_find_hash_attach(soc);
2616 	if (!QDF_IS_STATUS_SUCCESS(status))
2617 		goto map_detach;
2618 
2619 	status = dp_peer_ast_table_attach(soc);
2620 	if (!QDF_IS_STATUS_SUCCESS(status))
2621 		goto hash_detach;
2622 
2623 	status = dp_peer_ast_hash_attach(soc);
2624 	if (!QDF_IS_STATUS_SUCCESS(status))
2625 		goto ast_table_detach;
2626 
2627 	status = dp_peer_mec_hash_attach(soc);
2628 	if (QDF_IS_STATUS_SUCCESS(status)) {
2629 		dp_soc_wds_attach(soc);
2630 		return status;
2631 	}
2632 
2633 	dp_peer_ast_hash_detach(soc);
2634 ast_table_detach:
2635 	dp_peer_ast_table_detach(soc);
2636 hash_detach:
2637 	dp_peer_find_hash_detach(soc);
2638 map_detach:
2639 	dp_peer_find_map_detach(soc);
2640 
2641 	return status;
2642 }
2643 #else
2644 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2645 {
2646 	QDF_STATUS status;
2647 
2648 	status = dp_peer_find_map_attach(soc);
2649 	if (!QDF_IS_STATUS_SUCCESS(status))
2650 		return status;
2651 
2652 	status = dp_peer_find_hash_attach(soc);
2653 	if (!QDF_IS_STATUS_SUCCESS(status))
2654 		goto map_detach;
2655 
2656 	return status;
2657 map_detach:
2658 	dp_peer_find_map_detach(soc);
2659 
2660 	return status;
2661 }
2662 #endif
2663 
2664 #ifdef IPA_OFFLOAD
2665 /*
2666  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
 * @soc: soc handle
 * @cb_ctxt: combination of peer_id and tid
 * @reo_status: reo status
 *
 * Return: None
2672  */
2673 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2674 				       union hal_reo_status *reo_status)
2675 {
2676 	struct dp_peer *peer = NULL;
2677 	struct dp_rx_tid *rx_tid = NULL;
2678 	unsigned long comb_peer_id_tid;
2679 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
2680 	uint16_t tid;
2681 	uint16_t peer_id;
2682 
2683 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2684 		dp_err("REO stats failure %d\n",
2685 		       queue_status->header.status);
2686 		return;
2687 	}
2688 	comb_peer_id_tid = (unsigned long)cb_ctxt;
2689 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
2690 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
2691 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
2692 	if (!peer)
2693 		return;
2694 	rx_tid  = &peer->rx_tid[tid];
2695 
2696 	if (!rx_tid) {
2697 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2698 		return;
2699 	}
2700 
2701 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
2702 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
2703 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2704 }
2705 
2706 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
2707 #endif
2708 
2709 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2710 	union hal_reo_status *reo_status)
2711 {
2712 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2713 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2714 
2715 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2716 		return;
2717 
2718 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2719 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2720 			       queue_status->header.status, rx_tid->tid);
2721 		return;
2722 	}
2723 
2724 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2725 		       "ssn: %d\n"
2726 		       "curr_idx  : %d\n"
2727 		       "pn_31_0   : %08x\n"
2728 		       "pn_63_32  : %08x\n"
2729 		       "pn_95_64  : %08x\n"
2730 		       "pn_127_96 : %08x\n"
2731 		       "last_rx_enq_tstamp : %08x\n"
2732 		       "last_rx_deq_tstamp : %08x\n"
2733 		       "rx_bitmap_31_0     : %08x\n"
2734 		       "rx_bitmap_63_32    : %08x\n"
2735 		       "rx_bitmap_95_64    : %08x\n"
2736 		       "rx_bitmap_127_96   : %08x\n"
2737 		       "rx_bitmap_159_128  : %08x\n"
2738 		       "rx_bitmap_191_160  : %08x\n"
2739 		       "rx_bitmap_223_192  : %08x\n"
2740 		       "rx_bitmap_255_224  : %08x\n",
2741 		       rx_tid->tid,
2742 		       queue_status->ssn, queue_status->curr_idx,
2743 		       queue_status->pn_31_0, queue_status->pn_63_32,
2744 		       queue_status->pn_95_64, queue_status->pn_127_96,
2745 		       queue_status->last_rx_enq_tstamp,
2746 		       queue_status->last_rx_deq_tstamp,
2747 		       queue_status->rx_bitmap_31_0,
2748 		       queue_status->rx_bitmap_63_32,
2749 		       queue_status->rx_bitmap_95_64,
2750 		       queue_status->rx_bitmap_127_96,
2751 		       queue_status->rx_bitmap_159_128,
2752 		       queue_status->rx_bitmap_191_160,
2753 		       queue_status->rx_bitmap_223_192,
2754 		       queue_status->rx_bitmap_255_224);
2755 
2756 	DP_PRINT_STATS(
2757 		       "curr_mpdu_cnt      : %d\n"
2758 		       "curr_msdu_cnt      : %d\n"
2759 		       "fwd_timeout_cnt    : %d\n"
2760 		       "fwd_bar_cnt        : %d\n"
2761 		       "dup_cnt            : %d\n"
2762 		       "frms_in_order_cnt  : %d\n"
2763 		       "bar_rcvd_cnt       : %d\n"
2764 		       "mpdu_frms_cnt      : %d\n"
2765 		       "msdu_frms_cnt      : %d\n"
2766 		       "total_byte_cnt     : %d\n"
2767 		       "late_recv_mpdu_cnt : %d\n"
2768 		       "win_jump_2k        : %d\n"
2769 		       "hole_cnt           : %d\n",
2770 		       queue_status->curr_mpdu_cnt,
2771 		       queue_status->curr_msdu_cnt,
2772 		       queue_status->fwd_timeout_cnt,
2773 		       queue_status->fwd_bar_cnt,
2774 		       queue_status->dup_cnt,
2775 		       queue_status->frms_in_order_cnt,
2776 		       queue_status->bar_rcvd_cnt,
2777 		       queue_status->mpdu_frms_cnt,
2778 		       queue_status->msdu_frms_cnt,
2779 		       queue_status->total_cnt,
2780 		       queue_status->late_recv_mpdu_cnt,
2781 		       queue_status->win_jump_2k,
2782 		       queue_status->hole_cnt);
2783 
2784 	DP_PRINT_STATS("Addba Req          : %d\n"
2785 			"Addba Resp         : %d\n"
2786 			"Addba Resp success : %d\n"
2787 			"Addba Resp failed  : %d\n"
2788 			"Delba Req received : %d\n"
2789 			"Delba Tx success   : %d\n"
2790 			"Delba Tx Fail      : %d\n"
2791 			"BA window size     : %d\n"
2792 			"Pn size            : %d\n",
2793 			rx_tid->num_of_addba_req,
2794 			rx_tid->num_of_addba_resp,
2795 			rx_tid->num_addba_rsp_success,
2796 			rx_tid->num_addba_rsp_failed,
2797 			rx_tid->num_of_delba_req,
2798 			rx_tid->delba_tx_success_cnt,
2799 			rx_tid->delba_tx_fail_cnt,
2800 			rx_tid->ba_win_size,
2801 			rx_tid->pn_size);
2802 }
2803 
2804 #ifdef REO_SHARED_QREF_TABLE_EN
2805 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2806 					struct dp_peer *peer)
2807 {
2808 	uint8_t tid;
2809 
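	/* Qref LUT entries are programmed against the MLD peer id for
	 * MLO connections, so there is nothing to clear for link peers.
	 */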
2810 	if (IS_MLO_DP_LINK_PEER(peer))
2811 		return;
2812 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2813 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2814 			hal_reo_shared_qaddr_write(soc->hal_soc,
2815 						   peer->peer_id, tid, 0);
2816 	}
2817 }
2818 #endif
2819 
2820 /*
2821  * dp_peer_find_add_id() - map peer_id with peer
2822  * @soc: soc handle
2823  * @peer_mac_addr: peer mac address
2824  * @peer_id: peer id to be mapped
2825  * @hw_peer_id: HW ast index
2826  * @vdev_id: vdev_id
2827  * @peer_type: peer type (link or MLD)
2828  *
 * Return: peer on success
 *         NULL on failure
2831  */
2832 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2833 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2834 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2835 {
2836 	struct dp_peer *peer;
2837 	struct cdp_peer_info peer_info = { 0 };
2838 
2839 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2840 	/* check if there's already a peer object with this MAC address */
2841 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2842 				 false, peer_type);
2843 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2844 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2845 		    soc, peer, peer_id, vdev_id,
2846 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2847 
2848 	if (peer) {
2849 		/* peer's ref count was already incremented by
2850 		 * peer_find_hash_find
2851 		 */
2852 		dp_peer_info("%pK: ref_cnt: %d", soc,
2853 			     qdf_atomic_read(&peer->ref_cnt));
2854 
2855 		/*
2856 		 * if peer is in logical delete CP triggered delete before map
2857 		 * is received ignore this event
2858 		 */
2859 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2860 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2861 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2862 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2863 				 vdev_id);
2864 			return NULL;
2865 		}
2866 
2867 		if (peer->peer_id == HTT_INVALID_PEER) {
2868 			if (!IS_MLO_DP_MLD_PEER(peer))
2869 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2870 								   peer_id);
2871 		} else {
2872 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2873 			QDF_ASSERT(0);
2874 			return NULL;
2875 		}
2876 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2877 		if (soc->arch_ops.dp_partner_chips_map)
2878 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2879 
2880 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2881 		return peer;
2882 	}
2883 
2884 	return NULL;
2885 }
2886 
2887 #ifdef WLAN_FEATURE_11BE_MLO
2888 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
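/**
 * dp_gen_ml_peer_id() - generate the ML peer id from the FW peer id
 * @soc: SoC handle
 * @peer_id: peer id assigned by FW
 *
 * The ML peer id is derived by setting the ML-peer-valid bit in the
 * FW-assigned peer id.
 *
 * Return: ML peer id
 */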
2889 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2890 					 uint16_t peer_id)
2891 {
2892 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2893 }
2894 #else
2895 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2896 					 uint16_t peer_id)
2897 {
2898 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2899 }
2900 #endif
2901 
2902 QDF_STATUS
2903 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2904 			   uint8_t *peer_mac_addr,
2905 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2906 			   struct dp_mlo_link_info *mlo_link_info)
2907 {
2908 	struct dp_peer *peer = NULL;
2909 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2910 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2911 	uint8_t vdev_id = 0;
2912 	uint8_t is_wds = 0;
2913 	int i;
2914 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2915 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2916 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2917 	struct dp_soc *primary_soc;
2918 
2919 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2920 		soc, peer_id, ml_peer_id,
2921 		QDF_MAC_ADDR_REF(peer_mac_addr));
2922 
2923 	/* Get corresponding vdev ID for the peer based
2924 	 * on chip ID obtained from mlo peer_map event
2925 	 */
2926 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2927 		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
2928 			vdev_id = mlo_link_info[i].vdev_id;
2929 			break;
2930 		}
2931 	}
2932 
2933 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2934 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2935 
2936 	if (peer) {
2937 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2938 		    qdf_mem_cmp(peer->mac_addr.raw,
2939 				peer->vdev->mld_mac_addr.raw,
2940 				QDF_MAC_ADDR_SIZE) != 0) {
2941 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2942 			peer->bss_peer = 1;
2943 			if (peer->txrx_peer)
2944 				peer->txrx_peer->bss_peer = 1;
2945 		}
2946 
2947 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2948 			peer->vdev->bss_ast_hash = ast_hash;
2949 			peer->vdev->bss_ast_idx = hw_peer_id;
2950 		}
2951 
		/* Add an AST entry in case the self AST entry was
		 * deleted due to a DP CP sync issue.
		 *
		 * self_ast_entry is modified in the peer create and
		 * peer unmap paths, which cannot run in parallel with
		 * peer map; no lock is needed before referring to it.
		 */
2960 		if (!peer->self_ast_entry) {
2961 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2962 				QDF_MAC_ADDR_REF(peer_mac_addr));
2963 			dp_peer_add_ast(soc, peer,
2964 					peer_mac_addr,
2965 					type, 0);
2966 		}
2967 		/* If peer setup and hence rx_tid setup got called
2968 		 * before htt peer map then Qref write to LUT did not
2969 		 * happen in rx_tid setup as peer_id was invalid.
2970 		 * So defer Qref write to peer map handler. Check if
2971 		 * rx_tid qdesc for tid 0 is already setup and perform
2972 		 * qref write to LUT for Tid 0 and 16.
2973 		 *
2974 		 * Peer map could be obtained on assoc link, hence
2975 		 * change to primary link's soc.
2976 		 */
2977 		primary_soc = peer->vdev->pdev->soc;
2978 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
2979 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
2980 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2981 						   ml_peer_id,
2982 						   0,
2983 						   peer->rx_tid[0].hw_qdesc_paddr);
2984 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2985 						   ml_peer_id,
2986 						   DP_NON_QOS_TID,
2987 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2988 		}
2989 	}
2990 
2991 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2992 			      vdev_id, ast_hash, is_wds);
2993 
2994 	/*
2995 	 * If AST offload and host AST DB is enabled, populate AST entries on
2996 	 * host based on mlo peer map event from FW
2997 	 */
2998 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2999 		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
3000 					 hw_peer_id, vdev_id,
3001 					 ast_hash, is_wds);
3002 	}
3003 
3004 	return err;
3005 }
3006 #endif
3007 
3008 #ifdef DP_RX_UDP_OVER_PEER_ROAM
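/**
 * dp_rx_reset_roaming_peer() - reset the cached roaming peer on a vdev
 * @soc: SoC handle
 * @vdev_id: vdev id
 * @peer_mac_addr: peer mac address
 *
 * If the given MAC matches the vdev's cached roaming peer, clear the
 * cached MAC and reset the roaming peer auth status.
 *
 * Return: None
 */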
3009 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
3010 			      uint8_t *peer_mac_addr)
3011 {
3012 	struct dp_vdev *vdev = NULL;
3013 
3014 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
3015 	if (vdev) {
3016 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
3017 				QDF_MAC_ADDR_SIZE) == 0) {
3018 			vdev->roaming_peer_status =
3019 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
3020 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
3021 				     QDF_MAC_ADDR_SIZE);
3022 		}
3023 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
3024 	}
3025 }
3026 #endif
3027 
3028 /**
3029  * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @hw_peer_id: ast index for this peer
 * @vdev_id: vdev ID
 * @peer_mac_addr: mac address of the peer
 * @ast_hash: ast hash value
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Associate the peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
3040  *
3041  * Return: QDF_STATUS code
3042  */
3044 QDF_STATUS
3045 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
3046 		       uint16_t hw_peer_id, uint8_t vdev_id,
3047 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
3048 		       uint8_t is_wds)
3049 {
3050 	struct dp_peer *peer = NULL;
3051 	struct dp_vdev *vdev = NULL;
3052 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
3053 	QDF_STATUS err = QDF_STATUS_SUCCESS;
3054 
3055 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
3056 		soc, peer_id, hw_peer_id,
3057 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
3058 
3059 	/* Peer map event for WDS ast entry get the peer from
3060 	 * obj map
3061 	 */
3062 	if (is_wds) {
3063 		if (!soc->ast_offload_support) {
3064 			peer = dp_peer_get_ref_by_id(soc, peer_id,
3065 						     DP_MOD_ID_HTT);
3066 
3067 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
3068 					      hw_peer_id,
3069 					      vdev_id, ast_hash, is_wds);
3070 			if (peer)
3071 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3072 		}
3073 	} else {
3074 		/*
3075 		 * It's the responsibility of the CP and FW to ensure
3076 		 * that peer is created successfully. Ideally DP should
3077 		 * not hit the below condition for directly associated
3078 		 * peers.
3079 		 */
		if ((!soc->ast_offload_support) &&
		    (hw_peer_id >=
		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
3083 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
3084 			qdf_assert_always(0);
3085 		}
3086 
3087 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
3088 					   hw_peer_id, vdev_id,
3089 					   CDP_LINK_PEER_TYPE);
3090 
3091 		if (peer) {
3092 			vdev = peer->vdev;
3093 			/* Only check for STA Vdev and peer is not for TDLS */
3094 			if (wlan_op_mode_sta == vdev->opmode &&
3095 			    !peer->is_tdls_peer) {
3096 				if (qdf_mem_cmp(peer->mac_addr.raw,
3097 						vdev->mac_addr.raw,
3098 						QDF_MAC_ADDR_SIZE) != 0) {
3099 					dp_info("%pK: STA vdev bss_peer", soc);
3100 					peer->bss_peer = 1;
3101 					if (peer->txrx_peer)
3102 						peer->txrx_peer->bss_peer = 1;
3103 				}
3104 
3105 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
3106 					ast_hash, hw_peer_id);
3107 				vdev->bss_ast_hash = ast_hash;
3108 				vdev->bss_ast_idx = hw_peer_id;
3109 			}
3110 
			/* Add an AST entry in case the self AST entry was
			 * deleted due to a DP CP sync issue.
			 *
			 * self_ast_entry is modified in the peer create and
			 * peer unmap paths, which cannot run in parallel
			 * with peer map; no lock is needed before referring
			 * to it.
			 */
3119 			if (!soc->ast_offload_support &&
3120 				!peer->self_ast_entry) {
3121 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
3122 					QDF_MAC_ADDR_REF(peer_mac_addr));
3123 				dp_peer_add_ast(soc, peer,
3124 						peer_mac_addr,
3125 						type, 0);
3126 			}
3127 
3128 			/* If peer setup and hence rx_tid setup got called
3129 			 * before htt peer map then Qref write to LUT did
3130 			 * not happen in rx_tid setup as peer_id was invalid.
3131 			 * So defer Qref write to peer map handler. Check if
3132 			 * rx_tid qdesc for tid 0 is already setup perform qref
3133 			 * write to LUT for Tid 0 and 16.
3134 			 */
3135 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3136 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
3137 			    !IS_MLO_DP_LINK_PEER(peer)) {
3138 				hal_reo_shared_qaddr_write(soc->hal_soc,
3139 							   peer_id,
3140 							   0,
3141 							   peer->rx_tid[0].hw_qdesc_paddr);
3142 				hal_reo_shared_qaddr_write(soc->hal_soc,
3143 							   peer_id,
3144 							   DP_NON_QOS_TID,
3145 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3146 			}
3147 		}
3148 
3149 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3150 				      vdev_id, ast_hash, is_wds);
3151 	}
3152 
3153 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
3154 
3155 	/*
3156 	 * If AST offload and host AST DB is enabled, populate AST entries on
3157 	 * host based on peer map event from FW
3158 	 */
3159 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3160 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
3161 					 hw_peer_id, vdev_id,
3162 					 ast_hash, is_wds);
3163 	}
3164 
3165 	return err;
3166 }
3167 
3168 /**
3169  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @vdev_id: vdev ID
 * @mac_addr: mac address of the peer or wds entry
 * @is_wds: flag to indicate peer unmap event for WDS ast entry
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * Return: None
3178  */
3179 void
3180 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3181 			 uint8_t vdev_id, uint8_t *mac_addr,
3182 			 uint8_t is_wds, uint32_t free_wds_count)
3183 {
3184 	struct dp_peer *peer;
3185 	struct dp_vdev *vdev = NULL;
3186 
3187 	/*
3188 	 * If FW AST offload is enabled and host AST DB is enabled,
3189 	 * the AST entries are created during peer map from FW.
3190 	 */
3191 	if (soc->ast_offload_support && is_wds) {
3192 		if (!soc->host_ast_db_enable)
3193 			return;
3194 	}
3195 
3196 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3197 
3198 	/*
3199 	 * Currently peer IDs are assigned for vdevs as well as peers.
3200 	 * If the peer ID is for a vdev, then the peer pointer stored
3201 	 * in peer_id_to_obj_map will be NULL.
3202 	 */
3203 	if (!peer) {
3204 		dp_err("Received unmap event for invalid peer_id %u",
3205 		       peer_id);
3206 		return;
3207 	}
3208 
	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here.
	 */
3212 	if (is_wds) {
3213 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3214 						   mac_addr)) {
3215 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3216 			return;
3217 		}
3218 
3219 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3220 			  peer, peer->peer_id,
3221 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3222 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3223 			  is_wds);
3224 
3225 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3226 		return;
3227 	}
3228 
3229 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3230 
3231 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3232 		soc, peer_id, peer);
3233 
3234 	/* Clear entries in Qref LUT */
3235 	/* TODO: Check if this is to be called from
3236 	 * dp_peer_delete for MLO case if there is race between
3237 	 * new peer id assignment and still not having received
3238 	 * peer unmap for MLD peer with same peer id.
3239 	 */
3240 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3241 
3242 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3243 
3244 	if (soc->arch_ops.dp_partner_chips_unmap)
3245 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3246 
3247 	peer->peer_id = HTT_INVALID_PEER;
3248 
	/* Reset ast flow mapping table */
3252 	if (!soc->ast_offload_support)
3253 		dp_peer_reset_flowq_map(peer);
3254 
3255 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3256 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3257 				peer_id, vdev_id, mac_addr);
3258 	}
3259 
3260 	vdev = peer->vdev;
3261 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3262 
3263 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3264 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3265 	/*
3266 	 * Remove a reference to the peer.
3267 	 * If there are no more references, delete the peer object.
3268 	 */
3269 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3270 }
3271 
3272 #ifdef WLAN_FEATURE_11BE_MLO
3273 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3274 {
3275 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3276 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3277 	uint8_t vdev_id = DP_VDEV_ALL;
3278 	uint8_t is_wds = 0;
3279 
3280 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3281 		soc, peer_id);
3282 
3283 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3284 				 mac_addr, is_wds,
3285 				 DP_PEER_WDS_COUNT_INVALID);
3286 }
3287 #endif
3288 
3289 #ifndef AST_OFFLOAD_ENABLE
3290 void
3291 dp_peer_find_detach(struct dp_soc *soc)
3292 {
3293 	dp_soc_wds_detach(soc);
3294 	dp_peer_find_map_detach(soc);
3295 	dp_peer_find_hash_detach(soc);
3296 	dp_peer_ast_hash_detach(soc);
3297 	dp_peer_ast_table_detach(soc);
3298 	dp_peer_mec_hash_detach(soc);
3299 }
3300 #else
3301 void
3302 dp_peer_find_detach(struct dp_soc *soc)
3303 {
3304 	dp_peer_find_map_detach(soc);
3305 	dp_peer_find_hash_detach(soc);
3306 }
3307 #endif
3308 
3309 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
3310 	union hal_reo_status *reo_status)
3311 {
3312 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
3313 
3314 	if ((reo_status->rx_queue_status.header.status !=
3315 		HAL_REO_CMD_SUCCESS) &&
3316 		(reo_status->rx_queue_status.header.status !=
3317 		HAL_REO_CMD_DRAIN)) {
3318 		/* Should not happen normally. Just print error for now */
3319 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
3320 			    soc, reo_status->rx_queue_status.header.status,
3321 			    rx_tid->tid);
3322 	}
3323 }
3324 
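/**
 * dp_get_peer_vdev_roaming_in_progress() - check if peer's vdev is roaming
 * @peer: DP peer handle
 *
 * Query the control path via the is_roam_inprogress callback for the
 * vdev that the peer belongs to.
 *
 * Return: true if roaming is in progress, else false
 */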
3325 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
3326 {
3327 	struct ol_if_ops *ol_ops = NULL;
3328 	bool is_roaming = false;
3329 	uint8_t vdev_id = -1;
3330 	struct cdp_soc_t *soc;
3331 
3332 	if (!peer) {
3333 		dp_peer_info("Peer is NULL. No roaming possible");
3334 		return false;
3335 	}
3336 
3337 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
3338 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
3339 
3340 	if (ol_ops && ol_ops->is_roam_inprogress) {
3341 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
3342 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
3343 	}
3344 
3345 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
3346 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
3347 
3348 	return is_roaming;
3349 }
3350 
3351 #ifdef WLAN_FEATURE_11BE_MLO
3352 /**
 * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
 *			     setup is necessary
3355  * @peer: DP peer handle
3356  *
3357  * Return: true - allow, false - disallow
3358  */
3359 static inline
3360 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3361 {
3362 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3363 		return false;
3364 
3365 	return true;
3366 }
3367 
3368 /**
3369  * dp_rx_tid_update_allow() - check if rx_tid update needed
3370  * @peer: DP peer handle
3371  *
3372  * Return: true - allow, false - disallow
3373  */
3374 static inline
3375 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3376 {
3377 	/* not as expected for MLO connection link peer */
3378 	if (IS_MLO_DP_LINK_PEER(peer)) {
3379 		QDF_BUG(0);
3380 		return false;
3381 	}
3382 
3383 	return true;
3384 }
3385 #else
3386 static inline
3387 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3388 {
3389 	return true;
3390 }
3391 
3392 static inline
3393 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3394 {
3395 	return true;
3396 }
3397 #endif
3398 
3399 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
3400 					 ba_window_size, uint32_t start_seq,
3401 					 bool bar_update)
3402 {
3403 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3404 	struct dp_soc *soc = peer->vdev->pdev->soc;
3405 	struct hal_reo_cmd_params params;
3406 
3407 	if (!dp_rx_tid_update_allow(peer)) {
3408 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3409 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3410 		return QDF_STATUS_E_FAILURE;
3411 	}
3412 
3413 	qdf_mem_zero(&params, sizeof(params));
3414 
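	/* Build an UPDATE_RX_REO_QUEUE command: resize the BA window and,
	 * when a valid start_seq is passed, update the SSN as well.
	 */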
3415 	params.std.need_status = 1;
3416 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3417 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3418 	params.u.upd_queue_params.update_ba_window_size = 1;
3419 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3420 
3421 	if (start_seq < IEEE80211_SEQ_MAX) {
3422 		params.u.upd_queue_params.update_ssn = 1;
3423 		params.u.upd_queue_params.ssn = start_seq;
3424 	} else {
		dp_set_ssn_valid_flag(&params, 0);
3426 	}
3427 
3428 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3429 			    dp_rx_tid_update_cb, rx_tid)) {
3430 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3431 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3432 	}
3433 
3434 	rx_tid->ba_win_size = ba_window_size;
3435 
3436 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3437 		return QDF_STATUS_E_PERM;
3438 
3439 	if (!bar_update)
3440 		dp_peer_rx_reorder_queue_setup(soc, peer,
3441 					       tid, ba_window_size);
3442 
3443 	return QDF_STATUS_SUCCESS;
3444 }
3445 
3446 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3447 /*
3448  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3449  *                                    the deferred list
3450  * @soc: Datapath soc handle
 * @freedesc: REO DESC reference that needs to be freed
3452  *
3453  * Return: true if enqueued, else false
3454  */
3455 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3456 					   struct reo_desc_list_node *freedesc)
3457 {
3458 	struct reo_desc_deferred_freelist_node *desc;
3459 
3460 	if (!qdf_atomic_read(&soc->cmn_init_done))
3461 		return false;
3462 
3463 	desc = qdf_mem_malloc(sizeof(*desc));
3464 	if (!desc)
3465 		return false;
3466 
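	/* Snapshot the qdesc details so the REO desc list node itself can
	 * be freed now; the qdesc memory is released later by
	 * dp_reo_desc_defer_free() once it ages past the threshold.
	 */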
3467 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3468 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3469 	desc->hw_qdesc_vaddr_unaligned =
3470 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3471 	desc->free_ts = qdf_get_system_timestamp();
3472 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3473 
3474 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3475 	if (!soc->reo_desc_deferred_freelist_init) {
3476 		qdf_mem_free(desc);
3477 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3478 		return false;
3479 	}
3480 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3481 			     (qdf_list_node_t *)desc);
3482 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3483 
3484 	return true;
3485 }
3486 
3487 /*
3488  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
3489  *                            based on time threshold
3490  * @soc: Datapath soc handle
 *
 * Return: None
3494  */
3495 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3496 {
3497 	struct reo_desc_deferred_freelist_node *desc;
3498 	unsigned long curr_ts = qdf_get_system_timestamp();
3499 
3500 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3501 
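	/* Nodes are queued in arrival order, so free from the front until
	 * the first node that has not yet aged past
	 * REO_DESC_DEFERRED_FREE_MS.
	 */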
3502 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3503 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3504 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3505 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3506 				      (qdf_list_node_t **)&desc);
3507 
3508 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3509 
3510 		qdf_mem_unmap_nbytes_single(soc->osdev,
3511 					    desc->hw_qdesc_paddr,
3512 					    QDF_DMA_BIDIRECTIONAL,
3513 					    desc->hw_qdesc_alloc_size);
3514 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3515 		qdf_mem_free(desc);
3516 
3517 		curr_ts = qdf_get_system_timestamp();
3518 	}
3519 
3520 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3521 }
3522 #else
3523 static inline bool
3524 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3525 			       struct reo_desc_list_node *freedesc)
3526 {
3527 	return false;
3528 }
3529 
3530 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3531 {
3532 }
3533 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3534 
3535 /*
3536  * dp_reo_desc_free() - Callback free reo descriptor memory after
3537  * HW cache flush
3538  *
3539  * @soc: DP SOC handle
3540  * @cb_ctxt: Callback context
3541  * @reo_status: REO command status
3542  */
3543 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3544 	union hal_reo_status *reo_status)
3545 {
3546 	struct reo_desc_list_node *freedesc =
3547 		(struct reo_desc_list_node *)cb_ctxt;
3548 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3549 	unsigned long curr_ts = qdf_get_system_timestamp();
3550 
3551 	if ((reo_status->fl_cache_status.header.status !=
3552 		HAL_REO_CMD_SUCCESS) &&
3553 		(reo_status->fl_cache_status.header.status !=
3554 		HAL_REO_CMD_DRAIN)) {
		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
			    soc, reo_status->fl_cache_status.header.status,
			    freedesc->rx_tid.tid);
3558 	}
3559 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3560 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3561 		     rx_tid->tid);
3562 
3563 	/* REO desc is enqueued to be freed at a later point
3564 	 * in time, just free the freedesc alone and return
3565 	 */
3566 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3567 		goto out;
3568 
3569 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3570 
3571 	qdf_mem_unmap_nbytes_single(soc->osdev,
3572 		rx_tid->hw_qdesc_paddr,
3573 		QDF_DMA_BIDIRECTIONAL,
3574 		rx_tid->hw_qdesc_alloc_size);
3575 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3576 out:
3577 	qdf_mem_free(freedesc);
3578 }
3579 
3580 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3581 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3582 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3583 {
3584 	if (dma_addr < 0x50000000)
3585 		return QDF_STATUS_E_FAILURE;
3586 	else
3587 		return QDF_STATUS_SUCCESS;
3588 }
3589 #else
3590 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3591 {
3592 	return QDF_STATUS_SUCCESS;
3593 }
3594 #endif
3595 
3596 /*
 * dp_rx_tid_setup_wifi3() - Setup receive TID state
3598  * @peer: Datapath peer handle
3599  * @tid: TID
3600  * @ba_window_size: BlockAck window size
3601  * @start_seq: Starting sequence number
3602  *
3603  * Return: QDF_STATUS code
3604  */
3605 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3606 				 uint32_t ba_window_size, uint32_t start_seq)
3607 {
3608 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3609 	struct dp_vdev *vdev = peer->vdev;
3610 	struct dp_soc *soc = vdev->pdev->soc;
3611 	uint32_t hw_qdesc_size;
3612 	uint32_t hw_qdesc_align;
3613 	int hal_pn_type;
3614 	void *hw_qdesc_vaddr;
3615 	uint32_t alloc_tries = 0;
3616 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3617 	struct dp_txrx_peer *txrx_peer;
3618 
3619 	if (!qdf_atomic_read(&peer->is_default_route_set))
3620 		return QDF_STATUS_E_FAILURE;
3621 
3622 	if (!dp_rx_tid_setup_allow(peer)) {
3623 		dp_peer_info("skip rx tid setup for peer " QDF_MAC_ADDR_FMT,
3624 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3625 		goto send_wmi_reo_cmd;
3626 	}
3627 
3628 	rx_tid->ba_win_size = ba_window_size;
3629 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3630 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3631 			start_seq, false);
3632 	rx_tid->delba_tx_status = 0;
3633 	rx_tid->ppdu_id_2k = 0;
3634 	rx_tid->num_of_addba_req = 0;
3635 	rx_tid->num_of_delba_req = 0;
3636 	rx_tid->num_of_addba_resp = 0;
3637 	rx_tid->num_addba_rsp_failed = 0;
3638 	rx_tid->num_addba_rsp_success = 0;
3639 	rx_tid->delba_tx_success_cnt = 0;
3640 	rx_tid->delba_tx_fail_cnt = 0;
3641 	rx_tid->statuscode = 0;
3642 
3643 	/* TODO: HW queue descriptors are currently allocated for the max
3644 	 * BA window size for all QoS TIDs, so that the same descriptor can
3645 	 * be reused when an ADDBA request is received. This should be
3646 	 * changed to allocate based on the negotiated BA window size (0
3647 	 * for non-BA cases), reallocate when the window size changes, and
3648 	 * also send a WMI message to FW to update the REO queue descriptor
3649 	 * in the Rx peer entry as part of dp_rx_tid_update.
3650 	 */
3651 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3652 					       ba_window_size, tid);
3653 
3654 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3655 	/* To avoid unnecessary extra allocation for alignment, try allocating
3656 	 * exact size and see if we already have aligned address.
3657 	 */
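	/*
	 * Illustrative arithmetic (example values, not from this build):
	 * with hw_qdesc_align == 128, a malloc that returns ...1080 is
	 * already aligned and is used as-is; one that returns ...10a0 is
	 * freed and re-allocated with (align - 1) extra bytes, and
	 * qdf_align() rounds the new address up, conceptually:
	 *
	 *	aligned = (unaligned + align - 1) & ~(align - 1);
	 */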
3658 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3659 
3660 try_desc_alloc:
3661 	rx_tid->hw_qdesc_vaddr_unaligned =
3662 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3663 
3664 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3665 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3666 			    soc, tid);
3667 		return QDF_STATUS_E_NOMEM;
3668 	}
3669 
3670 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3671 		hw_qdesc_align) {
3672 		/* Address allocated above is not aligned. Allocate extra
3673 		 * memory for alignment
3674 		 */
3675 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3676 		rx_tid->hw_qdesc_vaddr_unaligned =
3677 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3678 					hw_qdesc_align - 1);
3679 
3680 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3681 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3682 				    soc, tid);
3683 			return QDF_STATUS_E_NOMEM;
3684 		}
3685 
3686 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3687 			rx_tid->hw_qdesc_vaddr_unaligned,
3688 			hw_qdesc_align);
3689 
3690 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3691 			      soc, rx_tid->hw_qdesc_alloc_size,
3692 			      hw_qdesc_vaddr);
3693 
3694 	} else {
3695 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3696 	}
3697 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3698 
3699 	txrx_peer = dp_get_txrx_peer(peer);
3700 
3701 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3702 	 * Currently this is set based on htt indication
3703 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3704 	 */
3705 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
3706 	case cdp_sec_type_tkip_nomic:
3707 	case cdp_sec_type_aes_ccmp:
3708 	case cdp_sec_type_aes_ccmp_256:
3709 	case cdp_sec_type_aes_gcmp:
3710 	case cdp_sec_type_aes_gcmp_256:
3711 		hal_pn_type = HAL_PN_WPA;
3712 		break;
3713 	case cdp_sec_type_wapi:
3714 		if (vdev->opmode == wlan_op_mode_ap)
3715 			hal_pn_type = HAL_PN_WAPI_EVEN;
3716 		else
3717 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3718 		break;
3719 	default:
3720 		hal_pn_type = HAL_PN_NONE;
3721 		break;
3722 	}
3723 
3724 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3725 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3726 		vdev->vdev_stats_id);
3727 
3728 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3729 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3730 		&(rx_tid->hw_qdesc_paddr));
3731 
3732 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3733 			QDF_STATUS_SUCCESS) {
3734 		if (alloc_tries++ < 10) {
3735 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3736 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3737 			goto try_desc_alloc;
3738 		} else {
3739 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3740 				    soc, tid);
3741 			status = QDF_STATUS_E_NOMEM;
3742 			goto error;
3743 		}
3744 	}
3745 
3746 send_wmi_reo_cmd:
3747 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3748 		status = QDF_STATUS_E_PERM;
3749 		goto error;
3750 	}
3751 
3752 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3753 						tid, ba_window_size);
3754 	if (QDF_IS_STATUS_SUCCESS(status))
3755 		return status;
3756 
3757 error:
3758 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3759 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3760 		    QDF_STATUS_SUCCESS)
3761 			qdf_mem_unmap_nbytes_single(
3762 				soc->osdev,
3763 				rx_tid->hw_qdesc_paddr,
3764 				QDF_DMA_BIDIRECTIONAL,
3765 				rx_tid->hw_qdesc_alloc_size);
3766 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3767 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3768 		rx_tid->hw_qdesc_paddr = 0;
3769 	}
3770 	return status;
3771 }
3772 
3773 #ifdef DP_UMAC_HW_RESET_SUPPORT
3774 static
3775 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
3776 {
3777 	int tid;
3778 
3779 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
3780 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3781 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
3782 
3783 		if (vaddr)
3784 			dp_reset_rx_reo_tid_queue(soc, vaddr,
3785 						  rx_tid->hw_qdesc_alloc_size);
3786 	}
3787 }
3788 
3789 void dp_reset_tid_q_setup(struct dp_soc *soc)
3790 {
3791 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
3792 }
3793 #endif
3794 #ifdef REO_DESC_DEFER_FREE
3795 /*
3796  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails,
3797  * add the desc back to the freelist and defer the deletion
3798  *
3799  * @soc: DP SOC handle
3800  * @desc: Base descriptor to be freed
3801  * @reo_status: REO command status
3802  */
3803 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3804 				 struct reo_desc_list_node *desc,
3805 				 union hal_reo_status *reo_status)
3806 {
3807 	desc->free_ts = qdf_get_system_timestamp();
3808 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3809 	qdf_list_insert_back(&soc->reo_desc_freelist,
3810 			     (qdf_list_node_t *)desc);
3811 }
3812 
3813 /*
3814  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3815  * queued to the cmd ring, to avoid a REO hang
3816  *
3817  * @list_size: REO desc list size to be cleaned
3818  */
3819 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3820 {
3821 	unsigned long curr_ts = qdf_get_system_timestamp();
3822 
3823 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3824 		dp_err_log("%lu:freedesc number %d in freelist",
3825 			   curr_ts, *list_size);
3826 		/* limit the batch queue size */
3827 		*list_size = REO_DESC_FREELIST_SIZE;
3828 	}
3829 }
3830 #else
3831 /*
3832  * dp_reo_desc_clean_up() - If sending the REO cache flush cmd
3833  * fails, free the base REO desc anyway
3834  *
3835  * @soc: DP SOC handle
3836  * @desc: Base descriptor to be freed
3837  * @reo_status: REO command status
3838  */
3839 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3840 				 struct reo_desc_list_node *desc,
3841 				 union hal_reo_status *reo_status)
3842 {
3843 	if (reo_status) {
3844 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3845 		reo_status->fl_cache_status.header.status = 0;
3846 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3847 	}
3848 }
3849 
3850 /*
3851  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3852  * queued to the cmd ring, to avoid a REO hang
3853  *
3854  * @list_size: REO desc list size to be cleaned
3855  */
3856 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3857 {
3858 }
3859 #endif
3860 
3861 /*
3862  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3863  * cmd and re-insert desc into free list if send fails.
3864  *
3865  * @soc: DP SOC handle
3866  * @desc: desc with resend update cmd flag set
3867  * @rx_tid: RX tid associated with the update cmd, whose valid
3868  * field is reset to 0 in h/w
3869  *
3870  * Return: QDF status
3871  */
3872 static QDF_STATUS
3873 dp_resend_update_reo_cmd(struct dp_soc *soc,
3874 			 struct reo_desc_list_node *desc,
3875 			 struct dp_rx_tid *rx_tid)
3876 {
3877 	struct hal_reo_cmd_params params;
3878 
3879 	qdf_mem_zero(&params, sizeof(params));
3880 	params.std.need_status = 1;
3881 	params.std.addr_lo =
3882 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3883 	params.std.addr_hi =
3884 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3885 	params.u.upd_queue_params.update_vld = 1;
3886 	params.u.upd_queue_params.vld = 0;
3887 	desc->resend_update_reo_cmd = false;
3888 	/*
3889 	 * If the cmd send fails then set resend_update_reo_cmd flag
3890 	 * and insert the desc at the end of the free list to retry.
3891 	 */
3892 	if (dp_reo_send_cmd(soc,
3893 			    CMD_UPDATE_RX_REO_QUEUE,
3894 			    &params,
3895 			    dp_rx_tid_delete_cb,
3896 			    (void *)desc)
3897 	    != QDF_STATUS_SUCCESS) {
3898 		desc->resend_update_reo_cmd = true;
3899 		desc->free_ts = qdf_get_system_timestamp();
3900 		qdf_list_insert_back(&soc->reo_desc_freelist,
3901 				     (qdf_list_node_t *)desc);
3902 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3903 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3904 		return QDF_STATUS_E_FAILURE;
3905 	}
3906 
3907 	return QDF_STATUS_SUCCESS;
3908 }
3909 
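/*
 * A note on the address split used by every REO command in this file:
 * the 64-bit queue-descriptor physical address is carried in two
 * 32-bit fields of struct hal_reo_cmd_params:
 *
 *	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
 *	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
 */
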
3910 /*
3911  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
3912  * after deleting the entries (i.e., setting valid=0)
3913  *
3914  * @soc: DP SOC handle
3915  * @cb_ctxt: Callback context
3916  * @reo_status: REO command status
3917  */
3918 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3919 			 union hal_reo_status *reo_status)
3920 {
3921 	struct reo_desc_list_node *freedesc =
3922 		(struct reo_desc_list_node *)cb_ctxt;
3923 	uint32_t list_size;
3924 	struct reo_desc_list_node *desc;
3925 	unsigned long curr_ts = qdf_get_system_timestamp();
3926 	uint32_t desc_size, tot_desc_size;
3927 	struct hal_reo_cmd_params params;
3928 	bool flush_failure = false;
3929 
3930 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
3931 
3932 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
3933 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3934 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
3935 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
3936 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
3937 		return;
3938 	} else if (reo_status->rx_queue_status.header.status !=
3939 		HAL_REO_CMD_SUCCESS) {
3940 		/* Should not happen normally. Just print error for now */
3941 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
3942 			   reo_status->rx_queue_status.header.status,
3943 			   freedesc->rx_tid.tid);
3944 	}
3945 
3946 	dp_peer_info("%pK: rx_tid: %d status: %d",
3947 		     soc, freedesc->rx_tid.tid,
3948 		     reo_status->rx_queue_status.header.status);
3949 
3950 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3951 	freedesc->free_ts = curr_ts;
3952 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
3953 		(qdf_list_node_t *)freedesc, &list_size);
3954 
3955 	/* On the MCL path the desc is added back to reo_desc_freelist when
3956 	 * a REO flush fails. The number of pending REO queue descs in the
3957 	 * freelist can then grow beyond the REO_CMD_RING max size, flooding
3958 	 * the REO cmd ring and leaving REO HW in an unexpected state, so
3959 	 * the number of REO cmds per batch operation must be limited.
3960 	 */
3961 	dp_reo_limit_clean_batch_sz(&list_size);
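
	/*
	 * Illustrative effect of the clamp above (REO_DESC_FREELIST_SIZE
	 * is a build-time constant; the numbers here are examples only):
	 * with a limit of 64 and list_size == 300, at most 64 descriptors
	 * are flushed by the loop below; the rest wait for a later
	 * delete callback.
	 */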
3962 
3963 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
3964 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3965 		((list_size >= REO_DESC_FREELIST_SIZE) ||
3966 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
3967 		(desc->resend_update_reo_cmd && list_size))) {
3968 		struct dp_rx_tid *rx_tid;
3969 
3970 		qdf_list_remove_front(&soc->reo_desc_freelist,
3971 				(qdf_list_node_t **)&desc);
3972 		list_size--;
3973 		rx_tid = &desc->rx_tid;
3974 
3975 		/* First process descs with resend_update_reo_cmd set */
3976 		if (desc->resend_update_reo_cmd) {
3977 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
3978 			    QDF_STATUS_SUCCESS)
3979 				break;
3980 			else
3981 				continue;
3982 		}
3983 
3984 		/* Flush and invalidate REO descriptor from HW cache: Base and
3985 		 * extension descriptors should be flushed separately */
3986 		if (desc->pending_ext_desc_size)
3987 			tot_desc_size = desc->pending_ext_desc_size;
3988 		else
3989 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
3990 		/* Get base descriptor size by passing non-qos TID */
3991 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
3992 						   DP_NON_QOS_TID);
3993 
3994 		/* Flush reo extension descriptors */
3995 		while ((tot_desc_size -= desc_size) > 0) {
3996 			qdf_mem_zero(&params, sizeof(params));
3997 			params.std.addr_lo =
3998 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
3999 				tot_desc_size) & 0xffffffff;
4000 			params.std.addr_hi =
4001 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4002 
4003 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4004 							CMD_FLUSH_CACHE,
4005 							&params,
4006 							NULL,
4007 							NULL)) {
4008 				dp_info_rl("fail to send CMD_FLUSH_CACHE: "
4009 					   "tid %d desc %pK", rx_tid->tid,
4010 					   (void *)(rx_tid->hw_qdesc_paddr));
4011 				desc->pending_ext_desc_size = tot_desc_size +
4012 								      desc_size;
4013 				dp_reo_desc_clean_up(soc, desc, reo_status);
4014 				flush_failure = true;
4015 				break;
4016 			}
4017 		}
4018 
4019 		if (flush_failure)
4020 			break;
4021 		else
4022 			desc->pending_ext_desc_size = desc_size;
4023 
4024 		/* Flush base descriptor */
4025 		qdf_mem_zero(&params, sizeof(params));
4026 		params.std.need_status = 1;
4027 		params.std.addr_lo =
4028 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
4029 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4030 
4031 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4032 							  CMD_FLUSH_CACHE,
4033 							  &params,
4034 							  dp_reo_desc_free,
4035 							  (void *)desc)) {
4036 			union hal_reo_status reo_status;
4037 			/*
4038 			 * If dp_reo_send_cmd() returns failure, the related
4039 			 * TID queue desc should be unmapped, and the local
4040 			 * reo_desc together with the TID queue desc freed.
4041 			 *
4042 			 * Here the desc_free function is invoked directly
4043 			 * to do the clean up.
4044 			 *
4045 			 * On the MCL path, the desc is instead re-queued to
4046 			 * the free desc list and its deletion deferred.
4046 			 */
4047 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
4048 				   rx_tid->tid);
4049 			dp_reo_desc_clean_up(soc, desc, &reo_status);
4050 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4051 			break;
4052 		}
4053 	}
4054 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4055 
4056 	dp_reo_desc_defer_free(soc);
4057 }
4058 
4059 /*
4060  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
4061  * @peer: Datapath peer handle
4062  * @tid: TID
4063  *
4064  * Return: 0 on success, error code on failure
4065  */
4066 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
4067 {
4068 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
4069 	struct dp_soc *soc = peer->vdev->pdev->soc;
4070 	struct hal_reo_cmd_params params;
4071 	struct reo_desc_list_node *freedesc =
4072 		qdf_mem_malloc(sizeof(*freedesc));
4073 
4074 	if (!freedesc) {
4075 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
4076 			    soc, tid);
4077 		qdf_assert(0);
4078 		return -ENOMEM;
4079 	}
4080 
4081 	freedesc->rx_tid = *rx_tid;
4082 	freedesc->resend_update_reo_cmd = false;
4083 
4084 	qdf_mem_zero(&params, sizeof(params));
4085 
4086 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
4087 
4088 	params.std.need_status = 1;
4089 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
4090 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4091 	params.u.upd_queue_params.update_vld = 1;
4092 	params.u.upd_queue_params.vld = 0;
4093 
4094 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
4095 			    dp_rx_tid_delete_cb, (void *)freedesc)
4096 		!= QDF_STATUS_SUCCESS) {
4097 		/* Defer the clean up to the call back context */
4098 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4099 		freedesc->free_ts = qdf_get_system_timestamp();
4100 		freedesc->resend_update_reo_cmd = true;
4101 		qdf_list_insert_front(&soc->reo_desc_freelist,
4102 				      (qdf_list_node_t *)freedesc);
4103 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4104 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4105 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
4106 	}
4107 
4108 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
4109 	rx_tid->hw_qdesc_alloc_size = 0;
4110 	rx_tid->hw_qdesc_paddr = 0;
4111 
4112 	return 0;
4113 }
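
/*
 * Teardown flow at a glance, summarizing the functions above (a sketch
 * of the existing call flow, not new behaviour):
 *
 *	dp_rx_tid_delete_wifi3()
 *	  -> CMD_UPDATE_RX_REO_QUEUE (vld = 0)     invalidate the queue
 *	  -> dp_rx_tid_delete_cb()                 on command completion
 *	       -> CMD_FLUSH_CACHE per extension    evict HW cache,
 *	          desc, then for the base desc     batch-size limited
 *	            -> dp_reo_desc_free()          unmap + free memory
 */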
4114 
4115 #ifdef DP_LFR
4116 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
4117 {
4118 	int tid;
4119 
4120 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
4121 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
4122 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
4123 			      tid, peer, peer->local_id);
4124 	}
4125 }
4126 #else
4127 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
4128 #endif
4129 
4130 #ifdef WLAN_FEATURE_11BE_MLO
4131 /**
4132  * dp_peer_rx_tids_init() - initialize each tids in peer
4133  * @peer: peer pointer
4134  *
4135  * Return: None
4136  */
4137 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4138 {
4139 	int tid;
4140 	struct dp_rx_tid *rx_tid;
4141 	struct dp_rx_tid_defrag *rx_tid_defrag;
4142 
4143 	if (!IS_MLO_DP_LINK_PEER(peer)) {
4144 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4145 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4146 
4147 			rx_tid_defrag->array = &rx_tid_defrag->base;
4148 			rx_tid_defrag->defrag_timeout_ms = 0;
4149 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4150 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4151 			rx_tid_defrag->base.head = NULL;
4152 			rx_tid_defrag->base.tail = NULL;
4153 			rx_tid_defrag->tid = tid;
4154 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
4155 		}
4156 	}
4157 
4158 	/* if this is not the first assoc link peer,
4159 	 * do not initialize the rx_tids again.
4160 	 */
4161 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
4162 		return;
4163 
4164 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4165 		rx_tid = &peer->rx_tid[tid];
4166 		rx_tid->tid = tid;
4167 		rx_tid->ba_win_size = 0;
4168 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4169 	}
4170 }
4171 #else
4172 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4173 {
4174 	int tid;
4175 	struct dp_rx_tid *rx_tid;
4176 	struct dp_rx_tid_defrag *rx_tid_defrag;
4177 
4178 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4179 		rx_tid = &peer->rx_tid[tid];
4180 
4181 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4182 		rx_tid->tid = tid;
4183 		rx_tid->ba_win_size = 0;
4184 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4185 
4186 		rx_tid_defrag->base.head = NULL;
4187 		rx_tid_defrag->base.tail = NULL;
4188 		rx_tid_defrag->tid = tid;
4189 		rx_tid_defrag->array = &rx_tid_defrag->base;
4190 		rx_tid_defrag->defrag_timeout_ms = 0;
4191 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4192 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4193 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
4194 	}
4195 }
4196 #endif
4197 
4198 /*
4199  * dp_peer_rx_init() - Initialize receive TID state
4200  * @pdev: Datapath pdev
4201  * @peer: Datapath peer
4202  *
4203  */
4204 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
4205 {
4206 	dp_peer_rx_tids_init(peer);
4207 
4208 	peer->active_ba_session_cnt = 0;
4209 	peer->hw_buffer_size = 0;
4210 	peer->kill_256_sessions = 0;
4211 
4212 	/* Setup default (non-qos) rx tid queue */
4213 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
4214 
4215 	/* Set up the rx tid queue for TID 0 now; other queues are set up
4216 	 * on receiving the first packet, and deferring TID 0 the same way
4217 	 * would hit a NULL REO queue error
4218 	 */
4219 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
4220 
4221 	/*
4222 	 * Setup the rest of TID's to handle LFR
4223 	 */
4224 	dp_peer_setup_remaining_tids(peer);
4225 
4226 	/*
4227 	 * Set security defaults: no PN check, no security. The target may
4228 	 * send a HTT SEC_IND message to overwrite these defaults.
4229 	 */
4230 	if (peer->txrx_peer)
4231 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
4232 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
4233 				cdp_sec_type_none;
4234 }
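
/*
 * Resulting state after dp_peer_rx_init(): REO queues exist for
 * DP_NON_QOS_TID and TID 0 (and for the remaining TIDs when DP_LFR is
 * enabled); ucast and mcast sec_type default to cdp_sec_type_none
 * until an HTT SEC_IND from the target overrides them.
 */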
4235 
4236 /*
4237  * dp_peer_rx_cleanup() - Cleanup receive TID state
4238  * @vdev: Datapath vdev
4239  * @peer: Datapath peer
4240  *
4241  */
4242 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4243 {
4244 	int tid;
4245 	uint32_t tid_delete_mask = 0;
4246 
4247 	if (!peer->txrx_peer)
4248 		return;
4249 
4250 	dp_info("Remove tids for peer: %pK", peer);
4251 
4252 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4253 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
4254 		struct dp_rx_tid_defrag *defrag_rx_tid =
4255 				&peer->txrx_peer->rx_tid[tid];
4256 
4257 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4258 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
4259 			/* Cleanup defrag related resource */
4260 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
4261 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
4262 		}
4263 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4264 
4265 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4266 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
4267 			dp_rx_tid_delete_wifi3(peer, tid);
4268 
4269 			tid_delete_mask |= (1 << tid);
4270 		}
4271 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4272 	}
4273 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
4274 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
4275 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
4276 			peer->vdev->pdev->pdev_id,
4277 			peer->vdev->vdev_id, peer->mac_addr.raw,
4278 			tid_delete_mask);
4279 	}
4280 #endif
4281 }
4282 
4283 /*
4284  * dp_peer_cleanup() - Cleanup peer information
4285  * @vdev: Datapath vdev
4286  * @peer: Datapath peer
4287  *
4288  */
4289 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4290 {
4291 	enum wlan_op_mode vdev_opmode;
4292 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
4293 	struct dp_pdev *pdev = vdev->pdev;
4294 	struct dp_soc *soc = pdev->soc;
4295 
4296 	/* save vdev related member in case vdev freed */
4297 	vdev_opmode = vdev->opmode;
4298 
4299 	if (!IS_MLO_DP_MLD_PEER(peer))
4300 		dp_monitor_peer_tx_cleanup(vdev, peer);
4301 
4302 	/* cleanup the Rx reorder queues for this peer */
4303 	if (vdev_opmode != wlan_op_mode_monitor)
4304 		dp_peer_rx_cleanup(vdev, peer);
4305 
4306 	dp_peer_rx_tids_destroy(peer);
4307 
4308 	if (IS_MLO_DP_LINK_PEER(peer))
4309 		dp_link_peer_del_mld_peer(peer);
4310 	if (IS_MLO_DP_MLD_PEER(peer))
4311 		dp_mld_peer_deinit_link_peers_info(peer);
4312 
4313 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
4314 		     QDF_MAC_ADDR_SIZE);
4315 
4316 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
4317 		soc->cdp_soc.ol_ops->peer_unref_delete(
4318 				soc->ctrl_psoc,
4319 				vdev->pdev->pdev_id,
4320 				peer->mac_addr.raw, vdev_mac_addr,
4321 				vdev_opmode);
4322 }
4323 
4324 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
4325  *                                window size when a request with
4326  *                                64 window size is received.
4327  *                                This is done as a WAR since HW can
4328  *                                have only one setting per peer (64 or 256).
4329  *                                For HKv2, we use per tid buffersize setting
4330  *                                for 0 to per_tid_basize_max_tid. For tid
4331  *                                more than per_tid_basize_max_tid we use HKv1
4332  *                                method.
4333  * @peer: Datapath peer
4334  *
4335  * Return: void
4336  */
4337 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
4338 {
4339 	uint8_t delba_rcode = 0;
4340 	int tid;
4341 	struct dp_rx_tid *rx_tid = NULL;
4342 
4343 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
4344 	for (; tid < DP_MAX_TIDS; tid++) {
4345 		rx_tid = &peer->rx_tid[tid];
4346 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4347 
4348 		if (rx_tid->ba_win_size <= 64) {
4349 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4350 			continue;
4351 		} else {
4352 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
4353 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4354 				/* send delba */
4355 				if (!rx_tid->delba_tx_status) {
4356 					rx_tid->delba_tx_retry++;
4357 					rx_tid->delba_tx_status = 1;
4358 					rx_tid->delba_rcode =
4359 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
4360 					delba_rcode = rx_tid->delba_rcode;
4361 
4362 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4363 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4364 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4365 							peer->vdev->pdev->soc->ctrl_psoc,
4366 							peer->vdev->vdev_id,
4367 							peer->mac_addr.raw,
4368 							tid, delba_rcode,
4369 							CDP_DELBA_REASON_NONE);
4370 				} else {
4371 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4372 				}
4373 			} else {
4374 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4375 			}
4376 		}
4377 	}
4378 }
4379 
4380 /*
4381 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid state
4382 *
4383 * @soc: Datapath soc handle
4384 * @peer_mac: Datapath peer mac address
4385 * @vdev_id: id of datapath vdev
4386 * @tid: TID number
4387 * @status: tx completion status
4388 * Return: 0 on success, error code on failure
4389 */
4390 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
4391 				      uint8_t *peer_mac,
4392 				      uint16_t vdev_id,
4393 				      uint8_t tid, int status)
4394 {
4395 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4396 					(struct dp_soc *)cdp_soc,
4397 					peer_mac, 0, vdev_id,
4398 					DP_MOD_ID_CDP);
4399 	struct dp_rx_tid *rx_tid = NULL;
4400 
4401 	if (!peer) {
4402 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4403 		goto fail;
4404 	}
4405 	rx_tid = &peer->rx_tid[tid];
4406 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4407 	if (status) {
4408 		rx_tid->num_addba_rsp_failed++;
4409 		if (rx_tid->hw_qdesc_vaddr_unaligned)
4410 			dp_rx_tid_update_wifi3(peer, tid, 1,
4411 					       IEEE80211_SEQ_MAX, false);
4412 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4413 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4414 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
4415 
4416 		goto success;
4417 	}
4418 
4419 	rx_tid->num_addba_rsp_success++;
4420 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
4421 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4422 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
4423 			    cdp_soc, tid);
4424 		goto fail;
4425 	}
4426 
4427 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
4428 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4429 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
4430 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4431 		goto fail;
4432 	}
4433 
4434 	if (dp_rx_tid_update_wifi3(peer, tid,
4435 				   rx_tid->ba_win_size,
4436 				   rx_tid->startseqnum,
4437 				   false)) {
4438 		dp_err("Failed update REO SSN");
4439 	}
4440 
4441 	dp_info("tid %u window_size %u start_seq_num %u",
4442 		tid, rx_tid->ba_win_size,
4443 		rx_tid->startseqnum);
4444 
4445 	/* First Session */
4446 	if (peer->active_ba_session_cnt == 0) {
4447 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
4448 			peer->hw_buffer_size = 256;
4449 		else if (rx_tid->ba_win_size <= 1024 &&
4450 			 rx_tid->ba_win_size > 256)
4451 			peer->hw_buffer_size = 1024;
4452 		else
4453 			peer->hw_buffer_size = 64;
4454 	}
4455 
4456 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4457 
4458 	peer->active_ba_session_cnt++;
4459 
4460 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4461 
4462 	/* Kill any session having 256 buffer size
4463 	 * when 64 buffer size request is received.
4464 	 * Also, latch on to 64 as new buffer size.
4465 	 */
4466 	if (peer->kill_256_sessions) {
4467 		dp_teardown_256_ba_sessions(peer);
4468 		peer->kill_256_sessions = 0;
4469 	}
4470 
4471 success:
4472 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4473 	return QDF_STATUS_SUCCESS;
4474 
4475 fail:
4476 	if (peer)
4477 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4478 
4479 	return QDF_STATUS_E_FAILURE;
4480 }
4481 
4482 /*
4483 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
4484 *
4485 * @soc: Datapath soc handle
4486 * @peer_mac: Datapath peer mac address
4487 * @vdev_id: id of datapath vdev
4488 * @tid: TID number
4489 * @dialogtoken: output dialogtoken
4490 * @statuscode: output status code
4491 * @buffersize: Output BA window size
4492 * @batimeout: Output BA timeout
4493 */
4494 QDF_STATUS
4495 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4496 			     uint16_t vdev_id, uint8_t tid,
4497 			     uint8_t *dialogtoken, uint16_t *statuscode,
4498 			     uint16_t *buffersize, uint16_t *batimeout)
4499 {
4500 	struct dp_rx_tid *rx_tid = NULL;
4501 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4502 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4503 						       peer_mac, 0, vdev_id,
4504 						       DP_MOD_ID_CDP);
4505 
4506 	if (!peer) {
4507 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4508 		return QDF_STATUS_E_FAILURE;
4509 	}
4510 	rx_tid = &peer->rx_tid[tid];
4511 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4512 	rx_tid->num_of_addba_resp++;
4513 	/* setup ADDBA response parameters */
4514 	*dialogtoken = rx_tid->dialogtoken;
4515 	*statuscode = rx_tid->statuscode;
4516 	*buffersize = rx_tid->ba_win_size;
4517 	*batimeout  = 0;
4518 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4519 
4520 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4521 
4522 	return status;
4523 }
4524 
4525 /* dp_check_ba_buffersize() - Check buffer size in request
4526  *                            and latch onto this size based on
4527  *                            size used in first active session.
4528  * @peer: Datapath peer
4529  * @tid: Tid
4530  * @buffersize: Block ack window size
4531  *
4532  * Return: void
4533  */
4534 static void dp_check_ba_buffersize(struct dp_peer *peer,
4535 				   uint16_t tid,
4536 				   uint16_t buffersize)
4537 {
4538 	struct dp_rx_tid *rx_tid = NULL;
4539 	struct dp_soc *soc = peer->vdev->pdev->soc;
4540 	uint16_t max_ba_window;
4541 
4542 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
4543 	dp_info("Input buffersize %d, max dp allowed %d",
4544 		buffersize, max_ba_window);
4545 	/* Adjust BA window size, restrict it to max DP allowed */
4546 	buffersize = QDF_MIN(buffersize, max_ba_window);
4547 
4548 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
4549 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4550 		soc->per_tid_basize_max_tid, tid, buffersize,
4551 		peer->hw_buffer_size);
4552 
4553 	rx_tid = &peer->rx_tid[tid];
4554 	if (soc->per_tid_basize_max_tid &&
4555 	    tid < soc->per_tid_basize_max_tid) {
4556 		rx_tid->ba_win_size = buffersize;
4557 		goto out;
4558 	} else {
4559 		if (peer->active_ba_session_cnt == 0) {
4560 			rx_tid->ba_win_size = buffersize;
4561 		} else {
4562 			if (peer->hw_buffer_size == 64) {
4563 				if (buffersize <= 64)
4564 					rx_tid->ba_win_size = buffersize;
4565 				else
4566 					rx_tid->ba_win_size = peer->hw_buffer_size;
4567 			} else if (peer->hw_buffer_size == 256) {
4568 				if (buffersize > 64) {
4569 					rx_tid->ba_win_size = buffersize;
4570 				} else {
4571 					rx_tid->ba_win_size = buffersize;
4572 					peer->hw_buffer_size = 64;
4573 					peer->kill_256_sessions = 1;
4574 				}
4575 			} else if (buffersize <= 1024) {
4576 				/*
4577 				 * The above checks apply only to HK V2;
4578 				 * set the incoming buffer size for others.
4579 				 */
4580 				rx_tid->ba_win_size = buffersize;
4581 			} else {
4582 				dp_err("Invalid buffer size %d", buffersize);
4583 				qdf_assert_always(0);
4584 			}
4585 		}
4586 	}
4587 
4588 out:
4589 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
4590 		rx_tid->ba_win_size,
4591 		peer->hw_buffer_size,
4592 		peer->kill_256_sessions);
4593 }
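
/*
 * Worked examples of the latching logic above, for a TID outside the
 * per_tid_basize_max_tid range with a session already active
 * (illustrative values):
 *
 *	hw_buffer_size   requested   resulting ba_win_size
 *	      64             32                32
 *	      64            256                64  (clamped to latched 64)
 *	     256            200               200
 *	     256             32                32  (hw size drops to 64 and
 *	                                           kill_256_sessions is set)
 */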
4594 
4595 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
4596 					uint8_t *peer_mac, uint16_t vdev_id,
4597 					uint8_t tid, uint16_t buffersize)
4598 {
4599 	struct dp_rx_tid *rx_tid = NULL;
4600 	struct dp_peer *peer;
4601 
4602 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4603 					      peer_mac, 0, vdev_id,
4604 					      DP_MOD_ID_CDP);
4605 	if (!peer) {
4606 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4607 		return QDF_STATUS_E_FAILURE;
4608 	}
4609 
4610 	rx_tid = &peer->rx_tid[tid];
4611 
4612 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4613 	rx_tid->ba_win_size = buffersize;
4614 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4615 
4616 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
4617 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
4618 
4619 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4620 
4621 	return QDF_STATUS_SUCCESS;
4622 }
4623 
4624 #define DP_RX_BA_SESSION_DISABLE  1
4625 
4626 /*
4627  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
4628  *
4629  * @soc: Datapath soc handle
4630  * @peer_mac: Datapath peer mac address
4631  * @vdev_id: id of datapath vdev
4632  * @dialogtoken: dialogtoken from ADDBA frame
4633  * @tid: TID number
4634  * @batimeout: BA timeout
4635  * @buffersize: BA window size
4636  * @startseqnum: Start seq. number received in BA sequence control
4637  *
4638  * Return: 0 on success, error code on failure
4639  */
4640 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4641 				  uint8_t *peer_mac,
4642 				  uint16_t vdev_id,
4643 				  uint8_t dialogtoken,
4644 				  uint16_t tid, uint16_t batimeout,
4645 				  uint16_t buffersize,
4646 				  uint16_t startseqnum)
4647 {
4648 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4649 	struct dp_rx_tid *rx_tid = NULL;
4650 	struct dp_peer *peer;
4651 
4652 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4653 					      peer_mac,
4654 					      0, vdev_id,
4655 					      DP_MOD_ID_CDP);
4656 
4657 	if (!peer) {
4658 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4659 		return QDF_STATUS_E_FAILURE;
4660 	}
4661 	rx_tid = &peer->rx_tid[tid];
4662 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4663 	rx_tid->num_of_addba_req++;
4664 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4665 	    rx_tid->hw_qdesc_vaddr_unaligned) {
4666 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4667 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4668 		peer->active_ba_session_cnt--;
4669 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4670 			      cdp_soc, tid);
4671 	}
4672 
4673 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4674 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4675 		status = QDF_STATUS_E_FAILURE;
4676 		goto fail;
4677 	}
4678 
4679 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4680 		dp_peer_info("%pK: disable BA session",
4681 			     cdp_soc);
4682 
4683 		buffersize = 1;
4684 	} else if (rx_tid->rx_ba_win_size_override) {
4685 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4686 			     rx_tid->rx_ba_win_size_override);
4687 
4688 		buffersize = rx_tid->rx_ba_win_size_override;
4689 	} else {
4690 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4691 			     buffersize);
4692 	}
4693 
4694 	dp_check_ba_buffersize(peer, tid, buffersize);
4695 
4696 	if (dp_rx_tid_setup_wifi3(peer, tid,
4697 	    rx_tid->ba_win_size, startseqnum)) {
4698 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4699 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4700 		status = QDF_STATUS_E_FAILURE;
4701 		goto fail;
4702 	}
4703 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4704 
4705 	rx_tid->dialogtoken = dialogtoken;
4706 	rx_tid->startseqnum = startseqnum;
4707 
4708 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4709 		rx_tid->statuscode = rx_tid->userstatuscode;
4710 	else
4711 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4712 
4713 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4714 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4715 
4716 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4717 
4718 fail:
4719 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4720 
4721 	return status;
4722 }
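
/*
 * BA session state machine across the handlers in this file (sketch):
 *
 *	DP_RX_BA_INACTIVE
 *	  -- dp_addba_requestprocess_wifi3() -->       DP_RX_BA_IN_PROGRESS
 *	  -- dp_addba_resp_tx_completion_wifi3(),
 *	     tx status == 0 -->                        DP_RX_BA_ACTIVE
 *	  -- dp_delba_process_wifi3() or DELBA tx
 *	     completion -->                            DP_RX_BA_INACTIVE
 */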
4723 
4724 /*
4725 * dp_set_addba_response() - Set a user defined ADDBA response status code
4726 *
4727 * @soc: Datapath soc handle
4728 * @peer_mac: Datapath peer mac address
4729 * @vdev_id: id of datapath vdev
4730 * @tid: TID number
4731 * @statuscode: response status code to be set
4732 */
4733 QDF_STATUS
4734 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4735 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4736 {
4737 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4738 					(struct dp_soc *)cdp_soc,
4739 					peer_mac, 0, vdev_id,
4740 					DP_MOD_ID_CDP);
4741 	struct dp_rx_tid *rx_tid;
4742 
4743 	if (!peer) {
4744 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4745 		return QDF_STATUS_E_FAILURE;
4746 	}
4747 
4748 	rx_tid = &peer->rx_tid[tid];
4749 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4750 	rx_tid->userstatuscode = statuscode;
4751 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4752 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4753 
4754 	return QDF_STATUS_SUCCESS;
4755 }
4756 
4757 /*
4758 * dp_delba_process_wifi3() - Process DELBA from peer
4759 * @soc: Datapath soc handle
4760 * @peer_mac: Datapath peer mac address
4761 * @vdev_id: id of datapath vdev
4762 * @tid: TID number
4763 * @reasoncode: Reason code received in DELBA frame
4764 *
4765 * Return: 0 on success, error code on failure
4766 */
4767 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4768 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4769 {
4770 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4771 	struct dp_rx_tid *rx_tid;
4772 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4773 					(struct dp_soc *)cdp_soc,
4774 					peer_mac, 0, vdev_id,
4775 					DP_MOD_ID_CDP);
4776 
4777 	if (!peer) {
4778 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4779 		return QDF_STATUS_E_FAILURE;
4780 	}
4781 	rx_tid = &peer->rx_tid[tid];
4782 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4783 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4784 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4785 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4786 		status = QDF_STATUS_E_FAILURE;
4787 		goto fail;
4788 	}
4789 	/* TODO: See if we can delete the existing REO queue descriptor and
4790 	 * replace with a new one without queue extension descript to save
4791 	 * memory
4792 	 */
4793 	rx_tid->delba_rcode = reasoncode;
4794 	rx_tid->num_of_delba_req++;
4795 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4796 
4797 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4798 	peer->active_ba_session_cnt--;
4799 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4800 fail:
4801 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4802 
4803 	return status;
4804 }
4805 
4806 /*
4807  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion status
4808  *
4809  * @soc: Datapath soc handle
4810  * @peer_mac: Datapath peer mac address
4811  * @vdev_id: id of datapath vdev
4812  * @tid: TID number
4813  * @status: tx completion status
4814  * Return: 0 on success, error code on failure
4815  */
4816 
4817 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4818 				 uint16_t vdev_id,
4819 				 uint8_t tid, int status)
4820 {
4821 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4822 	struct dp_rx_tid *rx_tid = NULL;
4823 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4824 					(struct dp_soc *)cdp_soc,
4825 					peer_mac, 0, vdev_id,
4826 					DP_MOD_ID_CDP);
4827 
4828 	if (!peer) {
4829 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4830 		return QDF_STATUS_E_FAILURE;
4831 	}
4832 	rx_tid = &peer->rx_tid[tid];
4833 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4834 	if (status) {
4835 		rx_tid->delba_tx_fail_cnt++;
4836 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4837 			rx_tid->delba_tx_retry = 0;
4838 			rx_tid->delba_tx_status = 0;
4839 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4840 		} else {
4841 			rx_tid->delba_tx_retry++;
4842 			rx_tid->delba_tx_status = 1;
4843 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4844 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4845 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4846 					peer->vdev->pdev->soc->ctrl_psoc,
4847 					peer->vdev->vdev_id,
4848 					peer->mac_addr.raw, tid,
4849 					rx_tid->delba_rcode,
4850 					CDP_DELBA_REASON_NONE);
4851 		}
4852 		goto end;
4853 	} else {
4854 		rx_tid->delba_tx_success_cnt++;
4855 		rx_tid->delba_tx_retry = 0;
4856 		rx_tid->delba_tx_status = 0;
4857 	}
4858 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4859 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4860 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4861 		peer->active_ba_session_cnt--;
4862 	}
4863 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4864 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4865 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4866 	}
4867 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4868 
4869 end:
4870 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4871 
4872 	return ret;
4873 }
4874 
4875 /**
4876  * dp_set_pn_check_wifi3() - enable PN check in REO for security
4877  * @soc: Datapath soc handle
4878  * @vdev_id: id of datapath vdev
4879  * @peer_mac: Datapath peer mac address
4880  * @sec_type: security type
4881  * @rx_pn: Receive pn starting number
4884  *
4885  */
4886 
4887 QDF_STATUS
4888 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4889 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4890 		      uint32_t *rx_pn)
4891 {
4892 	struct dp_pdev *pdev;
4893 	int i;
4894 	uint8_t pn_size;
4895 	struct hal_reo_cmd_params params;
4896 	struct dp_peer *peer = NULL;
4897 	struct dp_vdev *vdev = NULL;
4898 
4899 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4900 				      peer_mac, 0, vdev_id,
4901 				      DP_MOD_ID_CDP);
4902 
4903 	if (!peer) {
4904 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4905 		return QDF_STATUS_E_FAILURE;
4906 	}
4907 
4908 	vdev = peer->vdev;
4909 
4910 	if (!vdev) {
4911 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
4912 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4913 		return QDF_STATUS_E_FAILURE;
4914 	}
4915 
4916 	pdev = vdev->pdev;
4917 	qdf_mem_zero(&params, sizeof(params));
4918 
4919 	params.std.need_status = 1;
4920 	params.u.upd_queue_params.update_pn_valid = 1;
4921 	params.u.upd_queue_params.update_pn_size = 1;
4922 	params.u.upd_queue_params.update_pn = 1;
4923 	params.u.upd_queue_params.update_pn_check_needed = 1;
4924 	params.u.upd_queue_params.update_svld = 1;
4925 	params.u.upd_queue_params.svld = 0;
4926 
4927 	switch (sec_type) {
4928 	case cdp_sec_type_tkip_nomic:
4929 	case cdp_sec_type_aes_ccmp:
4930 	case cdp_sec_type_aes_ccmp_256:
4931 	case cdp_sec_type_aes_gcmp:
4932 	case cdp_sec_type_aes_gcmp_256:
4933 		params.u.upd_queue_params.pn_check_needed = 1;
4934 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
4935 		pn_size = 48;
4936 		break;
4937 	case cdp_sec_type_wapi:
4938 		params.u.upd_queue_params.pn_check_needed = 1;
4939 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
4940 		pn_size = 128;
4941 		if (vdev->opmode == wlan_op_mode_ap) {
4942 			params.u.upd_queue_params.pn_even = 1;
4943 			params.u.upd_queue_params.update_pn_even = 1;
4944 		} else {
4945 			params.u.upd_queue_params.pn_uneven = 1;
4946 			params.u.upd_queue_params.update_pn_uneven = 1;
4947 		}
4948 		break;
4949 	default:
4950 		params.u.upd_queue_params.pn_check_needed = 0;
4951 		pn_size = 0;
4952 		break;
4953 	}
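
	/*
	 * PN size per cipher as configured by the switch above:
	 *
	 *	TKIP(no MIC)/CCMP/CCMP-256/GCMP/GCMP-256: 48-bit PN
	 *	WAPI: 128-bit PN (even PN on AP, uneven on STA)
	 *	anything else: PN check disabled
	 */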
4954 
4955 
4956 	for (i = 0; i < DP_MAX_TIDS; i++) {
4957 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4958 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4959 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4960 			params.std.addr_lo =
4961 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4962 			params.std.addr_hi =
4963 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4964 
4965 			if (pn_size) {
4966 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
4967 					     soc, i, rx_pn[3], rx_pn[2],
4968 					     rx_pn[1], rx_pn[0]);
4969 				params.u.upd_queue_params.update_pn_valid = 1;
4970 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
4971 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
4972 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
4973 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
4974 			}
4975 			rx_tid->pn_size = pn_size;
4976 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
4977 					    CMD_UPDATE_RX_REO_QUEUE,
4978 					    &params, dp_rx_tid_update_cb,
4979 					    rx_tid)) {
4980 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
4981 					   "tid %d desc %pK", rx_tid->tid,
4982 					   (void *)(rx_tid->hw_qdesc_paddr));
4983 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
4984 					     rx.err.reo_cmd_send_fail, 1);
4985 			}
4986 		} else {
4987 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
4988 		}
4989 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4990 	}
4991 
4992 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4993 
4994 	return QDF_STATUS_SUCCESS;
4995 }
4996 
4997 
4998 /**
4999  * dp_set_key_sec_type_wifi3() - set security mode of key
5000  * @soc: Datapath soc handle
5001  * @vdev_id: id of datapath vdev
5002  * @peer_mac: Datapath peer mac address
5003  * @sec_type: security type
5004  * @is_unicast: true for a unicast key, false for a multicast key
5007  *
5008  */
5009 
5010 QDF_STATUS
5011 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
5012 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
5013 			  bool is_unicast)
5014 {
5015 	struct dp_peer *peer =
5016 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5017 						       peer_mac, 0, vdev_id,
5018 						       DP_MOD_ID_CDP);
5019 	int sec_index;
5020 
5021 	if (!peer) {
5022 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
5023 		return QDF_STATUS_E_FAILURE;
5024 	}
5025 
5026 	if (!peer->txrx_peer) {
5027 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5028 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
5029 		return QDF_STATUS_E_FAILURE;
5030 	}
5031 
5032 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5033 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5034 		     is_unicast ? "ucast" : "mcast", sec_type);
5035 
5036 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5037 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
5038 
5039 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5040 
5041 	return QDF_STATUS_SUCCESS;
5042 }
5043 
5044 void
5045 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
5046 		      enum cdp_sec_type sec_type, int is_unicast,
5047 		      u_int32_t *michael_key,
5048 		      u_int32_t *rx_pn)
5049 {
5050 	struct dp_peer *peer;
5051 	struct dp_txrx_peer *txrx_peer;
5052 	int sec_index;
5053 
5054 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5055 	if (!peer) {
5056 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
5057 			    peer_id);
5058 		return;
5059 	}
5060 	txrx_peer = dp_get_txrx_peer(peer);
5061 	if (!txrx_peer) {
5062 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
5063 			    peer_id);
5064 		return;
5065 		/* release the ref taken by dp_peer_get_ref_by_id() */
5066 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5067 		return;
5066 
5067 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5068 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5069 		     is_unicast ? "ucast" : "mcast", sec_type);
5070 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5071 
5072 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
5073 #ifdef notyet /* TODO: See if this is required for defrag support */
5074 	/* michael key only valid for TKIP, but for simplicity,
5075 	 * copy it anyway
5076 	 */
5077 	qdf_mem_copy(
5078 		&peer->txrx_peer->security[sec_index].michael_key[0],
5079 		michael_key,
5080 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
5081 #ifdef BIG_ENDIAN_HOST
5082 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
5083 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
5084 #endif /* BIG_ENDIAN_HOST */
5085 #endif
5086 
5087 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
5088 	if (sec_type != cdp_sec_type_wapi) {
5089 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
5090 	} else {
5091 		for (i = 0; i < DP_MAX_TIDS; i++) {
5092 			/*
5093 			 * Setting PN valid bit for WAPI sec_type,
5094 			 * since WAPI PN has to be started with predefined value
5095 			 */
5096 			peer->tids_last_pn_valid[i] = 1;
5097 			qdf_mem_copy(
5098 				(u_int8_t *) &peer->tids_last_pn[i],
5099 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
5100 			peer->tids_last_pn[i].pn128[1] =
5101 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
5102 			peer->tids_last_pn[i].pn128[0] =
5103 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
5104 		}
5105 	}
5106 #endif
5107 	/* TODO: Update HW TID queue with PN check parameters (pn type for
5108 	 * all security types and last pn for WAPI) once REO command API
5109 	 * is available
5110 	 */
5111 
5112 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5113 }
5114 
5115 #ifdef QCA_PEER_EXT_STATS
5116 /*
5117  * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
5118  *                                   stats context
5119  * @soc: DP SoC context
5120  * @txrx_peer: DP txrx peer context
5121  *
5122  * Allocate the peer delay stats context
5123  *
5124  * Return: QDF_STATUS_SUCCESS if allocation is
5125  *	   successful
5126  */
5127 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
5128 					 struct dp_txrx_peer *txrx_peer)
5129 {
5130 	uint8_t tid, ctx_id;
5131 
5132 	if (!soc || !txrx_peer) {
5133 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
5134 		return QDF_STATUS_E_INVAL;
5135 	}
5136 
5137 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5138 		return QDF_STATUS_SUCCESS;
5139 
5140 	/*
5141 	 * Allocate memory for peer extended stats.
5142 	 */
5143 	txrx_peer->delay_stats =
5144 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
5145 	if (!txrx_peer->delay_stats) {
5146 		dp_err("Peer extended stats obj alloc failed!!");
5147 		return QDF_STATUS_E_NOMEM;
5148 	}
5149 
5150 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
5151 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
5152 			struct cdp_delay_tx_stats *tx_delay =
5153 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
5154 			struct cdp_delay_rx_stats *rx_delay =
5155 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
5156 
5157 			dp_hist_init(&tx_delay->tx_swq_delay,
5158 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
5159 			dp_hist_init(&tx_delay->hwtx_delay,
5160 				     CDP_HIST_TYPE_HW_COMP_DELAY);
5161 			dp_hist_init(&rx_delay->to_stack_delay,
5162 				     CDP_HIST_TYPE_REAP_STACK);
5163 		}
5164 	}
5165 
5166 	return QDF_STATUS_SUCCESS;
5167 }
5168 
5169 /*
5170  * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
5171  * @soc: DP SoC context
5172  * @txrx_peer: txrx DP peer context
5172  *
5173  * Free the peer delay stats context
5174  *
5175  * Return: Void
5176  */
5177 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
5178 				     struct dp_txrx_peer *txrx_peer)
5179 {
5180 	if (!txrx_peer) {
5181 		dp_warn("peer_ext dealloc failed due to NULL peer object");
5182 		return;
5183 	}
5184 
5185 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5186 		return;
5187 
5188 	if (!txrx_peer->delay_stats)
5189 		return;
5190 
5191 	qdf_mem_free(txrx_peer->delay_stats);
5192 	txrx_peer->delay_stats = NULL;
5193 }
5194 
5195 /**
5196  * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
5197  *
5198  * @txrx_peer: dp_txrx_peer handle
5199  *
5200  * Return: void
5201  */
5202 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5203 {
5204 	if (txrx_peer->delay_stats)
5205 		qdf_mem_zero(txrx_peer->delay_stats,
5206 			     sizeof(struct dp_peer_delay_stats));
5207 }
5208 #endif
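
/*
 * Delay-stats lifecycle, tying the helpers above together (call sites
 * live elsewhere in the datapath; shown here only for shape):
 *
 *	dp_peer_delay_stats_ctx_alloc(soc, txrx_peer);    attach + hist init
 *	dp_peer_delay_stats_ctx_clr(txrx_peer);           zero in place
 *	dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);  detach + free
 */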
5209 
5210 #ifdef WLAN_PEER_JITTER
5211 /**
5212  * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
5213  *
5214  * @pdev: Datapath pdev handle
5215  * @txrx_peer: dp_txrx_peer handle
5216  *
5217  * Return: QDF_STATUS
5218  */
5219 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
5220 					  struct dp_txrx_peer *txrx_peer)
5221 {
5222 	if (!pdev || !txrx_peer) {
5223 		dp_warn("Null pdev or peer");
5224 		return QDF_STATUS_E_INVAL;
5225 	}
5226 
5227 	/*
5228 	 * Allocate memory for jitter stats only when
5229 	 * operating in offload enabled mode.
5230 	 */
5231 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5232 		return QDF_STATUS_SUCCESS;
5233 
5234 	txrx_peer->jitter_stats =
5235 		qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS);
5236 	if (!txrx_peer->jitter_stats) {
5237 		dp_warn("Jitter stats obj alloc failed!!");
5238 		return QDF_STATUS_E_NOMEM;
5239 	}
5240 
5241 	return QDF_STATUS_SUCCESS;
5242 }
5243 
5244 /**
5245  * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
5246  *
5247  * @pdev: Datapath pdev handle
5248  * @txrx_peer: dp_txrx_peer handle
5249  *
5250  * Return: void
5251  */
5252 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
5253 				      struct dp_txrx_peer *txrx_peer)
5254 {
5255 	if (!pdev || !txrx_peer) {
5256 		dp_warn("Null pdev or peer");
5257 		return;
5258 	}
5259 
5260 	/* Check for offload mode */
5261 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5262 		return;
5263 
5264 	if (txrx_peer->jitter_stats) {
5265 		qdf_mem_free(txrx_peer->jitter_stats);
5266 		txrx_peer->jitter_stats = NULL;
5267 	}
5268 }
5269 
5270 /**
5271  * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
5272  *
5273  * @txrx_peer: dp_txrx_peer handle
5274  *
5275  * Return: void
5276  */
5277 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5278 {
5279 	if (txrx_peer->jitter_stats)
5280 		qdf_mem_zero(txrx_peer->jitter_stats,
5281 			     sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS);
5282 }
5283 #endif
5284 
5285 QDF_STATUS
5286 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
5287 			uint8_t tid, uint16_t win_sz)
5288 {
5289 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
5290 	struct dp_peer *peer;
5291 	struct dp_rx_tid *rx_tid;
5292 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5293 
5294 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5295 
5296 	if (!peer) {
5297 		dp_peer_err("%pK: Couldn't find peer from ID %d",
5298 			    soc, peer_id);
5299 		return QDF_STATUS_E_FAILURE;
5300 	}
5301 
5302 	qdf_assert_always(tid < DP_MAX_TIDS);
5303 
5304 	rx_tid = &peer->rx_tid[tid];
5305 
5306 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
5307 		if (!rx_tid->delba_tx_status) {
5308 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
5309 				     soc, peer_id, tid, win_sz);
5310 
5311 			qdf_spin_lock_bh(&rx_tid->tid_lock);
5312 
5313 			rx_tid->delba_tx_status = 1;
5314 
5315 			rx_tid->rx_ba_win_size_override =
5316 			    qdf_min((uint16_t)63, win_sz);
5317 
5318 			rx_tid->delba_rcode =
5319 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
5320 
5321 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
5322 
5323 			if (soc->cdp_soc.ol_ops->send_delba)
5324 				soc->cdp_soc.ol_ops->send_delba(
5325 					peer->vdev->pdev->soc->ctrl_psoc,
5326 					peer->vdev->vdev_id,
5327 					peer->mac_addr.raw,
5328 					tid,
5329 					rx_tid->delba_rcode,
5330 					CDP_DELBA_REASON_NONE);
5331 		}
5332 	} else {
5333 		dp_peer_err("%pK: BA session is not set up for TID %d", soc, tid);
5334 		status = QDF_STATUS_E_FAILURE;
5335 	}
5336 
5337 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5338 
5339 	return status;
5340 }
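
/*
 * Editorial sketch (assumption; names are hypothetical): a caller in the HTT
 * indication path would invoke dp_rx_delba_ind_handler() with the parsed
 * peer id, TID and window size, and only treat the call as failed when no BA
 * session exists for that TID.
 *
 *	static void example_handle_delba_ind(struct dp_soc *soc,
 *					     uint16_t peer_id, uint8_t tid,
 *					     uint16_t win_sz)
 *	{
 *		if (QDF_IS_STATUS_ERROR(dp_rx_delba_ind_handler(soc, peer_id,
 *								tid, win_sz)))
 *			dp_err("DELBA ind dropped for peer %u tid %u",
 *			       peer_id, tid);
 *	}
 */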
5341 
5342 #ifdef DP_PEER_EXTENDED_API
5343 /**
5344  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
5345  * @soc: DP soc handle
5346  * @txrx_peer: Core txrx_peer handle
5347  * @set_bw: enum of bandwidth to be set for this peer connection
5348  *
5349  * Return: None
5350  */
5351 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
5352 			   enum cdp_peer_bw set_bw)
5353 {
5354 	if (!txrx_peer)
5355 		return;
5356 
5357 	txrx_peer->bw = set_bw;
5358 
5359 	switch (set_bw) {
5360 	case CDP_160_MHZ:
5361 	case CDP_320_MHZ:
5362 		txrx_peer->mpdu_retry_threshold =
5363 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
5364 		break;
5365 	case CDP_20_MHZ:
5366 	case CDP_40_MHZ:
5367 	case CDP_80_MHZ:
5368 	default:
5369 		txrx_peer->mpdu_retry_threshold =
5370 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
5371 		break;
5372 	}
5373 
5374 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
5375 		txrx_peer->peer_id, txrx_peer->bw,
5376 		txrx_peer->mpdu_retry_threshold);
5377 }
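
/*
 * For reference, the selection above reduces to the following mapping
 * (derived directly from the switch statement):
 *
 *	peer bandwidth            mpdu_retry_threshold taken from
 *	--------------            -------------------------------
 *	CDP_160_MHZ/CDP_320_MHZ   wlan_cfg_ctx->mpdu_retry_threshold_2
 *	CDP_20/40/80_MHZ, other   wlan_cfg_ctx->mpdu_retry_threshold_1
 */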
5378 
5379 #ifdef WLAN_FEATURE_11BE_MLO
5380 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5381 			    struct ol_txrx_desc_type *sta_desc)
5382 {
5383 	struct dp_peer *peer;
5384 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5385 
5386 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5387 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5388 
5389 	if (!peer)
5390 		return QDF_STATUS_E_FAULT;
5391 
5392 	qdf_spin_lock_bh(&peer->peer_info_lock);
5393 	peer->state = OL_TXRX_PEER_STATE_CONN;
5394 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5395 
5396 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5397 
5398 	dp_rx_flush_rx_cached(peer, false);
5399 
5400 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5401 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
5402 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
5403 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
5404 		peer->mld_peer->state = peer->state;
5405 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
5406 		dp_rx_flush_rx_cached(peer->mld_peer, false);
5407 	}
5408 
5409 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5410 
5411 	return QDF_STATUS_SUCCESS;
5412 }
5413 
5414 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5415 				enum ol_txrx_peer_state state)
5416 {
5417 	struct dp_peer *peer;
5418 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5419 
5420 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5421 				       DP_MOD_ID_CDP);
5422 	if (!peer) {
5423 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
5424 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5425 		return QDF_STATUS_E_FAILURE;
5426 	}
5427 	peer->state = state;
5428 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5429 
5430 	if (peer->txrx_peer)
5431 		peer->txrx_peer->authorize = peer->authorize;
5432 
5433 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
5434 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5435 		     peer->state);
5436 
5437 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5438 		peer->mld_peer->state = peer->state;
5439 		if (peer->mld_peer->txrx_peer)
			peer->mld_peer->txrx_peer->authorize = peer->authorize;
5440 		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
5441 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
5442 			     peer->mld_peer->state);
5443 	}
5444 
5445 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5446 	 * Decrement it here.
5447 	 */
5448 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5449 
5450 	return QDF_STATUS_SUCCESS;
5451 }
5452 #else
5453 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5454 			    struct ol_txrx_desc_type *sta_desc)
5455 {
5456 	struct dp_peer *peer;
5457 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5458 
5459 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5460 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5461 
5462 	if (!peer)
5463 		return QDF_STATUS_E_FAULT;
5464 
5465 	qdf_spin_lock_bh(&peer->peer_info_lock);
5466 	peer->state = OL_TXRX_PEER_STATE_CONN;
5467 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5468 
5469 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5470 
5471 	dp_rx_flush_rx_cached(peer, false);
5472 
5473 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5474 
5475 	return QDF_STATUS_SUCCESS;
5476 }
5477 
5478 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5479 				enum ol_txrx_peer_state state)
5480 {
5481 	struct dp_peer *peer;
5482 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5483 
5484 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5485 				       DP_MOD_ID_CDP);
5486 	if (!peer) {
5487 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
5488 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5489 		return QDF_STATUS_E_FAILURE;
5490 	}
5491 	peer->state = state;
5492 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5493 
5494 	if (peer->txrx_peer)
5495 		peer->txrx_peer->authorize = peer->authorize;
5496 
5497 	dp_info("peer %pK state %d", peer, peer->state);
5498 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5499 	 * Decrement it here.
5500 	 */
5501 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5502 
5503 	return QDF_STATUS_SUCCESS;
5504 }
5505 #endif
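
/*
 * Editorial illustration of the state/authorize coupling implemented by both
 * dp_peer_state_update() variants above (follows from the ternary in the
 * code):
 *
 *	dp_peer_state_update(soc_hdl, mac, OL_TXRX_PEER_STATE_AUTH);
 *		peer->authorize == 1
 *	dp_peer_state_update(soc_hdl, mac, OL_TXRX_PEER_STATE_CONN);
 *		peer->authorize == 0
 */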
5506 
5507 QDF_STATUS
5508 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5509 	      struct qdf_mac_addr peer_addr)
5510 {
5511 	struct dp_peer *peer;
5512 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5513 
5514 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
5515 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5516 	if (!peer || !peer->valid)
5517 		return QDF_STATUS_E_FAULT;
5518 
5519 	dp_clear_peer_internal(soc, peer);
5520 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5521 	return QDF_STATUS_SUCCESS;
5522 }
5523 
5524 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5525 			 uint8_t *vdev_id)
5526 {
5527 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5528 	struct dp_peer *peer =
5529 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5530 				       DP_MOD_ID_CDP);
5531 
5532 	if (!peer)
5533 		return QDF_STATUS_E_FAILURE;
5534 
5535 	dp_info("peer %pK vdev %pK vdev id %d",
5536 		peer, peer->vdev, peer->vdev->vdev_id);
5537 	*vdev_id = peer->vdev->vdev_id;
5538 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5539 	 * Decrement it here.
5540 	 */
5541 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5542 
5543 	return QDF_STATUS_SUCCESS;
5544 }
5545 
5546 struct cdp_vdev *
5547 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
5548 			 struct qdf_mac_addr peer_addr)
5549 {
5550 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5551 	struct dp_peer *peer = NULL;
5552 	struct cdp_vdev *vdev = NULL;
5553 
5554 	if (!pdev) {
5555 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
5556 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
5557 		return NULL;
5558 	}
5559 
5560 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
5561 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
5562 	if (!peer) {
5563 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5564 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
5565 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
5566 		return NULL;
5567 	}
5568 
5569 	vdev = (struct cdp_vdev *)peer->vdev;
5570 
5571 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5572 	return vdev;
5573 }
5574 
5575 /**
5576  * dp_get_vdev_for_peer() - Get virtual interface instance to which the
5577  *                          peer belongs
5578  * @peer_handle: peer instance
5579  *
5580  * Return: virtual interface instance pointer;
5581  *         NULL if it cannot be found
5582  */
5584 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
5585 {
5586 	struct dp_peer *peer = peer_handle;
5587 
5588 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
5589 	return (struct cdp_vdev *)peer->vdev;
5590 }
5591 
5592 /**
5593  * dp_peer_get_peer_mac_addr() - Get peer mac address
5594  * @peer_handle: peer instance
5595  *
5596  * Return: peer MAC address pointer;
5597  *         NULL if it cannot be found
5598  */
5601 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
5602 {
5603 	struct dp_peer *peer = peer_handle;
5604 	uint8_t *mac;
5605 
5606 	mac = peer->mac_addr.raw;
5607 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5608 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
5609 	return peer->mac_addr.raw;
5610 }
5611 
5612 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5613 		      uint8_t *peer_mac)
5614 {
5615 	enum ol_txrx_peer_state peer_state;
5616 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5617 	struct cdp_peer_info peer_info = { 0 };
5618 	struct dp_peer *peer;
5619 	struct dp_peer *tgt_peer;
5620 
5621 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
5622 				 false, CDP_WILD_PEER_TYPE);
5623 
5624 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
5625 
5626 	if (!peer)
5627 		return OL_TXRX_PEER_STATE_INVALID;
5628 
5629 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
5630 
5631 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
5632 	peer_state = tgt_peer->state;
5633 
5634 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5635 
5636 	return peer_state;
5637 }
5638 
5639 /**
5640  * dp_local_peer_id_pool_init() - allocate local peer id pool for a
5641  *                                physical device
5642  * @pdev: data path device instance
5643  *
5644  * Build the freelist that links each local peer id to the next one
5645  * Return: none
5646  */
5647 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
5648 {
5649 	int i;
5650 
5651 	/* point the freelist to the first ID */
5652 	pdev->local_peer_ids.freelist = 0;
5653 
5654 	/* link each ID to the next one */
5655 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
5656 		pdev->local_peer_ids.pool[i] = i + 1;
5657 		pdev->local_peer_ids.map[i] = NULL;
5658 	}
5659 
5660 	/* link the last ID to itself, to mark the end of the list */
5661 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
5662 	pdev->local_peer_ids.pool[i] = i;
5663 
5664 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
5665 	DP_TRACE(INFO, "Peer pool init");
5666 }
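
/*
 * Worked example (editorial): with OL_TXRX_NUM_LOCAL_PEER_IDS == 4 the pool
 * comes out of init as
 *
 *	freelist   = 0
 *	pool[0..3] = {1, 2, 3, 4}    each id links to the next free id
 *	pool[4]    = 4               self-link marks the end of the list
 *
 * so allocation below simply pops the head and follows the links.
 */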
5667 
5668 /**
5669  * dp_local_peer_id_alloc() - allocate local peer id
5670  * @pdev: data path device instance
5671  * @peer: new peer instance
5672  *
5673  * Allocate a local peer id from the pdev freelist
5674  *
5675  * Return: none
5676  */
5677 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5678 {
5679 	int i;
5680 
5681 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5682 	i = pdev->local_peer_ids.freelist;
5683 	if (pdev->local_peer_ids.pool[i] == i) {
5684 		/* the list is empty, except for the list-end marker */
5685 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5686 	} else {
5687 		/* take the head ID and advance the freelist */
5688 		peer->local_id = i;
5689 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5690 		pdev->local_peer_ids.map[i] = peer;
5691 	}
5692 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5693 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5694 }
5695 
5696 /**
5697  * dp_local_peer_id_free() - free local peer id
5698  * @pdev: data path device instance
5699  * @peer: peer instance whose local id is to be freed
5700  *
5701  * Return the local peer id to the head of the freelist
5702  *
5703  * Return: none
5704  */
5705 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5706 {
5707 	int i = peer->local_id;
5708 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5709 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5710 		return;
5711 	}
5712 
5713 	/* put this ID on the head of the freelist */
5714 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5715 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5716 	pdev->local_peer_ids.freelist = i;
5717 	pdev->local_peer_ids.map[i] = NULL;
5718 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5719 }
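
/*
 * Editorial usage sketch (the helper name is hypothetical): ids taken with
 * dp_local_peer_id_alloc() must be returned with dp_local_peer_id_free() on
 * the same pdev, and OL_TXRX_INVALID_LOCAL_PEER_ID signals pool exhaustion.
 *
 *	static void example_local_id_cycle(struct dp_pdev *pdev,
 *					   struct dp_peer *peer)
 *	{
 *		dp_local_peer_id_alloc(pdev, peer);
 *		if (peer->local_id == OL_TXRX_INVALID_LOCAL_PEER_ID)
 *			return;
 *		dp_local_peer_id_free(pdev, peer);
 *	}
 */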
5720 
5721 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5722 				uint8_t vdev_id, uint8_t *peer_addr)
5723 {
5724 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5725 	struct dp_peer *peer = NULL;
5726 
5727 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5728 				      DP_MOD_ID_CDP);
5729 	if (!peer)
5730 		return false;
5731 
5732 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5733 
5734 	return true;
5735 }
5736 
5737 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5738 				      uint8_t vdev_id, uint8_t *peer_addr,
5739 				      uint16_t max_bssid)
5740 {
5741 	int i;
5742 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5743 	struct dp_peer *peer = NULL;
5744 
5745 	for (i = 0; i < max_bssid; i++) {
5746 		/* Need to check vdevs other than the vdev_id */
5747 		if (vdev_id == i)
5748 			continue;
5749 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5750 					      DP_MOD_ID_CDP);
5751 		if (peer) {
5752 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
5753 			       QDF_MAC_ADDR_REF(peer_addr), i);
5754 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5755 			return true;
5756 		}
5757 	}
5758 
5759 	return false;
5760 }
5761 
5762 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5763 			uint8_t *peer_addr)
5764 {
5765 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5766 	struct dp_peer *peer = NULL;
5767 
5768 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5769 				      DP_MOD_ID_CDP);
5770 	if (peer) {
5771 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5772 		return true;
5773 	}
5774 
5775 	return false;
5776 }
5777 
5778 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5779 			      uint8_t *peer_mac, bool val)
5780 {
5781 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5782 	struct dp_peer *peer = NULL;
5783 
5784 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5785 				      DP_MOD_ID_CDP);
5786 	if (!peer) {
5787 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
5788 		       QDF_MAC_ADDR_REF(peer_mac));
5789 		return;
5790 	}
5791 
5792 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
5793 		val, QDF_MAC_ADDR_REF(peer_mac));
5794 	peer->is_tdls_peer = val;
5795 
5796 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5797 }
5798 #endif
5799 
5800 #ifdef IPA_OFFLOAD
5801 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
5802 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
5803 {
5804 	struct dp_soc *soc = peer->vdev->pdev->soc;
5805 	struct hal_reo_cmd_params params;
5806 	int i;
5807 	int stats_cmd_sent_cnt = 0;
5808 	QDF_STATUS status;
5809 	uint16_t peer_id = peer->peer_id;
5810 	unsigned long comb_peer_id_tid;
5811 
5812 	if (!dp_stats_cmd_cb)
5813 		return stats_cmd_sent_cnt;
5814 
5815 	qdf_mem_zero(&params, sizeof(params));
5816 	for (i = 0; i < DP_MAX_TIDS; i++) {
5817 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5818 
5819 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5820 			params.std.need_status = 1;
5821 			params.std.addr_lo =
5822 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5823 			params.std.addr_hi =
5824 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5825 			params.u.stats_params.clear = 1;
5826 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
5827 					    | peer_id);
5828 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
5829 						 &params, dp_stats_cmd_cb,
5830 						 (void *)comb_peer_id_tid);
5831 			if (QDF_IS_STATUS_SUCCESS(status))
5832 				stats_cmd_sent_cnt++;
5833 
5834 			/* Flush REO descriptor from HW cache to update stats
5835 			 * in descriptor memory. This is to help debugging
5836 			 */
5837 			qdf_mem_zero(&params, sizeof(params));
5838 			params.std.need_status = 0;
5839 			params.std.addr_lo =
5840 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5841 			params.std.addr_hi =
5842 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5843 			params.u.fl_cache_params.flush_no_inval = 1;
5844 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5845 					NULL);
5846 		}
5847 	}
5848 
5849 	return stats_cmd_sent_cnt;
5850 }
5851 
5852 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
5853 
5854 #endif
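
/*
 * Editorial note on the cb_ctxt encoding used in the IPA variant above: TID
 * and peer id are packed into one unsigned long, so a callback can recover
 * them with the matching shift/mask. Minimal decode sketch, assuming peer
 * ids fit below bit DP_PEER_REO_STATS_TID_SHIFT (names are hypothetical):
 *
 *	static void example_decode_comb_id(unsigned long comb_peer_id_tid,
 *					   uint16_t *peer_id, uint8_t *tid)
 *	{
 *		*tid = comb_peer_id_tid >> DP_PEER_REO_STATS_TID_SHIFT;
 *		*peer_id = comb_peer_id_tid &
 *			   ((1UL << DP_PEER_REO_STATS_TID_SHIFT) - 1);
 *	}
 */
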
5855 /**
5856  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
5857  * @peer: DP peer handle
5858  * @dp_stats_cmd_cb: REO command callback function
5859  * @cb_ctxt: Callback context
5860  *
5861  * Return: count of TID stats commands sent successfully
5862  */
5863 int dp_peer_rxtid_stats(struct dp_peer *peer,
5864 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5865 			void *cb_ctxt)
5866 {
5867 	struct dp_soc *soc = peer->vdev->pdev->soc;
5868 	struct hal_reo_cmd_params params;
5869 	int i;
5870 	int stats_cmd_sent_cnt = 0;
5871 	QDF_STATUS status;
5872 
5873 	if (!dp_stats_cmd_cb)
5874 		return stats_cmd_sent_cnt;
5875 
5876 	qdf_mem_zero(&params, sizeof(params));
5877 	for (i = 0; i < DP_MAX_TIDS; i++) {
5878 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5879 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5880 			params.std.need_status = 1;
5881 			params.std.addr_lo =
5882 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5883 			params.std.addr_hi =
5884 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5885 
5886 			if (cb_ctxt) {
5887 				status = dp_reo_send_cmd(
5888 						soc, CMD_GET_QUEUE_STATS,
5889 						&params, dp_stats_cmd_cb,
5890 						cb_ctxt);
5891 			} else {
5892 				status = dp_reo_send_cmd(
5893 						soc, CMD_GET_QUEUE_STATS,
5894 						&params, dp_stats_cmd_cb,
5895 						rx_tid);
5896 			}
5897 
5898 			if (QDF_IS_STATUS_SUCCESS(status))
5899 				stats_cmd_sent_cnt++;
5900 
5901 			/* Flush REO descriptor from HW cache to update stats
5902 			 * in descriptor memory. This is to help debugging
			 */
5903 			qdf_mem_zero(&params, sizeof(params));
5904 			params.std.need_status = 0;
5905 			params.std.addr_lo =
5906 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5907 			params.std.addr_hi =
5908 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5909 			params.u.fl_cache_params.flush_no_inval = 1;
5910 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5911 				NULL);
5912 		}
5913 	}
5914 
5915 	return stats_cmd_sent_cnt;
5916 }
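
/*
 * Editorial callback sketch (assumption; body and names are hypothetical,
 * the signature mirrors how dp_stats_cmd_cb is invoked via dp_reo_send_cmd()
 * above, with rx_tid passed as the default context):
 *
 *	static void example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *					   union hal_reo_status *reo_status)
 *	{
 *		struct dp_rx_tid *rx_tid = cb_ctxt;
 *
 *		if (reo_status->rx_queue_status.header.status !=
 *		    HAL_REO_CMD_SUCCESS)
 *			return;
 *		dp_info("REO queue stats received for tid %d", rx_tid->tid);
 *	}
 */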
5917 
5918 QDF_STATUS
5919 dp_set_michael_key(struct cdp_soc_t *soc,
5920 		   uint8_t vdev_id,
5921 		   uint8_t *peer_mac,
5922 		   bool is_unicast, uint32_t *key)
5923 {
5924 	uint8_t sec_index = is_unicast ? 1 : 0;
5925 	struct dp_peer *peer =
5926 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5927 						       peer_mac, 0, vdev_id,
5928 						       DP_MOD_ID_CDP);
5929 
5930 	if (!peer) {
5931 		dp_peer_err("%pK: peer not found", soc);
5932 		return QDF_STATUS_E_FAILURE;
5933 	}
5934 
5935 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
5936 		     key, IEEE80211_WEP_MICLEN);
5937 
5938 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5939 
5940 	return QDF_STATUS_SUCCESS;
5941 }
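
/*
 * Editorial usage sketch (hypothetical caller): key must point at
 * IEEE80211_WEP_MICLEN bytes; is_unicast selects security[1], multicast
 * traffic uses security[0].
 *
 *	static QDF_STATUS example_install_mic_key(struct cdp_soc_t *soc,
 *						  uint8_t vdev_id,
 *						  uint8_t *peer_mac,
 *						  uint32_t *key)
 *	{
 *		return dp_set_michael_key(soc, vdev_id, peer_mac, true, key);
 *	}
 */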
5942 
5944 /**
5945  * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
5946  * @soc: DP soc
5947  * @vdev: vdev
5948  * @mod_id: id of module requesting reference
5949  *
5950  * Return: VDEV BSS peer
5951  */
5952 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
5953 					   struct dp_vdev *vdev,
5954 					   enum dp_mod_id mod_id)
5955 {
5956 	struct dp_peer *peer = NULL;
5957 
5958 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5959 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5960 		if (peer->bss_peer)
5961 			break;
5962 	}
5963 
5964 	if (!peer) {
5965 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5966 		return NULL;
5967 	}
5968 
5969 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
5970 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5971 		return peer;
5972 	}
5973 
5974 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
5975 	return NULL;
5976 }
5977 
5978 /**
5979  * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
5980  * @soc: DP soc
5981  * @vdev: vdev
5982  * @mod_id: id of module requesting reference
5983  *
5984  * Return: VDEV self peer
5985  */
5986 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
5987 						struct dp_vdev *vdev,
5988 						enum dp_mod_id mod_id)
5989 {
5990 	struct dp_peer *peer;
5991 
5992 	if (vdev->opmode != wlan_op_mode_sta)
5993 		return NULL;
5994 
5995 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5996 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5997 		if (peer->sta_self_peer)
5998 			break;
5999 	}
6000 
6001 	if (!peer) {
6002 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6003 		return NULL;
6004 	}
6005 
6006 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6007 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6008 		return peer;
6009 	}
6010 
6011 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
6012 	return NULL;
6013 }
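
/*
 * Editorial usage sketch for the two ref getters above (the helper name is
 * hypothetical): a peer returned here holds a reference for mod_id and must
 * be released with dp_peer_unref_delete(), as dp_get_rx_reo_queue_info()
 * below does.
 *
 *	static void example_use_bss_peer(struct dp_soc *soc,
 *					 struct dp_vdev *vdev)
 *	{
 *		struct dp_peer *peer =
 *			dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
 *
 *		if (!peer)
 *			return;
 *		dp_info("bss peer %pK", peer);
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */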
6014 
6015 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
6016 void dp_dump_rx_reo_queue_info(
6017 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
6018 {
6019 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
6020 
6021 	if (!rx_tid)
6022 		return;
6023 
6024 	if (reo_status->fl_cache_status.header.status !=
6025 		HAL_REO_CMD_SUCCESS) {
6026 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
6027 			  reo_status->fl_cache_status.header.status);
6028 		return;
6029 	}
6030 	qdf_spin_lock_bh(&rx_tid->tid_lock);
6031 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
6032 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
6033 }
6034 
6035 void dp_send_cache_flush_for_rx_tid(
6036 	struct dp_soc *soc, struct dp_peer *peer)
6037 {
6038 	int i;
6039 	struct dp_rx_tid *rx_tid;
6040 	struct hal_reo_cmd_params params;
6041 
6042 	if (!peer) {
6043 		dp_err_rl("Peer is NULL");
6044 		return;
6045 	}
6046 
6047 	for (i = 0; i < DP_MAX_TIDS; i++) {
6048 		rx_tid = &peer->rx_tid[i];
6049 		if (!rx_tid)
6050 			continue;
6051 		qdf_spin_lock_bh(&rx_tid->tid_lock);
6052 		if (rx_tid->hw_qdesc_vaddr_aligned) {
6053 			qdf_mem_zero(&params, sizeof(params));
6054 			params.std.need_status = 1;
6055 			params.std.addr_lo =
6056 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6057 			params.std.addr_hi =
6058 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6059 			params.u.fl_cache_params.flush_no_inval = 0;
6060 			if (QDF_STATUS_SUCCESS !=
6061 				dp_reo_send_cmd(
6062 					soc, CMD_FLUSH_CACHE,
6063 					&params, dp_dump_rx_reo_queue_info,
6064 					(void *)rx_tid)) {
6065 				dp_err_rl("cache flush send failed tid %d",
6066 					  rx_tid->tid);
6067 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
6068 				break;
6069 			}
6070 		}
6071 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
6072 	}
6073 }
6074 
6075 void dp_get_rx_reo_queue_info(
6076 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6077 {
6078 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6079 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6080 						     DP_MOD_ID_GENERIC_STATS);
6081 	struct dp_peer *peer = NULL;
6082 
6083 	if (!vdev) {
6084 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
6085 		goto failed;
6086 	}
6087 
6088 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6089 
6090 	if (!peer) {
6091 		dp_err_rl("Peer is NULL");
6092 		goto failed;
6093 	}
6094 	dp_send_cache_flush_for_rx_tid(soc, peer);
6095 failed:
6096 	if (peer)
6097 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
6098 	if (vdev)
6099 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6100 }
6101 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
6102 
6103 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6104 			 uint8_t *peer_mac)
6105 {
6106 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6107 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
6108 							      vdev_id,
6109 							      DP_MOD_ID_CDP);
6110 	struct dp_txrx_peer *txrx_peer;
6111 	uint8_t tid;
6112 	struct dp_rx_tid_defrag *defrag_rx_tid;
6113 
6114 	if (!peer)
6115 		return;
6116 
6117 	if (!peer->txrx_peer)
6118 		goto fail;
6119 
6120 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
6121 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6122 
6123 	txrx_peer = peer->txrx_peer;
6124 
6125 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
6126 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
6127 
6128 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
6129 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
6130 		dp_rx_reorder_flush_frag(txrx_peer, tid);
6131 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
6132 	}
6133 fail:
6134 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6135 }
6136 
6137 /*
6138  * dp_peer_find_by_id_valid() - check if peer exists for given id
6139  * @soc: core DP soc context
6140  * @peer_id: peer id of the peer to look up
6141  *
6142  * Return: true if peer exists, false otherwise
6143  */
6144 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
6145 {
6146 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
6147 						     DP_MOD_ID_HTT);
6148 
6149 	if (peer) {
6150 		/*
6151 		 * Decrement the peer ref which is taken as part of
6152 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
6153 		 */
6154 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
6155 
6156 		return true;
6157 	}
6158 
6159 	return false;
6160 }
6161 
6162 qdf_export_symbol(dp_peer_find_by_id_valid);
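
/*
 * Editorial usage sketch (the helper name is hypothetical): the check only
 * reports whether the id is currently mapped; no reference is held on
 * return.
 *
 *	static bool example_peer_id_is_live(struct dp_soc *soc,
 *					    uint16_t peer_id)
 *	{
 *		if (!dp_peer_find_by_id_valid(soc, peer_id)) {
 *			dp_info("stale peer id %u", peer_id);
 *			return false;
 *		}
 *		return true;
 *	}
 */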
6163