xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision bbede4e20da707b830f49b874abe82230b2018e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef FEATURE_AST
54 #ifdef BYPASS_OL_OPS
55 /**
56  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
57  * @soc: DP soc structure pointer
58  * @peer: dp peer structure
59  * @dest_macaddr: MAC address of ast node
60  * @flags: wds or hmwds
61  * @type: type from enum cdp_txrx_ast_entry_type
62  *
63  * This API is used by the WDS source port learning function to
64  * add a new AST entry in the fw.
65  *
66  * Return: 0 on success, error code otherwise.
67  */
68 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
69 				    struct dp_peer *peer,
70 				    const uint8_t *dest_macaddr,
71 				    uint32_t flags,
72 				    uint8_t type)
73 {
74 	QDF_STATUS status;
75 
76 	status = target_if_add_wds_entry(soc->ctrl_psoc,
77 					 peer->vdev->vdev_id,
78 					 peer->mac_addr.raw,
79 					 dest_macaddr,
80 					 WMI_HOST_WDS_FLAG_STATIC,
81 					 type);
82 
83 	return qdf_status_to_os_return(status);
84 }
85 
86 /**
87  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
88  * @soc: DP soc structure pointer
89  * @peer: dp peer structure
90  * @dest_macaddr: MAC address of ast node
91  * @flags: wds or hmwds
92  *
93  * This API is used to update the peer mac address for the ast
94  * in the fw.
95  *
96  * Return: 0 on success, error code otherwise.
97  */
98 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
99 				       struct dp_peer *peer,
100 				       uint8_t *dest_macaddr,
101 				       uint32_t flags)
102 {
103 	QDF_STATUS status;
104 
105 	status = target_if_update_wds_entry(soc->ctrl_psoc,
106 					    peer->vdev->vdev_id,
107 					    dest_macaddr,
108 					    peer->mac_addr.raw,
109 					    WMI_HOST_WDS_FLAG_STATIC);
110 
111 	return qdf_status_to_os_return(status);
112 }
113 
114 /**
115  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
116  * @soc: DP soc structure pointer
117  * @vdev_id: vdev_id
118  * @wds_macaddr: MAC address of ast node
119  * @type: type from enum cdp_txrx_ast_entry_type
120  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
121  *
122  * This API is used to delete an AST entry from fw
123  *
124  * Return: None
125  */
126 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
127 				     uint8_t vdev_id,
128 				     uint8_t *wds_macaddr,
129 				     uint8_t type,
130 				     uint8_t delete_in_fw)
131 {
132 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
133 				wds_macaddr, type, delete_in_fw);
134 }
135 #else
136 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
137 				    struct dp_peer *peer,
138 				    const uint8_t *dest_macaddr,
139 				    uint32_t flags,
140 				    uint8_t type)
141 {
142 	int status;
143 
144 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
145 					soc->ctrl_psoc,
146 					peer->vdev->vdev_id,
147 					peer->mac_addr.raw,
148 					peer->peer_id,
149 					dest_macaddr,
150 					peer->mac_addr.raw,
151 					flags,
152 					type);
153 
154 	return status;
155 }
156 
157 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
158 				       struct dp_peer *peer,
159 				       uint8_t *dest_macaddr,
160 				       uint32_t flags)
161 {
162 	int status;
163 
164 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
165 				soc->ctrl_psoc,
166 				peer->vdev->vdev_id,
167 				dest_macaddr,
168 				peer->mac_addr.raw,
169 				flags);
170 
171 	return status;
172 }
173 
174 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
175 				     uint8_t vdev_id,
176 				     uint8_t *wds_macaddr,
177 				     uint8_t type,
178 				     uint8_t delete_in_fw)
179 {
180 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
181 						vdev_id,
182 						wds_macaddr,
183 						type,
184 						delete_in_fw);
185 }
186 #endif
187 #endif
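
/*
 * Usage sketch (illustrative only, not part of the driver): the WDS
 * source port learning path is expected to go through the wrappers
 * above so that BYPASS_OL_OPS and ol_ops based builds share one call
 * site. The caller below is hypothetical and "flags" is whatever
 * wds/hmwds flag value the learning path already carries.
 *
 *	int err;
 *
 *	err = dp_add_wds_entry_wrapper(soc, ta_peer, sa, flags,
 *				       CDP_TXRX_AST_TYPE_WDS);
 *	if (err)
 *		dp_peer_err("%pK: WDS entry add failed %d", soc, err);
 */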
188 
189 #ifdef FEATURE_WDS
190 static inline bool
191 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
192 				    struct dp_ast_entry *ast_entry)
193 {
194 	/* If peer map v2 is enabled, the ast entry is not freed here;
195 	 * it is freed in the unmap event instead (after the delete
196 	 * confirmation is received from the target).
197 	 *
198 	 * If the peer_id is invalid, no peer map event was received
199 	 * for the peer; only in that case is the ast entry freed here.
200 	 */
201 
202 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
203 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
204 		return true;
205 
206 	return false;
207 }
208 #else
209 static inline bool
210 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
211 				    struct dp_ast_entry *ast_entry)
212 {
213 	return false;
214 }
215 
216 void dp_soc_wds_attach(struct dp_soc *soc)
217 {
218 }
219 
220 void dp_soc_wds_detach(struct dp_soc *soc)
221 {
222 }
223 #endif
224 
225 #ifdef QCA_SUPPORT_WDS_EXTENDED
226 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
227 {
228 	struct dp_vdev *vdev = peer->vdev;
229 	struct dp_txrx_peer *txrx_peer;
230 
231 	if (!vdev->wds_ext_enabled)
232 		return false;
233 
234 	txrx_peer = dp_get_txrx_peer(peer);
235 	if (!txrx_peer)
236 		return false;
237 
238 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
239 				&txrx_peer->wds_ext.init))
240 		return true;
241 
242 	return false;
243 }
244 #else
245 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
246 {
247 	return false;
248 }
249 #endif
250 
251 #ifdef REO_QDESC_HISTORY
252 static inline void
253 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
254 			    enum reo_qdesc_event_type type)
255 {
256 	struct reo_qdesc_event *evt;
257 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
258 	uint32_t idx;
259 
260 	reo_qdesc_history_idx++;
261 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
262 
263 	evt = &reo_qdesc_history[idx];
264 
265 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
266 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
267 	evt->ts = qdf_get_log_timestamp();
268 	evt->type = type;
269 }
270 
271 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
272 static inline void
273 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
274 				 enum reo_qdesc_event_type type)
275 {
276 	struct reo_qdesc_event *evt;
277 	uint32_t idx;
278 
279 	reo_qdesc_history_idx++;
280 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
281 
282 	evt = &reo_qdesc_history[idx];
283 
284 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
285 	evt->qdesc_addr = desc->hw_qdesc_paddr;
286 	evt->ts = qdf_get_log_timestamp();
287 	evt->type = type;
288 }
289 
290 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
291 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
292 
293 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
294 	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
295 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
296 
297 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
298 	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)
299 
300 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
301 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
302 
303 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
304 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
305 
306 #else
307 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
308 
309 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
310 
311 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
312 
313 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
314 
315 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
316 #endif
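
/*
 * Note on the reo_qdesc_history ring above (added commentary, not
 * driver code): REO_QDESC_HISTORY_SIZE is a power of two, so masking
 * with (REO_QDESC_HISTORY_SIZE - 1) is equivalent to a modulo without
 * a division. With a size of 512, for example, global index 513 maps
 * to slot 1, i.e. the ring wraps and overwrites the oldest event.
 */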
317 
318 static inline void
319 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
320 					uint8_t valid)
321 {
322 	params->u.upd_queue_params.update_svld = 1;
323 	params->u.upd_queue_params.svld = valid;
324 	dp_peer_debug("Setting SSN valid bit to %d",
325 		      valid);
326 }
327 
328 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
329 {
330 	uint32_t max_ast_index;
331 
332 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
333 	/* allocate ast_table for ast entry to ast_index map */
334 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
335 	soc->ast_table = qdf_mem_malloc(max_ast_index *
336 					sizeof(struct dp_ast_entry *));
337 	if (!soc->ast_table) {
338 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
339 		return QDF_STATUS_E_NOMEM;
340 	}
341 	return QDF_STATUS_SUCCESS; /* success */
342 }
343 
344 /**
345  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
346  * @soc: soc handle
347  *
348  * Return: QDF_STATUS
349  */
350 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
351 {
352 	uint32_t max_peers, peer_map_size;
353 
354 	max_peers = soc->max_peer_id;
355 	/* allocate the peer ID -> peer object map */
356 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
357 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
358 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
359 	if (!soc->peer_id_to_obj_map) {
360 		dp_peer_err("%pK: peer map memory allocation failed", soc);
361 		return QDF_STATUS_E_NOMEM;
362 	}
363 
364 	/*
365 	 * The peer_id_to_obj_map doesn't really need to be initialized,
366 	 * since elements are only used after they have been individually
367 	 * initialized.
368 	 * However, it is convenient for debugging to have all elements
369 	 * that are not in use set to 0.
370 	 */
371 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
372 
373 	qdf_spinlock_create(&soc->peer_map_lock);
374 	return QDF_STATUS_SUCCESS; /* success */
375 }
376 
377 #define DP_AST_HASH_LOAD_MULT  2
378 #define DP_AST_HASH_LOAD_SHIFT 0
379 
380 static inline uint32_t
381 dp_peer_find_hash_index(struct dp_soc *soc,
382 			union dp_align_mac_addr *mac_addr)
383 {
384 	uint32_t index;
385 
386 	index =
387 		mac_addr->align2.bytes_ab ^
388 		mac_addr->align2.bytes_cd ^
389 		mac_addr->align2.bytes_ef;
390 
391 	index ^= index >> soc->peer_hash.idx_bits;
392 	index &= soc->peer_hash.mask;
393 	return index;
394 }
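
/*
 * Worked example for the hash fold above (values hypothetical): if the
 * XOR of the three MAC halfwords is 0xA5C3, idx_bits is 8 and the mask
 * is 0xFF, then the bin index is (0xA5C3 ^ (0xA5C3 >> 8)) & 0xFF =
 * 0xA566 & 0xFF = 0x66.
 */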
395 
396 struct dp_peer *dp_peer_find_hash_find(
397 				struct dp_soc *soc, uint8_t *peer_mac_addr,
398 				int mac_addr_is_aligned, uint8_t vdev_id,
399 				enum dp_mod_id mod_id)
400 {
401 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
402 	uint32_t index;
403 	struct dp_peer *peer;
404 
405 	if (!soc->peer_hash.bins)
406 		return NULL;
407 
408 	if (mac_addr_is_aligned) {
409 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
410 	} else {
411 		qdf_mem_copy(
412 			&local_mac_addr_aligned.raw[0],
413 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
414 		mac_addr = &local_mac_addr_aligned;
415 	}
416 	index = dp_peer_find_hash_index(soc, mac_addr);
417 	qdf_spin_lock_bh(&soc->peer_hash_lock);
418 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
419 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
420 		    ((peer->vdev->vdev_id == vdev_id) ||
421 		     (vdev_id == DP_VDEV_ALL))) {
422 			/* take peer reference before returning */
423 			if (dp_peer_get_ref(soc, peer, mod_id) !=
424 						QDF_STATUS_SUCCESS)
425 				peer = NULL;
426 
427 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
428 			return peer;
429 		}
430 	}
431 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
432 	return NULL; /* failure */
433 }
434 
435 qdf_export_symbol(dp_peer_find_hash_find);
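
/*
 * Minimal usage sketch (hypothetical caller): a successful lookup
 * returns with a reference held for the given module id, so the
 * caller must drop it with dp_peer_unref_delete() when done.
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (peer) {
 *		// use peer while holding the DP_MOD_ID_CDP reference
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */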
436 
437 #ifdef WLAN_FEATURE_11BE_MLO
438 /**
439  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
440  * @soc: soc handle
441  *
442  * Return: none
443  */
444 static void dp_peer_find_hash_detach(struct dp_soc *soc)
445 {
446 	if (soc->peer_hash.bins) {
447 		qdf_mem_free(soc->peer_hash.bins);
448 		soc->peer_hash.bins = NULL;
449 		qdf_spinlock_destroy(&soc->peer_hash_lock);
450 	}
451 
452 	if (soc->arch_ops.mlo_peer_find_hash_detach)
453 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
454 }
455 
456 /**
457  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
458  * @soc: soc handle
459  *
460  * Return: QDF_STATUS
461  */
462 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
463 {
464 	int i, hash_elems, log2;
465 
466 	/* allocate the peer MAC address -> peer object hash table */
467 	hash_elems = soc->max_peers;
468 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
469 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
470 	log2 = dp_log2_ceil(hash_elems);
471 	hash_elems = 1 << log2;
472 
473 	soc->peer_hash.mask = hash_elems - 1;
474 	soc->peer_hash.idx_bits = log2;
475 	/* allocate an array of TAILQ peer object lists */
476 	soc->peer_hash.bins = qdf_mem_malloc(
477 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
478 	if (!soc->peer_hash.bins)
479 		return QDF_STATUS_E_NOMEM;
480 
481 	for (i = 0; i < hash_elems; i++)
482 		TAILQ_INIT(&soc->peer_hash.bins[i]);
483 
484 	qdf_spinlock_create(&soc->peer_hash_lock);
485 
486 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
487 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
488 			QDF_STATUS_SUCCESS)) {
489 		dp_peer_find_hash_detach(soc);
490 		return QDF_STATUS_E_NOMEM;
491 	}
492 	return QDF_STATUS_SUCCESS;
493 }
494 
495 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
496 {
497 	unsigned index;
498 
499 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
500 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
501 		qdf_spin_lock_bh(&soc->peer_hash_lock);
502 
503 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
504 							DP_MOD_ID_CONFIG))) {
505 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
506 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
507 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
508 			return;
509 		}
510 
511 		/*
512 		 * It is important to add the new peer at the tail of
513 		 * peer list with the bin index. Together with having
514 		 * the hash_find function search from head to tail,
515 		 * this ensures that if two entries with the same MAC address
516 		 * are stored, the one added first will be found first.
517 		 */
518 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
519 				  hash_list_elem);
520 
521 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
522 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
523 		if (soc->arch_ops.mlo_peer_find_hash_add)
524 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
525 	} else {
526 		dp_err("unknown peer type %d", peer->peer_type);
527 	}
528 }
529 
530 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
531 {
532 	unsigned index;
533 	struct dp_peer *tmppeer = NULL;
534 	int found = 0;
535 
536 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
537 
538 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
539 		/* Check if tail is not empty before delete */
540 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
541 
542 		qdf_spin_lock_bh(&soc->peer_hash_lock);
543 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
544 			      hash_list_elem) {
545 			if (tmppeer == peer) {
546 				found = 1;
547 				break;
548 			}
549 		}
550 		QDF_ASSERT(found);
551 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
552 			     hash_list_elem);
553 
554 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
555 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
556 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
557 		if (soc->arch_ops.mlo_peer_find_hash_remove)
558 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
559 	} else {
560 		dp_err("unknown peer type %d", peer->peer_type);
561 	}
562 }
563 #else
564 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
565 {
566 	int i, hash_elems, log2;
567 
568 	/* allocate the peer MAC address -> peer object hash table */
569 	hash_elems = soc->max_peers;
570 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
571 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
572 	log2 = dp_log2_ceil(hash_elems);
573 	hash_elems = 1 << log2;
574 
575 	soc->peer_hash.mask = hash_elems - 1;
576 	soc->peer_hash.idx_bits = log2;
577 	/* allocate an array of TAILQ peer object lists */
578 	soc->peer_hash.bins = qdf_mem_malloc(
579 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
580 	if (!soc->peer_hash.bins)
581 		return QDF_STATUS_E_NOMEM;
582 
583 	for (i = 0; i < hash_elems; i++)
584 		TAILQ_INIT(&soc->peer_hash.bins[i]);
585 
586 	qdf_spinlock_create(&soc->peer_hash_lock);
587 	return QDF_STATUS_SUCCESS;
588 }
589 
590 static void dp_peer_find_hash_detach(struct dp_soc *soc)
591 {
592 	if (soc->peer_hash.bins) {
593 		qdf_mem_free(soc->peer_hash.bins);
594 		soc->peer_hash.bins = NULL;
595 		qdf_spinlock_destroy(&soc->peer_hash_lock);
596 	}
597 }
598 
599 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
600 {
601 	unsigned index;
602 
603 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
604 	qdf_spin_lock_bh(&soc->peer_hash_lock);
605 
606 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
607 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
608 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
609 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
610 		return;
611 	}
612 
613 	/*
614 	 * It is important to add the new peer at the tail of the peer list
615 	 * with the bin index.  Together with having the hash_find function
616 	 * search from head to tail, this ensures that if two entries with
617 	 * the same MAC address are stored, the one added first will be
618 	 * found first.
619 	 */
620 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
621 
622 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
623 }
624 
625 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
626 {
627 	unsigned index;
628 	struct dp_peer *tmppeer = NULL;
629 	int found = 0;
630 
631 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
632 	/* Check if tail is not empty before delete */
633 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
634 
635 	qdf_spin_lock_bh(&soc->peer_hash_lock);
636 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
637 		if (tmppeer == peer) {
638 			found = 1;
639 			break;
640 		}
641 	}
642 	QDF_ASSERT(found);
643 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
644 
645 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
646 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
647 }
648 
649 
650 #endif/* WLAN_FEATURE_11BE_MLO */
651 
652 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
653 			   struct dp_peer *peer)
654 {
655 	/* only link peer will be added to vdev peer list */
656 	if (IS_MLO_DP_MLD_PEER(peer))
657 		return;
658 
659 	qdf_spin_lock_bh(&vdev->peer_list_lock);
660 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
661 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
662 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
663 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
664 		return;
665 	}
666 
667 	/* add this peer into the vdev's list */
668 	if (wlan_op_mode_sta == vdev->opmode)
669 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
670 	else
671 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
672 
673 	vdev->num_peers++;
674 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
675 }
676 
677 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
678 			      struct dp_peer *peer)
679 {
680 	uint8_t found = 0;
681 	struct dp_peer *tmppeer = NULL;
682 
683 	/* only link peer will be added to vdev peer list */
684 	if (IS_MLO_DP_MLD_PEER(peer))
685 		return;
686 
687 	qdf_spin_lock_bh(&vdev->peer_list_lock);
688 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
689 		if (tmppeer == peer) {
690 			found = 1;
691 			break;
692 		}
693 	}
694 
695 	if (found) {
696 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
697 			     peer_list_elem);
698 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
699 		vdev->num_peers--;
700 	} else {
701 		/*Ignoring the remove operation as peer not found*/
702 		/* Ignoring the remove operation as peer not found */
703 			      , soc, peer, vdev, &peer->vdev->peer_list);
704 	}
705 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
706 }
707 
708 void dp_txrx_peer_attach_add(struct dp_soc *soc,
709 			     struct dp_peer *peer,
710 			     struct dp_txrx_peer *txrx_peer)
711 {
712 	qdf_spin_lock_bh(&soc->peer_map_lock);
713 
714 	peer->txrx_peer = txrx_peer;
715 	txrx_peer->bss_peer = peer->bss_peer;
716 
717 	if (peer->peer_id == HTT_INVALID_PEER) {
718 		qdf_spin_unlock_bh(&soc->peer_map_lock);
719 		return;
720 	}
721 
722 	txrx_peer->peer_id = peer->peer_id;
723 
724 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
725 
726 	qdf_spin_unlock_bh(&soc->peer_map_lock);
727 }
728 
729 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
730 				struct dp_peer *peer,
731 				uint16_t peer_id)
732 {
733 	QDF_ASSERT(peer_id <= soc->max_peer_id);
734 
735 	qdf_spin_lock_bh(&soc->peer_map_lock);
736 
737 	peer->peer_id = peer_id;
738 
739 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
740 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
741 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
742 		qdf_spin_unlock_bh(&soc->peer_map_lock);
743 		return;
744 	}
745 
746 	if (!soc->peer_id_to_obj_map[peer_id]) {
747 		soc->peer_id_to_obj_map[peer_id] = peer;
748 		if (peer->txrx_peer)
749 			peer->txrx_peer->peer_id = peer_id;
750 	} else {
751 		/* Peer map event came for peer_id which
752 		/* Peer map event came for a peer_id which
753 		 * is already mapped; this is not expected
754 		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
755 		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
756 		       soc->peer_id_to_obj_map[peer_id]);
757 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
758 		qdf_assert_always(0);
759 	}
760 	qdf_spin_unlock_bh(&soc->peer_map_lock);
761 }
762 
763 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
764 				   uint16_t peer_id)
765 {
766 	struct dp_peer *peer = NULL;
767 	QDF_ASSERT(peer_id <= soc->max_peer_id);
768 
769 	qdf_spin_lock_bh(&soc->peer_map_lock);
770 	peer = soc->peer_id_to_obj_map[peer_id];
771 	peer->peer_id = HTT_INVALID_PEER;
772 	if (peer->txrx_peer)
773 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
774 	soc->peer_id_to_obj_map[peer_id] = NULL;
775 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
776 	qdf_spin_unlock_bh(&soc->peer_map_lock);
777 }
778 
779 #ifdef FEATURE_MEC
780 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
781 {
782 	int log2, hash_elems, i;
783 
784 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
785 	hash_elems = 1 << log2;
786 
787 	soc->mec_hash.mask = hash_elems - 1;
788 	soc->mec_hash.idx_bits = log2;
789 
790 	dp_peer_info("%pK: max mec index: %d",
791 		     soc, DP_PEER_MAX_MEC_IDX);
792 
793 	/* allocate an array of TAILQ mec object lists */
794 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
795 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
796 							      dp_mec_entry)));
797 
798 	if (!soc->mec_hash.bins)
799 		return QDF_STATUS_E_NOMEM;
800 
801 	for (i = 0; i < hash_elems; i++)
802 		TAILQ_INIT(&soc->mec_hash.bins[i]);
803 
804 	return QDF_STATUS_SUCCESS;
805 }
806 
807 /**
808  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
809  * @soc: SoC handle
810  * @mac_addr: MAC address
811  *
812  * Return: MEC hash
813  */
814 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
815 					      union dp_align_mac_addr *mac_addr)
816 {
817 	uint32_t index;
818 
819 	index =
820 		mac_addr->align2.bytes_ab ^
821 		mac_addr->align2.bytes_cd ^
822 		mac_addr->align2.bytes_ef;
823 	index ^= index >> soc->mec_hash.idx_bits;
824 	index &= soc->mec_hash.mask;
825 	return index;
826 }
827 
828 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
829 						     uint8_t pdev_id,
830 						     uint8_t *mec_mac_addr)
831 {
832 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
833 	uint32_t index;
834 	struct dp_mec_entry *mecentry;
835 
836 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
837 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
838 	mac_addr = &local_mac_addr_aligned;
839 
840 	index = dp_peer_mec_hash_index(soc, mac_addr);
841 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
842 		if ((pdev_id == mecentry->pdev_id) &&
843 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
844 			return mecentry;
845 	}
846 
847 	return NULL;
848 }
849 
850 /**
851  * dp_peer_mec_hash_add() - Add MEC entry into hash table
852  * @soc: SoC handle
853  * @mecentry: MEC entry
854  *
855  * This function adds the MEC entry into SoC MEC hash table
856  *
857  * Return: None
858  */
859 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
860 					struct dp_mec_entry *mecentry)
861 {
862 	uint32_t index;
863 
864 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
865 	qdf_spin_lock_bh(&soc->mec_lock);
866 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
867 	qdf_spin_unlock_bh(&soc->mec_lock);
868 }
869 
870 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
871 				 struct dp_vdev *vdev,
872 				 uint8_t *mac_addr)
873 {
874 	struct dp_mec_entry *mecentry = NULL;
875 	struct dp_pdev *pdev = NULL;
876 
877 	if (!vdev) {
878 		dp_peer_err("%pK: Peers vdev is NULL", soc);
879 		return QDF_STATUS_E_INVAL;
880 	}
881 
882 	pdev = vdev->pdev;
883 
884 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
885 					 DP_PEER_MAX_MEC_ENTRY)) {
886 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
887 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
888 		return QDF_STATUS_E_NOMEM;
889 	}
890 
891 	qdf_spin_lock_bh(&soc->mec_lock);
892 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
893 						   mac_addr);
894 	if (qdf_likely(mecentry)) {
895 		mecentry->is_active = TRUE;
896 		qdf_spin_unlock_bh(&soc->mec_lock);
897 		return QDF_STATUS_E_ALREADY;
898 	}
899 
900 	qdf_spin_unlock_bh(&soc->mec_lock);
901 
902 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
903 		      QDF_MAC_ADDR_FMT,
904 		      soc, pdev->pdev_id, vdev->vdev_id,
905 		      QDF_MAC_ADDR_REF(mac_addr));
906 
907 	mecentry = (struct dp_mec_entry *)
908 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
909 
910 	if (qdf_unlikely(!mecentry)) {
911 		dp_peer_err("%pK: fail to allocate mecentry", soc);
912 		return QDF_STATUS_E_NOMEM;
913 	}
914 
915 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
916 			 (struct qdf_mac_addr *)mac_addr);
917 	mecentry->pdev_id = pdev->pdev_id;
918 	mecentry->vdev_id = vdev->vdev_id;
919 	mecentry->is_active = TRUE;
920 	dp_peer_mec_hash_add(soc, mecentry);
921 
922 	qdf_atomic_inc(&soc->mec_cnt);
923 	DP_STATS_INC(soc, mec.added, 1);
924 
925 	return QDF_STATUS_SUCCESS;
926 }
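
/*
 * Usage sketch (illustrative): a MEC learning path would call
 * dp_peer_mec_add_entry() with a vdev and the echoed source address;
 * QDF_STATUS_E_ALREADY means the entry already existed and only its
 * is_active flag was refreshed.
 *
 *	if (dp_peer_mec_add_entry(soc, vdev, sa) == QDF_STATUS_SUCCESS)
 *		dp_peer_debug("%pK: new MEC entry added", soc);
 */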
927 
928 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
929 			      void *ptr)
930 {
931 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
932 
933 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
934 
935 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
936 		     hash_list_elem);
937 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
938 }
939 
940 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
941 {
942 	struct dp_mec_entry *mecentry, *mecentry_next;
943 
944 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
945 
946 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
947 			   mecentry_next) {
948 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
949 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
950 		qdf_mem_free(mecentry);
951 		qdf_atomic_dec(&soc->mec_cnt);
952 		DP_STATS_INC(soc, mec.deleted, 1);
953 	}
954 }
955 
956 void dp_peer_mec_hash_detach(struct dp_soc *soc)
957 {
958 	dp_peer_mec_flush_entries(soc);
959 	qdf_mem_free(soc->mec_hash.bins);
960 	soc->mec_hash.bins = NULL;
961 }
962 
963 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
964 {
965 	qdf_spinlock_destroy(&soc->mec_lock);
966 }
967 
968 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
969 {
970 	qdf_spinlock_create(&soc->mec_lock);
971 }
972 #else
973 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
974 {
975 	return QDF_STATUS_SUCCESS;
976 }
977 
978 void dp_peer_mec_hash_detach(struct dp_soc *soc)
979 {
980 }
981 #endif
982 
983 #ifdef FEATURE_AST
984 #ifdef WLAN_FEATURE_11BE_MLO
985 /**
986  * dp_peer_exist_on_pdev() - check if peer with mac address exists on pdev
987  *
988  * @soc: Datapath SOC handle
989  * @peer_mac_addr: peer mac address
990  * @mac_addr_is_aligned: is mac address aligned
991  * @pdev: Datapath PDEV handle
992  *
993  * Return: true if peer found else return false
994  */
995 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
996 				  uint8_t *peer_mac_addr,
997 				  int mac_addr_is_aligned,
998 				  struct dp_pdev *pdev)
999 {
1000 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1001 	unsigned int index;
1002 	struct dp_peer *peer;
1003 	bool found = false;
1004 
1005 	if (mac_addr_is_aligned) {
1006 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1007 	} else {
1008 		qdf_mem_copy(
1009 			&local_mac_addr_aligned.raw[0],
1010 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1011 		mac_addr = &local_mac_addr_aligned;
1012 	}
1013 	index = dp_peer_find_hash_index(soc, mac_addr);
1014 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1015 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1016 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1017 		    (peer->vdev->pdev == pdev)) {
1018 			found = true;
1019 			break;
1020 		}
1021 	}
1022 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1023 
1024 	if (found)
1025 		return found;
1026 
1027 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1028 					  mac_addr_is_aligned, DP_VDEV_ALL,
1029 					  DP_MOD_ID_CDP);
1030 	if (peer) {
1031 		if (peer->vdev->pdev == pdev)
1032 			found = true;
1033 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1034 	}
1035 
1036 	return found;
1037 }
1038 #else
1039 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1040 				  uint8_t *peer_mac_addr,
1041 				  int mac_addr_is_aligned,
1042 				  struct dp_pdev *pdev)
1043 {
1044 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1045 	unsigned int index;
1046 	struct dp_peer *peer;
1047 	bool found = false;
1048 
1049 	if (mac_addr_is_aligned) {
1050 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1051 	} else {
1052 		qdf_mem_copy(
1053 			&local_mac_addr_aligned.raw[0],
1054 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1055 		mac_addr = &local_mac_addr_aligned;
1056 	}
1057 	index = dp_peer_find_hash_index(soc, mac_addr);
1058 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1059 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1060 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1061 		    (peer->vdev->pdev == pdev)) {
1062 			found = true;
1063 			break;
1064 		}
1065 	}
1066 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1067 	return found;
1068 }
1069 #endif /* WLAN_FEATURE_11BE_MLO */
1070 
1071 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1072 {
1073 	int i, hash_elems, log2;
1074 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1075 
1076 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1077 		DP_AST_HASH_LOAD_SHIFT);
1078 
1079 	log2 = dp_log2_ceil(hash_elems);
1080 	hash_elems = 1 << log2;
1081 
1082 	soc->ast_hash.mask = hash_elems - 1;
1083 	soc->ast_hash.idx_bits = log2;
1084 
1085 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1086 		     soc, hash_elems, max_ast_idx);
1087 
1088 	/* allocate an array of TAILQ peer object lists */
1089 	soc->ast_hash.bins = qdf_mem_malloc(
1090 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1091 				dp_ast_entry)));
1092 
1093 	if (!soc->ast_hash.bins)
1094 		return QDF_STATUS_E_NOMEM;
1095 
1096 	for (i = 0; i < hash_elems; i++)
1097 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1098 
1099 	return QDF_STATUS_SUCCESS;
1100 }
1101 
1102 /**
1103  * dp_peer_ast_cleanup() - cleanup the references
1104  * @soc: SoC handle
1105  * @ast: ast entry
1106  *
1107  * Return: None
1108  */
1109 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1110 				       struct dp_ast_entry *ast)
1111 {
1112 	txrx_ast_free_cb cb = ast->callback;
1113 	void *cookie = ast->cookie;
1114 
1115 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1116 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1117 
1118 	/* Call the callbacks to free up the cookie */
1119 	if (cb) {
1120 		ast->callback = NULL;
1121 		ast->cookie = NULL;
1122 		cb(soc->ctrl_psoc,
1123 		   dp_soc_to_cdp_soc(soc),
1124 		   cookie,
1125 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1126 	}
1127 }
1128 
1129 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1130 {
1131 	unsigned int index;
1132 	struct dp_ast_entry *ast, *ast_next;
1133 
1134 	if (!soc->ast_hash.mask)
1135 		return;
1136 
1137 	if (!soc->ast_hash.bins)
1138 		return;
1139 
1140 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1141 
1142 	qdf_spin_lock_bh(&soc->ast_lock);
1143 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1144 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1145 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1146 					   hash_list_elem, ast_next) {
1147 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1148 					     hash_list_elem);
1149 				dp_peer_ast_cleanup(soc, ast);
1150 				soc->num_ast_entries--;
1151 				qdf_mem_free(ast);
1152 			}
1153 		}
1154 	}
1155 	qdf_spin_unlock_bh(&soc->ast_lock);
1156 
1157 	qdf_mem_free(soc->ast_hash.bins);
1158 	soc->ast_hash.bins = NULL;
1159 }
1160 
1161 /**
1162  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1163  * @soc: SoC handle
1164  * @mac_addr: MAC address
1165  *
1166  * Return: AST hash
1167  */
1168 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1169 					      union dp_align_mac_addr *mac_addr)
1170 {
1171 	uint32_t index;
1172 
1173 	index =
1174 		mac_addr->align2.bytes_ab ^
1175 		mac_addr->align2.bytes_cd ^
1176 		mac_addr->align2.bytes_ef;
1177 	index ^= index >> soc->ast_hash.idx_bits;
1178 	index &= soc->ast_hash.mask;
1179 	return index;
1180 }
1181 
1182 /**
1183  * dp_peer_ast_hash_add() - Add AST entry into hash table
1184  * @soc: SoC handle
1185  * @ase: AST entry
1186  *
1187  * This function adds the AST entry into SoC AST hash table
1188  * It assumes caller has taken the ast lock to protect the access to this table
1189  *
1190  * Return: None
1191  */
1192 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1193 					struct dp_ast_entry *ase)
1194 {
1195 	uint32_t index;
1196 
1197 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1198 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1199 }
1200 
1201 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1202 			     struct dp_ast_entry *ase)
1203 {
1204 	unsigned index;
1205 	struct dp_ast_entry *tmpase;
1206 	int found = 0;
1207 
1208 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1209 		return;
1210 
1211 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1212 	/* Check if tail is not empty before delete */
1213 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1214 
1215 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1216 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1217 
1218 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1219 		if (tmpase == ase) {
1220 			found = 1;
1221 			break;
1222 		}
1223 	}
1224 
1225 	QDF_ASSERT(found);
1226 
1227 	if (found)
1228 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1229 }
1230 
1231 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1232 						     uint8_t *ast_mac_addr,
1233 						     uint8_t vdev_id)
1234 {
1235 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1236 	uint32_t index;
1237 	struct dp_ast_entry *ase;
1238 
1239 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1240 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1241 	mac_addr = &local_mac_addr_aligned;
1242 
1243 	index = dp_peer_ast_hash_index(soc, mac_addr);
1244 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1245 		if ((vdev_id == ase->vdev_id) &&
1246 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1247 			return ase;
1248 		}
1249 	}
1250 
1251 	return NULL;
1252 }
1253 
1254 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1255 						     uint8_t *ast_mac_addr,
1256 						     uint8_t pdev_id)
1257 {
1258 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1259 	uint32_t index;
1260 	struct dp_ast_entry *ase;
1261 
1262 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1263 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1264 	mac_addr = &local_mac_addr_aligned;
1265 
1266 	index = dp_peer_ast_hash_index(soc, mac_addr);
1267 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1268 		if ((pdev_id == ase->pdev_id) &&
1269 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1270 			return ase;
1271 		}
1272 	}
1273 
1274 	return NULL;
1275 }
1276 
1277 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1278 					       uint8_t *ast_mac_addr)
1279 {
1280 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1281 	unsigned index;
1282 	struct dp_ast_entry *ase;
1283 
1284 	if (!soc->ast_hash.bins)
1285 		return NULL;
1286 
1287 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1288 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1289 	mac_addr = &local_mac_addr_aligned;
1290 
1291 	index = dp_peer_ast_hash_index(soc, mac_addr);
1292 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1293 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1294 			return ase;
1295 		}
1296 	}
1297 
1298 	return NULL;
1299 }
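
/*
 * Lookup sketch (illustrative): the ast hash find helpers above do not
 * take soc->ast_lock themselves; callers in this file hold it across
 * the lookup and any use of the returned entry.
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
 *	if (ast_entry)
 *		peer_id = ast_entry->peer_id;	// use under the lock
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */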
1300 
1301 /**
1302  * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
1303  * @soc: SoC handle
1304  * @peer_id: peer id from firmware
1305  * @mac_addr: MAC address of ast node
1306  * @hw_peer_id: HW AST Index returned by target in peer map event
1307  * @vdev_id: vdev id for VAP to which the peer belongs
1308  * @ast_hash: ast hash value in HW
1309  * @is_wds: flag to indicate peer map event for WDS ast entry
1310  *
1311  * Return: QDF_STATUS code
1312  */
1313 static inline
1314 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1315 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1316 				    uint8_t vdev_id, uint16_t ast_hash,
1317 				    uint8_t is_wds)
1318 {
1319 	struct dp_vdev *vdev;
1320 	struct dp_ast_entry *ast_entry;
1321 	enum cdp_txrx_ast_entry_type type;
1322 	struct dp_peer *peer;
1323 	struct dp_peer *old_peer;
1324 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1325 
1326 	if (is_wds)
1327 		type = CDP_TXRX_AST_TYPE_WDS;
1328 	else
1329 		type = CDP_TXRX_AST_TYPE_STATIC;
1330 
1331 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1332 	if (!peer) {
1333 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1334 			     soc, peer_id,
1335 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1336 		return QDF_STATUS_E_INVAL;
1337 	}
1338 
1339 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1340 		type = CDP_TXRX_AST_TYPE_MLD;
1341 
1342 	vdev = peer->vdev;
1343 	if (!vdev) {
1344 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1345 		status = QDF_STATUS_E_INVAL;
1346 		goto fail;
1347 	}
1348 
1349 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1350 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1351 		    type != CDP_TXRX_AST_TYPE_MLD &&
1352 		    type != CDP_TXRX_AST_TYPE_SELF) {
1353 			status = QDF_STATUS_E_BUSY;
1354 			goto fail;
1355 		}
1356 	}
1357 
1358 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1359 		      soc, vdev->vdev_id, type,
1360 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1361 		      QDF_MAC_ADDR_REF(mac_addr));
1362 
1363 	/*
1364 	 * In an MLO scenario, the same mac address can appear both as
1365 	 * a link mac address and as the MLD mac address.
1366 	 * Duplicate AST map events need to be handled for non-MLD types.
1367 	 */
1368 	qdf_spin_lock_bh(&soc->ast_lock);
1369 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1370 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1371 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1372 			      hw_peer_id, vdev_id,
1373 			      QDF_MAC_ADDR_REF(mac_addr));
1374 
1375 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1376 						   DP_MOD_ID_AST);
1377 		if (!old_peer) {
1378 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1379 				     soc, ast_entry->peer_id,
1380 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1381 			qdf_spin_unlock_bh(&soc->ast_lock);
1382 			status = QDF_STATUS_E_INVAL;
1383 			goto fail;
1384 		}
1385 
1386 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1387 		dp_peer_free_ast_entry(soc, ast_entry);
1388 		if (old_peer)
1389 			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1390 	}
1391 
1392 	ast_entry = (struct dp_ast_entry *)
1393 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1394 	if (!ast_entry) {
1395 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1396 		qdf_spin_unlock_bh(&soc->ast_lock);
1397 		QDF_ASSERT(0);
1398 		status = QDF_STATUS_E_NOMEM;
1399 		goto fail;
1400 	}
1401 
1402 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1403 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1404 	ast_entry->is_mapped = false;
1405 	ast_entry->delete_in_progress = false;
1406 	ast_entry->next_hop = 0;
1407 	ast_entry->vdev_id = vdev->vdev_id;
1408 	ast_entry->type = type;
1409 
1410 	switch (type) {
1411 	case CDP_TXRX_AST_TYPE_STATIC:
1412 		if (peer->vdev->opmode == wlan_op_mode_sta)
1413 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1414 		break;
1415 	case CDP_TXRX_AST_TYPE_WDS:
1416 		ast_entry->next_hop = 1;
1417 		break;
1418 	case CDP_TXRX_AST_TYPE_MLD:
1419 		break;
1420 	default:
1421 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1422 	}
1423 
1424 	ast_entry->is_active = TRUE;
1425 	DP_STATS_INC(soc, ast.added, 1);
1426 	soc->num_ast_entries++;
1427 	dp_peer_ast_hash_add(soc, ast_entry);
1428 
1429 	ast_entry->ast_idx = hw_peer_id;
1430 	ast_entry->ast_hash_value = ast_hash;
1431 	ast_entry->peer_id = peer_id;
1432 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1433 			  ase_list_elem);
1434 
1435 	qdf_spin_unlock_bh(&soc->ast_lock);
1436 fail:
1437 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1438 
1439 	return status;
1440 }
1441 
1442 /**
1443  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1444  * @soc: SoC handle
1445  * @peer: peer to which ast node belongs
1446  * @mac_addr: MAC address of ast node
1447  * @hw_peer_id: HW AST Index returned by target in peer map event
1448  * @vdev_id: vdev id for VAP to which the peer belongs
1449  * @ast_hash: ast hash value in HW
1450  * @is_wds: flag to indicate peer map event for WDS ast entry
1451  *
1452  * Return: QDF_STATUS code
1453  */
1454 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1455 					 struct dp_peer *peer,
1456 					 uint8_t *mac_addr,
1457 					 uint16_t hw_peer_id,
1458 					 uint8_t vdev_id,
1459 					 uint16_t ast_hash,
1460 					 uint8_t is_wds)
1461 {
1462 	struct dp_ast_entry *ast_entry = NULL;
1463 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1464 	void *cookie = NULL;
1465 	txrx_ast_free_cb cb = NULL;
1466 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1467 
1468 	if (soc->ast_offload_support)
1469 		return QDF_STATUS_SUCCESS;
1470 
1471 	if (!peer) {
1472 		return QDF_STATUS_E_INVAL;
1473 	}
1474 
1475 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1476 		    soc, peer, hw_peer_id, vdev_id,
1477 		    QDF_MAC_ADDR_REF(mac_addr));
1478 
1479 	qdf_spin_lock_bh(&soc->ast_lock);
1480 
1481 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1482 
1483 	if (is_wds) {
1484 		/*
1485 		 * In certain cases, such as an Auth attack on a repeater,
1486 		 * the number of ast_entries falling in the same hash
1487 		 * bucket can exceed the max_skid length supported by HW
1488 		 * in the root AP. In these cases the FW will return the
1489 		 * hw_peer_id (ast_index) as 0xffff, indicating that HW
1490 		 * could not add the entry in its table. The host has to
1491 		 * delete the entry from its own table in these
1492 		 * cases.
1493 		 */
1494 		if (hw_peer_id == HTT_INVALID_PEER) {
1495 			DP_STATS_INC(soc, ast.map_err, 1);
1496 			if (ast_entry) {
1497 				if (ast_entry->is_mapped) {
1498 					soc->ast_table[ast_entry->ast_idx] =
1499 						NULL;
1500 				}
1501 
1502 				cb = ast_entry->callback;
1503 				cookie = ast_entry->cookie;
1504 				peer_type = ast_entry->type;
1505 
1506 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1507 				dp_peer_free_ast_entry(soc, ast_entry);
1508 
1509 				qdf_spin_unlock_bh(&soc->ast_lock);
1510 
1511 				if (cb) {
1512 					cb(soc->ctrl_psoc,
1513 					   dp_soc_to_cdp_soc(soc),
1514 					   cookie,
1515 					   CDP_TXRX_AST_DELETED);
1516 				}
1517 			} else {
1518 				qdf_spin_unlock_bh(&soc->ast_lock);
1519 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1520 					      peer, peer->peer_id,
1521 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1522 					      QDF_MAC_ADDR_REF(mac_addr),
1523 					      vdev_id, is_wds);
1524 			}
1525 			err = QDF_STATUS_E_INVAL;
1526 
1527 			dp_hmwds_ast_add_notify(peer, mac_addr,
1528 						peer_type, err, true);
1529 
1530 			return err;
1531 		}
1532 	}
1533 
1534 	if (ast_entry) {
1535 		ast_entry->ast_idx = hw_peer_id;
1536 		soc->ast_table[hw_peer_id] = ast_entry;
1537 		ast_entry->is_active = TRUE;
1538 		peer_type = ast_entry->type;
1539 		ast_entry->ast_hash_value = ast_hash;
1540 		ast_entry->is_mapped = TRUE;
1541 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1542 
1543 		ast_entry->peer_id = peer->peer_id;
1544 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1545 				  ase_list_elem);
1546 	}
1547 
1548 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1549 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1550 			soc->cdp_soc.ol_ops->peer_map_event(
1551 			soc->ctrl_psoc, peer->peer_id,
1552 			hw_peer_id, vdev_id,
1553 			mac_addr, peer_type, ast_hash);
1554 		}
1555 	} else {
1556 		dp_peer_err("%pK: AST entry not found", soc);
1557 		err = QDF_STATUS_E_NOENT;
1558 	}
1559 
1560 	qdf_spin_unlock_bh(&soc->ast_lock);
1561 
1562 	dp_hmwds_ast_add_notify(peer, mac_addr,
1563 				peer_type, err, true);
1564 
1565 	return err;
1566 }
1567 
1568 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1569 			   struct cdp_soc *dp_soc,
1570 			   void *cookie,
1571 			   enum cdp_ast_free_status status)
1572 {
1573 	struct dp_ast_free_cb_params *param =
1574 		(struct dp_ast_free_cb_params *)cookie;
1575 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1576 	struct dp_peer *peer = NULL;
1577 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1578 
1579 	if (status != CDP_TXRX_AST_DELETED) {
1580 		qdf_mem_free(cookie);
1581 		return;
1582 	}
1583 
1584 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1585 				      0, param->vdev_id, DP_MOD_ID_AST);
1586 	if (peer) {
1587 		err = dp_peer_add_ast(soc, peer,
1588 				      &param->mac_addr.raw[0],
1589 				      param->type,
1590 				      param->flags);
1591 
1592 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1593 					param->type, err, false);
1594 
1595 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1596 	}
1597 	qdf_mem_free(cookie);
1598 }
1599 
1600 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1601 			   struct dp_peer *peer,
1602 			   uint8_t *mac_addr,
1603 			   enum cdp_txrx_ast_entry_type type,
1604 			   uint32_t flags)
1605 {
1606 	struct dp_ast_entry *ast_entry = NULL;
1607 	struct dp_vdev *vdev = NULL;
1608 	struct dp_pdev *pdev = NULL;
1609 	txrx_ast_free_cb cb = NULL;
1610 	void *cookie = NULL;
1611 	struct dp_peer *vap_bss_peer = NULL;
1612 	bool is_peer_found = false;
1613 	int status = 0;
1614 
1615 	if (soc->ast_offload_support)
1616 		return QDF_STATUS_E_INVAL;
1617 
1618 	vdev = peer->vdev;
1619 	if (!vdev) {
1620 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1621 		QDF_ASSERT(0);
1622 		return QDF_STATUS_E_INVAL;
1623 	}
1624 
1625 	pdev = vdev->pdev;
1626 
1627 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1628 
1629 	qdf_spin_lock_bh(&soc->ast_lock);
1630 
1631 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1632 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1633 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1634 			qdf_spin_unlock_bh(&soc->ast_lock);
1635 			return QDF_STATUS_E_BUSY;
1636 		}
1637 	}
1638 
1639 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1640 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1641 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1642 		      QDF_MAC_ADDR_REF(mac_addr));
1643 
1644 	/* fw supports only 2 times the max_peers ast entries */
1645 	if (soc->num_ast_entries >=
1646 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1647 		qdf_spin_unlock_bh(&soc->ast_lock);
1648 		dp_peer_err("%pK: Max ast entries reached", soc);
1649 		return QDF_STATUS_E_RESOURCES;
1650 	}
1651 
1652 	/* If the AST entry already exists, just return from here.
1653 	 * An ast entry with the same mac address can exist on different
1654 	 * radios; if ast_override support is enabled, search by pdev in
1655 	 * this case.
1656 	 */
1657 	if (soc->ast_override_support) {
1658 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1659 							    pdev->pdev_id);
1660 		if (ast_entry) {
1661 			qdf_spin_unlock_bh(&soc->ast_lock);
1662 			return QDF_STATUS_E_ALREADY;
1663 		}
1664 
1665 		if (is_peer_found) {
1666 			/* During WDS to static roaming, the peer is added
1667 			 * to the list before the static AST entry is created.
1668 			 * So allow an AST entry of STATIC type
1669 			 * even if the peer is already present
1670 			 */
1671 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1672 				qdf_spin_unlock_bh(&soc->ast_lock);
1673 				return QDF_STATUS_E_ALREADY;
1674 			}
1675 		}
1676 	} else {
1677 		/* WDS_HM_SEC entries can be added for the same mac address,
1678 		 * so do not check for an existing entry
1679 		 */
1680 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1681 			goto add_ast_entry;
1682 
1683 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1684 
1685 		if (ast_entry) {
1686 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1687 			    !ast_entry->delete_in_progress) {
1688 				qdf_spin_unlock_bh(&soc->ast_lock);
1689 				return QDF_STATUS_E_ALREADY;
1690 			}
1691 
1692 			/* An add for an HMWDS entry cannot be ignored if an
1693 			 * AST entry with the same mac address already exists.
1694 			 *
1695 			 * If an ast entry exists with the requested mac address,
1696 			 * send a delete command and register a callback which
1697 			 * takes care of adding the HMWDS ast entry on delete
1698 			 * confirmation from the target.
1699 			 */
1700 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1701 				struct dp_ast_free_cb_params *param = NULL;
1702 
1703 				if (ast_entry->type ==
1704 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1705 					goto add_ast_entry;
1706 
1707 				/* save existing callback */
1708 				if (ast_entry->callback) {
1709 					cb = ast_entry->callback;
1710 					cookie = ast_entry->cookie;
1711 				}
1712 
1713 				param = qdf_mem_malloc(sizeof(*param));
1714 				if (!param) {
1715 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1716 						  QDF_TRACE_LEVEL_ERROR,
1717 						  "Allocation failed");
1718 					qdf_spin_unlock_bh(&soc->ast_lock);
1719 					return QDF_STATUS_E_NOMEM;
1720 				}
1721 
1722 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1723 					     QDF_MAC_ADDR_SIZE);
1724 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1725 					     &peer->mac_addr.raw[0],
1726 					     QDF_MAC_ADDR_SIZE);
1727 				param->type = type;
1728 				param->flags = flags;
1729 				param->vdev_id = vdev->vdev_id;
1730 				ast_entry->callback = dp_peer_free_hmwds_cb;
1731 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1732 				ast_entry->type = type;
1733 				ast_entry->cookie = (void *)param;
1734 				if (!ast_entry->delete_in_progress)
1735 					dp_peer_del_ast(soc, ast_entry);
1736 
1737 				qdf_spin_unlock_bh(&soc->ast_lock);
1738 
1739 				/* Call the saved callback*/
1740 				if (cb) {
1741 					cb(soc->ctrl_psoc,
1742 					   dp_soc_to_cdp_soc(soc),
1743 					   cookie,
1744 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1745 				}
1746 				return QDF_STATUS_E_AGAIN;
1747 			}
1748 
1749 			qdf_spin_unlock_bh(&soc->ast_lock);
1750 			return QDF_STATUS_E_ALREADY;
1751 		}
1752 	}
1753 
1754 add_ast_entry:
1755 	ast_entry = (struct dp_ast_entry *)
1756 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1757 
1758 	if (!ast_entry) {
1759 		qdf_spin_unlock_bh(&soc->ast_lock);
1760 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1761 		QDF_ASSERT(0);
1762 		return QDF_STATUS_E_NOMEM;
1763 	}
1764 
1765 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1766 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1767 	ast_entry->is_mapped = false;
1768 	ast_entry->delete_in_progress = false;
1769 	ast_entry->peer_id = HTT_INVALID_PEER;
1770 	ast_entry->next_hop = 0;
1771 	ast_entry->vdev_id = vdev->vdev_id;
1772 
1773 	switch (type) {
1774 	case CDP_TXRX_AST_TYPE_STATIC:
1775 		peer->self_ast_entry = ast_entry;
1776 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1777 		if (peer->vdev->opmode == wlan_op_mode_sta)
1778 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1779 		break;
1780 	case CDP_TXRX_AST_TYPE_SELF:
1781 		peer->self_ast_entry = ast_entry;
1782 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1783 		break;
1784 	case CDP_TXRX_AST_TYPE_WDS:
1785 		ast_entry->next_hop = 1;
1786 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1787 		break;
1788 	case CDP_TXRX_AST_TYPE_WDS_HM:
1789 		ast_entry->next_hop = 1;
1790 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1791 		break;
1792 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1793 		ast_entry->next_hop = 1;
1794 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1795 		ast_entry->peer_id = peer->peer_id;
1796 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1797 				  ase_list_elem);
1798 		break;
1799 	case CDP_TXRX_AST_TYPE_DA:
1800 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1801 							  DP_MOD_ID_AST);
1802 		if (!vap_bss_peer) {
1803 			qdf_spin_unlock_bh(&soc->ast_lock);
1804 			qdf_mem_free(ast_entry);
1805 			return QDF_STATUS_E_FAILURE;
1806 		}
1807 		peer = vap_bss_peer;
1808 		ast_entry->next_hop = 1;
1809 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1810 		break;
1811 	default:
1812 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1813 	}
1814 
1815 	ast_entry->is_active = TRUE;
1816 	DP_STATS_INC(soc, ast.added, 1);
1817 	soc->num_ast_entries++;
1818 	dp_peer_ast_hash_add(soc, ast_entry);
1819 
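	/* Notify the target of the new entry only for entry types other than
	 * STATIC, SELF, STA_BSS and WDS_HM_SEC
	 */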
1820 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1821 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1822 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1823 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1824 		status = dp_add_wds_entry_wrapper(soc,
1825 						  peer,
1826 						  mac_addr,
1827 						  flags,
1828 						  ast_entry->type);
1829 
1830 	if (vap_bss_peer)
1831 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1832 
1833 	qdf_spin_unlock_bh(&soc->ast_lock);
1834 	return qdf_status_from_os_return(status);
1835 }
1836 
1837 qdf_export_symbol(dp_peer_add_ast);
1838 
1839 void dp_peer_free_ast_entry(struct dp_soc *soc,
1840 			    struct dp_ast_entry *ast_entry)
1841 {
1842 	/*
1843 	 * NOTE: Ensure that call to this API is done
1844 	 * after soc->ast_lock is taken
1845 	 */
1846 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1847 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1848 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1849 
1850 	ast_entry->callback = NULL;
1851 	ast_entry->cookie = NULL;
1852 
1853 	DP_STATS_INC(soc, ast.deleted, 1);
1854 	dp_peer_ast_hash_remove(soc, ast_entry);
1855 	dp_peer_ast_cleanup(soc, ast_entry);
1856 	qdf_mem_free(ast_entry);
1857 	soc->num_ast_entries--;
1858 }
1859 
1860 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1861 			      struct dp_ast_entry *ast_entry,
1862 			      struct dp_peer *peer)
1863 {
1864 	if (!peer) {
1865 		dp_info_rl("NULL peer");
1866 		return;
1867 	}
1868 
1869 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1870 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1871 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1872 			  ast_entry->type);
1873 		return;
1874 	}
1875 	/*
1876 	 * NOTE: Ensure that call to this API is done
1877 	 * after soc->ast_lock is taken
1878 	 */
1879 
1880 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1881 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1882 
1883 	if (ast_entry == peer->self_ast_entry)
1884 		peer->self_ast_entry = NULL;
1885 
1886 	/*
1887 	 * release the reference only if it is mapped
1888 	 * to ast_table
1889 	 */
1890 	if (ast_entry->is_mapped)
1891 		soc->ast_table[ast_entry->ast_idx] = NULL;
1892 
1893 	ast_entry->peer_id = HTT_INVALID_PEER;
1894 }
1895 
1896 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1897 {
1898 	struct dp_peer *peer = NULL;
1899 
1900 	if (soc->ast_offload_support)
1901 		return;
1902 
1903 	if (!ast_entry) {
1904 		dp_info_rl("NULL AST entry");
1905 		return;
1906 	}
1907 
1908 	if (ast_entry->delete_in_progress) {
1909 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1910 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1911 			  ast_entry->type);
1912 		return;
1913 	}
1914 
1915 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1916 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
1917 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1918 
1919 	ast_entry->delete_in_progress = true;
1920 
1921 	/* In teardown, del ast is called after setting the logical delete
1922 	 * state; use __dp_peer_get_ref_by_id to get the reference
1923 	 * irrespective of that state.
1924 	 */
1925 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1926 				       DP_MOD_ID_AST);
1927 
1928 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
1929 
1930 	/* Remove SELF and STATIC entries in teardown itself */
1931 	if (!ast_entry->next_hop)
1932 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1933 
1934 	if (ast_entry->is_mapped)
1935 		soc->ast_table[ast_entry->ast_idx] = NULL;
1936 
1937 	/* If peer map v2 is enabled, we do not free the ast entry
1938 	 * here; it is supposed to be freed in the unmap event (after
1939 	 * we receive delete confirmation from the target).
1940 	 *
1941 	 * If peer_id is invalid, we did not get the peer map event
1942 	 * for the peer; free the ast entry from here only in this case.
1943 	 */
1944 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
1945 		goto end;
1946 
1947 	/* For a WDS secondary entry ast_entry->next_hop would be set, so
1948 	 * unlinking has to be done explicitly here.
1949 	 * As this entry is not a mapped entry, an unmap notification from
1950 	 * FW will not come. Hence unlinking is done right here.
1951 	 */
1952 
1953 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1954 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1955 
1956 	dp_peer_free_ast_entry(soc, ast_entry);
1957 
1958 end:
1959 	if (peer)
1960 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1961 }
1962 
1963 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1964 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1965 {
1966 	int ret = -1;
1967 	struct dp_peer *old_peer;
1968 
1969 	if (soc->ast_offload_support)
1970 		return QDF_STATUS_E_INVAL;
1971 
1972 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
1973 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
1974 		      peer->vdev->vdev_id, flags,
1975 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1976 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1977 
1978 	/* Do not send an AST update in the below cases
1979 	 *  1) AST entry delete has already been triggered
1980 	 *  2) Peer delete has already been triggered
1981 	 *  3) We did not get the HTT map for the create event
1982 	 */
1983 	if (ast_entry->delete_in_progress ||
1984 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
1985 	    !ast_entry->is_mapped)
1986 		return ret;
1987 
1988 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1989 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1990 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1991 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1992 		return 0;
1993 
1994 	/*
1995 	 * Avoid a flood of WMI update messages being sent to FW for the same peer.
1996 	 */
1997 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
1998 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1999 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2000 	    (ast_entry->is_active))
2001 		return 0;
2002 
2003 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2004 					 DP_MOD_ID_AST);
2005 	if (!old_peer)
2006 		return 0;
2007 
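	/* Move the AST entry from the old peer's list to the new peer and
	 * refresh its peer/vdev/pdev identifiers before updating the target
	 */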
2008 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2009 
2010 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2011 
2012 	ast_entry->peer_id = peer->peer_id;
2013 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2014 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2015 	ast_entry->vdev_id = peer->vdev->vdev_id;
2016 	ast_entry->is_active = TRUE;
2017 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2018 
2019 	ret = dp_update_wds_entry_wrapper(soc,
2020 					  peer,
2021 					  ast_entry->mac_addr.raw,
2022 					  flags);
2023 
2024 	return ret;
2025 }
2026 
2027 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2028 				struct dp_ast_entry *ast_entry)
2029 {
2030 	return ast_entry->pdev_id;
2031 }
2032 
2033 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2034 				struct dp_ast_entry *ast_entry)
2035 {
2036 	return ast_entry->next_hop;
2037 }
2038 
2039 void dp_peer_ast_set_type(struct dp_soc *soc,
2040 				struct dp_ast_entry *ast_entry,
2041 				enum cdp_txrx_ast_entry_type type)
2042 {
2043 	ast_entry->type = type;
2044 }
2045 
2046 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2047 			      struct dp_ast_entry *ast_entry,
2048 			      struct dp_peer *peer)
2049 {
2050 	bool delete_in_fw = false;
2051 
2052 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2053 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
2054 		  __func__, ast_entry->type, ast_entry->pdev_id,
2055 		  ast_entry->vdev_id,
2056 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2057 		  ast_entry->next_hop, ast_entry->peer_id);
2058 
2059 	/*
2060 	 * If peer state is logical delete, the peer is about to get
2061 	 * torn down with a peer delete command to firmware,
2062 	 * which will clean up all the wds ast entries.
2063 	 * So, no need to send explicit wds ast delete to firmware.
2064 	 */
2065 	if (ast_entry->next_hop) {
2066 		if (peer && dp_peer_state_cmp(peer,
2067 					      DP_PEER_STATE_LOGICAL_DELETE))
2068 			delete_in_fw = false;
2069 		else
2070 			delete_in_fw = true;
2071 
2072 		dp_del_wds_entry_wrapper(soc,
2073 					 ast_entry->vdev_id,
2074 					 ast_entry->mac_addr.raw,
2075 					 ast_entry->type,
2076 					 delete_in_fw);
2077 	}
2078 }
2079 #else
2080 void dp_peer_free_ast_entry(struct dp_soc *soc,
2081 			    struct dp_ast_entry *ast_entry)
2082 {
2083 }
2084 
2085 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2086 			      struct dp_ast_entry *ast_entry,
2087 			      struct dp_peer *peer)
2088 {
2089 }
2090 
2091 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2092 			     struct dp_ast_entry *ase)
2093 {
2094 }
2095 
2096 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2097 						     uint8_t *ast_mac_addr,
2098 						     uint8_t vdev_id)
2099 {
2100 	return NULL;
2101 }
2102 
2103 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2104 			   struct dp_peer *peer,
2105 			   uint8_t *mac_addr,
2106 			   enum cdp_txrx_ast_entry_type type,
2107 			   uint32_t flags)
2108 {
2109 	return QDF_STATUS_E_FAILURE;
2110 }
2111 
2112 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2113 {
2114 }
2115 
2116 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2117 			struct dp_ast_entry *ast_entry, uint32_t flags)
2118 {
2119 	return 1;
2120 }
2121 
2122 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2123 					       uint8_t *ast_mac_addr)
2124 {
2125 	return NULL;
2126 }
2127 
2128 static inline
2129 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2130 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2131 				    uint8_t vdev_id, uint16_t ast_hash,
2132 				    uint8_t is_wds)
2133 {
2134 	return QDF_STATUS_SUCCESS;
2135 }
2136 
2137 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2138 						     uint8_t *ast_mac_addr,
2139 						     uint8_t pdev_id)
2140 {
2141 	return NULL;
2142 }
2143 
2144 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2145 {
2146 	return QDF_STATUS_SUCCESS;
2147 }
2148 
2149 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2150 					 struct dp_peer *peer,
2151 					 uint8_t *mac_addr,
2152 					 uint16_t hw_peer_id,
2153 					 uint8_t vdev_id,
2154 					 uint16_t ast_hash,
2155 					 uint8_t is_wds)
2156 {
2157 	return QDF_STATUS_SUCCESS;
2158 }
2159 
2160 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2161 {
2162 }
2163 
2164 void dp_peer_ast_set_type(struct dp_soc *soc,
2165 				struct dp_ast_entry *ast_entry,
2166 				enum cdp_txrx_ast_entry_type type)
2167 {
2168 }
2169 
2170 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2171 				struct dp_ast_entry *ast_entry)
2172 {
2173 	return 0xff;
2174 }
2175 
2176 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2177 				 struct dp_ast_entry *ast_entry)
2178 {
2179 	return 0xff;
2180 }
2181 
2182 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2183 			      struct dp_ast_entry *ast_entry,
2184 			      struct dp_peer *peer)
2185 {
2186 }
2187 #endif
2188 
2189 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2190 void dp_peer_ast_send_multi_wds_del(
2191 		struct dp_soc *soc, uint8_t vdev_id,
2192 		struct peer_del_multi_wds_entries *wds_list)
2193 {
2194 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2195 
2196 	if (cdp_soc && cdp_soc->ol_ops &&
2197 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2198 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2199 							  vdev_id, wds_list);
2200 }
2201 #endif
2202 
2203 #ifdef FEATURE_WDS
2204 /**
2205  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2206  * @soc: soc handle
2207  * @peer: peer handle
2208  *
2209  * Free all the wds ast entries associated with peer
2210  *
2211  * Return: Number of wds ast entries freed
2212  */
2213 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2214 					     struct dp_peer *peer)
2215 {
2216 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2217 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2218 	uint32_t num_ast = 0;
2219 
2220 	TAILQ_INIT(&ast_local_list);
2221 	qdf_spin_lock_bh(&soc->ast_lock);
2222 
2223 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2224 		if (ast_entry->next_hop)
2225 			num_ast++;
2226 
2227 		if (ast_entry->is_mapped)
2228 			soc->ast_table[ast_entry->ast_idx] = NULL;
2229 
2230 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2231 		DP_STATS_INC(soc, ast.deleted, 1);
2232 		dp_peer_ast_hash_remove(soc, ast_entry);
2233 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2234 				  ase_list_elem);
2235 		soc->num_ast_entries--;
2236 	}
2237 
2238 	qdf_spin_unlock_bh(&soc->ast_lock);
2239 
2240 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2241 			   temp_ast_entry) {
2242 		if (ast_entry->callback)
2243 			ast_entry->callback(soc->ctrl_psoc,
2244 					    dp_soc_to_cdp_soc(soc),
2245 					    ast_entry->cookie,
2246 					    CDP_TXRX_AST_DELETED);
2247 
2248 		qdf_mem_free(ast_entry);
2249 	}
2250 
2251 	return num_ast;
2252 }
2253 /**
2254  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2255  * @soc: soc handle
2256  * @peer: peer handle
2257  * @free_wds_count: number of wds entries freed by FW with peer delete
2258  *
2259  * Free all the wds ast entries associated with peer and compare with
2260  * the value received from firmware
2261  *
2262  * Return: None
2263  */
2264 static void
2265 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2266 			  uint32_t free_wds_count)
2267 {
2268 	uint32_t wds_deleted = 0;
2269 
2270 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2271 		return;
2272 
2273 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2274 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2275 	    (free_wds_count != wds_deleted)) {
2276 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2277 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
2278 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2279 			 free_wds_count, wds_deleted);
2280 	}
2281 }
2282 
2283 #else
2284 static void
2285 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2286 			  uint32_t free_wds_count)
2287 {
2288 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2289 
2290 	qdf_spin_lock_bh(&soc->ast_lock);
2291 
2292 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2293 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2294 
2295 		if (ast_entry->is_mapped)
2296 			soc->ast_table[ast_entry->ast_idx] = NULL;
2297 
2298 		dp_peer_free_ast_entry(soc, ast_entry);
2299 	}
2300 
2301 	peer->self_ast_entry = NULL;
2302 	qdf_spin_unlock_bh(&soc->ast_lock);
2303 }
2304 #endif
2305 
2306 /**
2307  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2308  * @soc: soc handle
2309  * @peer: peer handle
2310  * @vdev_id: vdev_id
2311  * @mac_addr: MAC address of the AST entry to search and delete
2312  *
2313  * find the ast entry from the peer list using the mac address and free
2314  * the entry.
2315  *
2316  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2317  */
2318 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2319 					 struct dp_peer *peer,
2320 					 uint8_t vdev_id,
2321 					 uint8_t *mac_addr)
2322 {
2323 	struct dp_ast_entry *ast_entry;
2324 	void *cookie = NULL;
2325 	txrx_ast_free_cb cb = NULL;
2326 
2327 	/*
2328 	 * release the reference only if it is mapped
2329 	 * to ast_table
2330 	 */
2331 
2332 	qdf_spin_lock_bh(&soc->ast_lock);
2333 
2334 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2335 	if (!ast_entry) {
2336 		qdf_spin_unlock_bh(&soc->ast_lock);
2337 		return QDF_STATUS_E_NOENT;
2338 	} else if (ast_entry->is_mapped) {
2339 		soc->ast_table[ast_entry->ast_idx] = NULL;
2340 	}
2341 
2342 	cb = ast_entry->callback;
2343 	cookie = ast_entry->cookie;
2344 
2345 
2346 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2347 
2348 	dp_peer_free_ast_entry(soc, ast_entry);
2349 
2350 	qdf_spin_unlock_bh(&soc->ast_lock);
2351 
2352 	if (cb) {
2353 		cb(soc->ctrl_psoc,
2354 		   dp_soc_to_cdp_soc(soc),
2355 		   cookie,
2356 		   CDP_TXRX_AST_DELETED);
2357 	}
2358 
2359 	return QDF_STATUS_SUCCESS;
2360 }
2361 
2362 void dp_peer_find_hash_erase(struct dp_soc *soc)
2363 {
2364 	int i, id;
2365 
2366 	/*
2367 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2368 	 * it's known that the soc is no longer in use.
2369 	 */
2370 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2371 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2372 			struct dp_peer *peer, *peer_next;
2373 
2374 			/*
2375 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2376 			 * memory access violation after peer is freed
2377 			 */
2378 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2379 				hash_list_elem, peer_next) {
2380 				/*
2381 				 * Don't remove the peer from the hash table -
2382 				 * that would modify the list we are currently
2383 				 * traversing, and it's not necessary anyway.
2384 				 */
2385 				/*
2386 				 * Artificially adjust the peer's ref count to
2387 				 * 1, so it will get deleted by
2388 				 * dp_peer_unref_delete.
2389 				 */
2390 				/* set to zero */
2391 				qdf_atomic_init(&peer->ref_cnt);
2392 				for (id = 0; id < DP_MOD_ID_MAX; id++)
2393 					qdf_atomic_init(&peer->mod_refs[id]);
2394 				/* incr to one */
2395 				qdf_atomic_inc(&peer->ref_cnt);
2396 				qdf_atomic_inc(&peer->mod_refs
2397 						[DP_MOD_ID_CONFIG]);
2398 				dp_peer_unref_delete(peer,
2399 						     DP_MOD_ID_CONFIG);
2400 			}
2401 		}
2402 	}
2403 }
2404 
2405 void dp_peer_ast_table_detach(struct dp_soc *soc)
2406 {
2407 	if (soc->ast_table) {
2408 		qdf_mem_free(soc->ast_table);
2409 		soc->ast_table = NULL;
2410 	}
2411 }
2412 
2413 void dp_peer_find_map_detach(struct dp_soc *soc)
2414 {
2415 	if (soc->peer_id_to_obj_map) {
2416 		qdf_mem_free(soc->peer_id_to_obj_map);
2417 		soc->peer_id_to_obj_map = NULL;
2418 		qdf_spinlock_destroy(&soc->peer_map_lock);
2419 	}
2420 }
2421 
2422 #ifndef AST_OFFLOAD_ENABLE
2423 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2424 {
2425 	QDF_STATUS status;
2426 
2427 	status = dp_peer_find_map_attach(soc);
2428 	if (!QDF_IS_STATUS_SUCCESS(status))
2429 		return status;
2430 
2431 	status = dp_peer_find_hash_attach(soc);
2432 	if (!QDF_IS_STATUS_SUCCESS(status))
2433 		goto map_detach;
2434 
2435 	status = dp_peer_ast_table_attach(soc);
2436 	if (!QDF_IS_STATUS_SUCCESS(status))
2437 		goto hash_detach;
2438 
2439 	status = dp_peer_ast_hash_attach(soc);
2440 	if (!QDF_IS_STATUS_SUCCESS(status))
2441 		goto ast_table_detach;
2442 
2443 	status = dp_peer_mec_hash_attach(soc);
2444 	if (QDF_IS_STATUS_SUCCESS(status)) {
2445 		dp_soc_wds_attach(soc);
2446 		return status;
2447 	}
2448 
2449 	dp_peer_ast_hash_detach(soc);
2450 ast_table_detach:
2451 	dp_peer_ast_table_detach(soc);
2452 hash_detach:
2453 	dp_peer_find_hash_detach(soc);
2454 map_detach:
2455 	dp_peer_find_map_detach(soc);
2456 
2457 	return status;
2458 }
2459 #else
2460 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2461 {
2462 	QDF_STATUS status;
2463 
2464 	status = dp_peer_find_map_attach(soc);
2465 	if (!QDF_IS_STATUS_SUCCESS(status))
2466 		return status;
2467 
2468 	status = dp_peer_find_hash_attach(soc);
2469 	if (!QDF_IS_STATUS_SUCCESS(status))
2470 		goto map_detach;
2471 
2472 	return status;
2473 map_detach:
2474 	dp_peer_find_map_detach(soc);
2475 
2476 	return status;
2477 }
2478 #endif
2479 
2480 #ifdef IPA_OFFLOAD
2481 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2482 				       union hal_reo_status *reo_status)
2483 {
2484 	struct dp_peer *peer = NULL;
2485 	struct dp_rx_tid *rx_tid = NULL;
2486 	unsigned long comb_peer_id_tid;
2487 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
2488 	uint16_t tid;
2489 	uint16_t peer_id;
2490 
2491 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2492 		dp_err("REO stats failure %d\n",
2493 		       queue_status->header.status);
2494 		return;
2495 	}
2496 	comb_peer_id_tid = (unsigned long)cb_ctxt;
2497 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
2498 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
2499 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
2500 	if (!peer)
2501 		return;
2502 	rx_tid  = &peer->rx_tid[tid];
2503 
2504 	if (!rx_tid) {
2505 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2506 		return;
2507 	}
2508 
2509 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
2510 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
2511 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2512 }
2513 
2514 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
2515 #endif
2516 
2517 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2518 	union hal_reo_status *reo_status)
2519 {
2520 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2521 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2522 
2523 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2524 		return;
2525 
2526 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2527 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2528 			       queue_status->header.status, rx_tid->tid);
2529 		return;
2530 	}
2531 
2532 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2533 		       "ssn: %d\n"
2534 		       "curr_idx  : %d\n"
2535 		       "pn_31_0   : %08x\n"
2536 		       "pn_63_32  : %08x\n"
2537 		       "pn_95_64  : %08x\n"
2538 		       "pn_127_96 : %08x\n"
2539 		       "last_rx_enq_tstamp : %08x\n"
2540 		       "last_rx_deq_tstamp : %08x\n"
2541 		       "rx_bitmap_31_0     : %08x\n"
2542 		       "rx_bitmap_63_32    : %08x\n"
2543 		       "rx_bitmap_95_64    : %08x\n"
2544 		       "rx_bitmap_127_96   : %08x\n"
2545 		       "rx_bitmap_159_128  : %08x\n"
2546 		       "rx_bitmap_191_160  : %08x\n"
2547 		       "rx_bitmap_223_192  : %08x\n"
2548 		       "rx_bitmap_255_224  : %08x\n",
2549 		       rx_tid->tid,
2550 		       queue_status->ssn, queue_status->curr_idx,
2551 		       queue_status->pn_31_0, queue_status->pn_63_32,
2552 		       queue_status->pn_95_64, queue_status->pn_127_96,
2553 		       queue_status->last_rx_enq_tstamp,
2554 		       queue_status->last_rx_deq_tstamp,
2555 		       queue_status->rx_bitmap_31_0,
2556 		       queue_status->rx_bitmap_63_32,
2557 		       queue_status->rx_bitmap_95_64,
2558 		       queue_status->rx_bitmap_127_96,
2559 		       queue_status->rx_bitmap_159_128,
2560 		       queue_status->rx_bitmap_191_160,
2561 		       queue_status->rx_bitmap_223_192,
2562 		       queue_status->rx_bitmap_255_224);
2563 
2564 	DP_PRINT_STATS(
2565 		       "curr_mpdu_cnt      : %d\n"
2566 		       "curr_msdu_cnt      : %d\n"
2567 		       "fwd_timeout_cnt    : %d\n"
2568 		       "fwd_bar_cnt        : %d\n"
2569 		       "dup_cnt            : %d\n"
2570 		       "frms_in_order_cnt  : %d\n"
2571 		       "bar_rcvd_cnt       : %d\n"
2572 		       "mpdu_frms_cnt      : %d\n"
2573 		       "msdu_frms_cnt      : %d\n"
2574 		       "total_byte_cnt     : %d\n"
2575 		       "late_recv_mpdu_cnt : %d\n"
2576 		       "win_jump_2k        : %d\n"
2577 		       "hole_cnt           : %d\n",
2578 		       queue_status->curr_mpdu_cnt,
2579 		       queue_status->curr_msdu_cnt,
2580 		       queue_status->fwd_timeout_cnt,
2581 		       queue_status->fwd_bar_cnt,
2582 		       queue_status->dup_cnt,
2583 		       queue_status->frms_in_order_cnt,
2584 		       queue_status->bar_rcvd_cnt,
2585 		       queue_status->mpdu_frms_cnt,
2586 		       queue_status->msdu_frms_cnt,
2587 		       queue_status->total_cnt,
2588 		       queue_status->late_recv_mpdu_cnt,
2589 		       queue_status->win_jump_2k,
2590 		       queue_status->hole_cnt);
2591 
2592 	DP_PRINT_STATS("Addba Req          : %d\n"
2593 			"Addba Resp         : %d\n"
2594 			"Addba Resp success : %d\n"
2595 			"Addba Resp failed  : %d\n"
2596 			"Delba Req received : %d\n"
2597 			"Delba Tx success   : %d\n"
2598 			"Delba Tx Fail      : %d\n"
2599 			"BA window size     : %d\n"
2600 			"Pn size            : %d\n",
2601 			rx_tid->num_of_addba_req,
2602 			rx_tid->num_of_addba_resp,
2603 			rx_tid->num_addba_rsp_success,
2604 			rx_tid->num_addba_rsp_failed,
2605 			rx_tid->num_of_delba_req,
2606 			rx_tid->delba_tx_success_cnt,
2607 			rx_tid->delba_tx_fail_cnt,
2608 			rx_tid->ba_win_size,
2609 			rx_tid->pn_size);
2610 }
2611 
2612 #ifdef REO_SHARED_QREF_TABLE_EN
2613 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2614 					struct dp_peer *peer)
2615 {
2616 	uint8_t tid;
2617 
2618 	if (peer->peer_id > soc->max_peer_id)
2619 		return;
2620 	if (IS_MLO_DP_LINK_PEER(peer))
2621 		return;
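	/* Clear the REO shared qref LUT entries for all TIDs of this peer */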
2622 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2623 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2624 			hal_reo_shared_qaddr_write(soc->hal_soc,
2625 						   peer->peer_id, tid, 0);
2626 	}
2627 }
2628 #endif
2629 
2630 /**
2631  * dp_peer_find_add_id() - map peer_id with peer
2632  * @soc: soc handle
2633  * @peer_mac_addr: peer mac address
2634  * @peer_id: peer id to be mapped
2635  * @hw_peer_id: HW ast index
2636  * @vdev_id: vdev_id
2637  * @peer_type: peer type (link or MLD)
2638  *
2639  * Return: peer on success,
2640  *         NULL on failure
2641  */
2642 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2643 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2644 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2645 {
2646 	struct dp_peer *peer;
2647 	struct cdp_peer_info peer_info = { 0 };
2648 
2649 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2650 	/* check if there's already a peer object with this MAC address */
2651 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2652 				 false, peer_type);
2653 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2654 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2655 		    soc, peer, peer_id, vdev_id,
2656 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2657 
2658 	if (peer) {
2659 		/* peer's ref count was already incremented by
2660 		 * peer_find_hash_find
2661 		 */
2662 		dp_peer_info("%pK: ref_cnt: %d", soc,
2663 			     qdf_atomic_read(&peer->ref_cnt));
2664 
2665 		/*
2666 		 * if peer is in logical delete CP triggered delete before map
2667 		 * is received ignore this event
2668 		 */
2669 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2670 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2671 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2672 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2673 				 vdev_id);
2674 			return NULL;
2675 		}
2676 
2677 		if (peer->peer_id == HTT_INVALID_PEER) {
2678 			if (!IS_MLO_DP_MLD_PEER(peer))
2679 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2680 								   peer_id);
2681 		} else {
2682 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2683 			QDF_ASSERT(0);
2684 			return NULL;
2685 		}
2686 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2687 		if (soc->arch_ops.dp_partner_chips_map)
2688 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2689 
2690 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2691 		return peer;
2692 	}
2693 
2694 	return NULL;
2695 }
2696 
2697 #ifdef WLAN_FEATURE_11BE_MLO
2698 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
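/**
 * dp_gen_ml_peer_id() - generate the ML peer id from the FW assigned peer id
 * @soc: SOC handle
 * @peer_id: link peer id received in the peer map event
 *
 * The ML peer id is derived by setting a dedicated ML-peer bit on top of the
 * peer id; the exact bit position depends on whether the reduced peer id
 * field width is in use. For instance, assuming the ML-peer-valid bit is
 * bit 13, a link peer id of 0x0005 would map to an ML peer id of 0x2005.
 *
 * Return: ML peer id
 */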
2699 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2700 					 uint16_t peer_id)
2701 {
2702 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2703 }
2704 #else
2705 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2706 					 uint16_t peer_id)
2707 {
2708 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2709 }
2710 #endif
2711 
2712 QDF_STATUS
2713 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2714 			   uint8_t *peer_mac_addr,
2715 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2716 			   struct dp_mlo_link_info *mlo_link_info)
2717 {
2718 	struct dp_peer *peer = NULL;
2719 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2720 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2721 	uint8_t vdev_id = 0;
2722 	uint8_t is_wds = 0;
2723 	int i;
2724 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2725 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2726 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2727 	struct dp_soc *primary_soc;
2728 
2729 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2730 					       NULL, peer_mac_addr,
2731 					       1, peer_id, ml_peer_id, 0,
2732 					       vdev_id);
2733 
2734 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2735 		soc, peer_id, ml_peer_id,
2736 		QDF_MAC_ADDR_REF(peer_mac_addr));
2737 
2738 	/* Get corresponding vdev ID for the peer based
2739 	 * on chip ID obtained from mlo peer_map event
2740 	 */
2741 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2742 		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
2743 			vdev_id = mlo_link_info[i].vdev_id;
2744 			break;
2745 		}
2746 	}
2747 
2748 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2749 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2750 	if (peer) {
2751 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2752 		    qdf_mem_cmp(peer->mac_addr.raw,
2753 				peer->vdev->mld_mac_addr.raw,
2754 				QDF_MAC_ADDR_SIZE) != 0) {
2755 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2756 			peer->bss_peer = 1;
2757 			if (peer->txrx_peer)
2758 				peer->txrx_peer->bss_peer = 1;
2759 		}
2760 
2761 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2762 			peer->vdev->bss_ast_hash = ast_hash;
2763 			peer->vdev->bss_ast_idx = hw_peer_id;
2764 		}
2765 
2766 		/* Add an ast entry in case the self ast entry was
2767 		 * deleted due to a DP CP sync issue
2768 		 *
2769 		 * self_ast_entry is modified in the peer create
2770 		 * and peer unmap paths, which cannot run in
2771 		 * parallel with peer map, so no lock is needed
2772 		 * before referring to it
2773 		 */
2774 		if (!peer->self_ast_entry) {
2775 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2776 				QDF_MAC_ADDR_REF(peer_mac_addr));
2777 			dp_peer_add_ast(soc, peer,
2778 					peer_mac_addr,
2779 					type, 0);
2780 		}
2781 		/* If peer setup and hence rx_tid setup got called
2782 		 * before htt peer map then Qref write to LUT did not
2783 		 * happen in rx_tid setup as peer_id was invalid.
2784 		 * So defer Qref write to peer map handler. Check if
2785 		 * rx_tid qdesc for tid 0 is already setup and perform
2786 		 * qref write to LUT for Tid 0 and 16.
2787 		 *
2788 		 * Peer map could be obtained on assoc link, hence
2789 		 * change to primary link's soc.
2790 		 */
2791 		primary_soc = peer->vdev->pdev->soc;
2792 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
2793 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
2794 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2795 						   ml_peer_id,
2796 						   0,
2797 						   peer->rx_tid[0].hw_qdesc_paddr);
2798 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2799 						   ml_peer_id,
2800 						   DP_NON_QOS_TID,
2801 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2802 		}
2803 	}
2804 
2805 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2806 			      vdev_id, ast_hash, is_wds);
2807 
2808 	/*
2809 	 * If AST offload and host AST DB is enabled, populate AST entries on
2810 	 * host based on mlo peer map event from FW
2811 	 */
2812 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2813 		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
2814 					 hw_peer_id, vdev_id,
2815 					 ast_hash, is_wds);
2816 	}
2817 
2818 	return err;
2819 }
2820 #endif
2821 
2822 #ifdef DP_RX_UDP_OVER_PEER_ROAM
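/**
 * dp_rx_reset_roaming_peer() - clear the roaming peer state on the vdev
 * @soc: SOC handle
 * @vdev_id: vdev id
 * @peer_mac_addr: peer mac address received in the peer map event
 *
 * If the given MAC matches the vdev roaming peer MAC, the roaming peer
 * status is reset to WLAN_ROAM_PEER_AUTH_STATUS_NONE and the stored MAC
 * is cleared.
 *
 * Return: None
 */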
2823 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2824 			      uint8_t *peer_mac_addr)
2825 {
2826 	struct dp_vdev *vdev = NULL;
2827 
2828 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2829 	if (vdev) {
2830 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2831 				QDF_MAC_ADDR_SIZE) == 0) {
2832 			vdev->roaming_peer_status =
2833 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2834 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
2835 				     QDF_MAC_ADDR_SIZE);
2836 		}
2837 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2838 	}
2839 }
2840 #endif
2841 
2842 #ifdef WLAN_SUPPORT_PPEDS
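/**
 * dp_tx_ppeds_cfg_astidx_cache_mapping() - configure the PPE DS AST index
 *                                          cache mapping for the vdev
 * @soc: SOC handle
 * @vdev: vdev handle
 * @peer_map: true when invoked on peer map, false on peer unmap
 *
 * Dispatches to the arch specific handler when one is registered.
 *
 * Return: None
 */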
2843 static void
2844 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2845 				     bool peer_map)
2846 {
2847 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
2848 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2849 								   peer_map);
2850 }
2851 #else
2852 static void
2853 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2854 				     bool peer_map)
2855 {
2856 }
2857 #endif
2858 
2859 QDF_STATUS
2860 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2861 		       uint16_t hw_peer_id, uint8_t vdev_id,
2862 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2863 		       uint8_t is_wds)
2864 {
2865 	struct dp_peer *peer = NULL;
2866 	struct dp_vdev *vdev = NULL;
2867 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2868 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2869 
2870 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2871 					       NULL, peer_mac_addr, 1, peer_id,
2872 					       0, 0, vdev_id);
2873 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2874 		soc, peer_id, hw_peer_id,
2875 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2876 
2877 	/* Peer map event for WDS ast entry get the peer from
2878 	 * obj map
2879 	 */
2880 	if (is_wds) {
2881 		if (!soc->ast_offload_support) {
2882 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2883 						     DP_MOD_ID_HTT);
2884 
2885 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2886 					      hw_peer_id,
2887 					      vdev_id, ast_hash, is_wds);
2888 			if (peer)
2889 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2890 		}
2891 	} else {
2892 		/*
2893 		 * It's the responsibility of the CP and FW to ensure
2894 		 * that peer is created successfully. Ideally DP should
2895 		 * not hit the below condition for directly associated
2896 		 * peers.
2897 		 */
2898 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
2899 		    (hw_peer_id >=
2900 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
2901 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2902 			qdf_assert_always(0);
2903 		}
2904 
2905 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2906 					   hw_peer_id, vdev_id,
2907 					   CDP_LINK_PEER_TYPE);
2908 
2909 		if (peer) {
2910 			bool peer_map = true;
2911 
2912 			/* Updating ast_hash and ast_idx in peer level */
2913 			peer->ast_hash = ast_hash;
2914 			peer->ast_idx = hw_peer_id;
2915 			vdev = peer->vdev;
2916 			/* Only check for STA Vdev and peer is not for TDLS */
2917 			if (wlan_op_mode_sta == vdev->opmode &&
2918 			    !peer->is_tdls_peer) {
2919 				if (qdf_mem_cmp(peer->mac_addr.raw,
2920 						vdev->mac_addr.raw,
2921 						QDF_MAC_ADDR_SIZE) != 0) {
2922 					dp_info("%pK: STA vdev bss_peer", soc);
2923 					peer->bss_peer = 1;
2924 					if (peer->txrx_peer)
2925 						peer->txrx_peer->bss_peer = 1;
2926 				}
2927 
2928 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2929 					ast_hash, hw_peer_id);
2930 				vdev->bss_ast_hash = ast_hash;
2931 				vdev->bss_ast_idx = hw_peer_id;
2932 
2933 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2934 								     peer_map);
2935 			}
2936 
2937 			/* Add an ast entry in case the self ast entry was
2938 			 * deleted due to a DP CP sync issue
2939 			 *
2940 			 * self_ast_entry is modified in the peer create
2941 			 * and peer unmap paths, which cannot run in
2942 			 * parallel with peer map, so no lock is needed
2943 			 * before referring to it
2944 			 */
2945 			if (!soc->ast_offload_support &&
2946 				!peer->self_ast_entry) {
2947 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2948 					QDF_MAC_ADDR_REF(peer_mac_addr));
2949 				dp_peer_add_ast(soc, peer,
2950 						peer_mac_addr,
2951 						type, 0);
2952 			}
2953 
2954 			/* If peer setup and hence rx_tid setup got called
2955 			 * before htt peer map then Qref write to LUT did
2956 			 * not happen in rx_tid setup as peer_id was invalid.
2957 			 * So defer Qref write to peer map handler. Check if
2958 			 * rx_tid qdesc for tid 0 is already set up and perform
2959 			 * qref write to LUT for Tid 0 and 16.
2960 			 */
2961 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2962 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2963 			    !IS_MLO_DP_LINK_PEER(peer)) {
2964 				hal_reo_shared_qaddr_write(soc->hal_soc,
2965 							   peer_id,
2966 							   0,
2967 							   peer->rx_tid[0].hw_qdesc_paddr);
2968 				hal_reo_shared_qaddr_write(soc->hal_soc,
2969 							   peer_id,
2970 							   DP_NON_QOS_TID,
2971 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2972 			}
2973 		}
2974 
2975 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2976 				      vdev_id, ast_hash, is_wds);
2977 	}
2978 
2979 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2980 
2981 	/*
2982 	 * If AST offload and host AST DB is enabled, populate AST entries on
2983 	 * host based on peer map event from FW
2984 	 */
2985 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
2986 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
2987 					 hw_peer_id, vdev_id,
2988 					 ast_hash, is_wds);
2989 	}
2990 
2991 	return err;
2992 }
2993 
2994 void
2995 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2996 			 uint8_t vdev_id, uint8_t *mac_addr,
2997 			 uint8_t is_wds, uint32_t free_wds_count)
2998 {
2999 	struct dp_peer *peer;
3000 	struct dp_vdev *vdev = NULL;
3001 
3002 	/*
3003 	 * If FW AST offload is enabled and host AST DB is enabled,
3004 	 * the AST entries are created during peer map from FW.
3005 	 */
3006 	if (soc->ast_offload_support && is_wds) {
3007 		if (!soc->host_ast_db_enable)
3008 			return;
3009 	}
3010 
3011 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3012 
3013 	/*
3014 	 * Currently peer IDs are assigned for vdevs as well as peers.
3015 	 * If the peer ID is for a vdev, then the peer pointer stored
3016 	 * in peer_id_to_obj_map will be NULL.
3017 	 */
3018 	if (!peer) {
3019 		dp_err("Received unmap event for invalid peer_id %u",
3020 		       peer_id);
3021 		return;
3022 	}
3023 
3024 	vdev = peer->vdev;
3025 
3026 	if (peer->txrx_peer) {
3027 		struct cdp_txrx_peer_params_update params = {0};
3028 
3029 		params.osif_vdev = (void *)vdev->osif_vdev;
3030 		params.peer_mac = peer->mac_addr.raw;
3031 		params.chip_id = dp_mlo_get_chip_id(soc);
3032 		params.pdev_id = vdev->pdev->pdev_id;
3033 
3034 		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
3035 				     (void *)&params, peer_id,
3036 				     WDI_NO_VAL, vdev->pdev->pdev_id);
3037 	}
3038 
3039 	/* If V2 peer map messages are enabled, the AST entry has to be
3040 	 * freed here
3041 	 */
3042 	if (is_wds) {
3043 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3044 						   mac_addr)) {
3045 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3046 			return;
3047 		}
3048 
3049 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3050 			  peer, peer->peer_id,
3051 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3052 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3053 			  is_wds);
3054 
3055 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3056 		return;
3057 	}
3058 
3059 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3060 
3061 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3062 					       peer, mac_addr, 0, peer_id,
3063 					       0, 0, vdev_id);
3064 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3065 		soc, peer_id, peer);
3066 
3067 	/* Clear entries in Qref LUT */
3068 	/* TODO: Check if this is to be called from
3069 	 * dp_peer_delete for MLO case if there is race between
3070 	 * new peer id assignment and still not having received
3071 	 * peer unmap for MLD peer with same peer id.
3072 	 */
3073 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3074 
3075 	vdev = peer->vdev;
3076 
3077 	/* only if peer is in STA mode and not tdls peer */
3078 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3079 		bool peer_map = false;
3080 
3081 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3082 	}
3083 
3084 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3085 
3086 	if (soc->arch_ops.dp_partner_chips_unmap)
3087 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3088 
3089 	peer->peer_id = HTT_INVALID_PEER;
3090 
3091 	/*
3092 	 * Reset the ast flow mapping table
3093 	 */
3094 	if (!soc->ast_offload_support)
3095 		dp_peer_reset_flowq_map(peer);
3096 
3097 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3098 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3099 				peer_id, vdev_id, mac_addr);
3100 	}
3101 
3102 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3103 
3104 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3105 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3106 	/*
3107 	 * Remove a reference to the peer.
3108 	 * If there are no more references, delete the peer object.
3109 	 */
3110 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3111 }
3112 
3113 #ifdef WLAN_FEATURE_11BE_MLO
3114 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3115 {
3116 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3117 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3118 	uint8_t vdev_id = DP_VDEV_ALL;
3119 	uint8_t is_wds = 0;
3120 
3121 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3122 					       NULL, mac_addr, 0, peer_id,
3123 					       0, 0, vdev_id);
3124 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3125 		soc, peer_id);
3126 
3127 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3128 				 mac_addr, is_wds,
3129 				 DP_PEER_WDS_COUNT_INVALID);
3130 }
3131 #endif
3132 
3133 #ifndef AST_OFFLOAD_ENABLE
3134 void
3135 dp_peer_find_detach(struct dp_soc *soc)
3136 {
3137 	dp_soc_wds_detach(soc);
3138 	dp_peer_find_map_detach(soc);
3139 	dp_peer_find_hash_detach(soc);
3140 	dp_peer_ast_hash_detach(soc);
3141 	dp_peer_ast_table_detach(soc);
3142 	dp_peer_mec_hash_detach(soc);
3143 }
3144 #else
3145 void
3146 dp_peer_find_detach(struct dp_soc *soc)
3147 {
3148 	dp_peer_find_map_detach(soc);
3149 	dp_peer_find_hash_detach(soc);
3150 }
3151 #endif
3152 
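/**
 * dp_rx_tid_update_cb() - REO command callback for rx tid queue update
 * @soc: DP SOC handle
 * @cb_ctxt: callback context (the dp_rx_tid being updated)
 * @reo_status: REO command status
 *
 * Logs an error if the queue update command neither succeeded nor was
 * drained.
 *
 * Return: None
 */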
3153 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
3154 	union hal_reo_status *reo_status)
3155 {
3156 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
3157 
3158 	if ((reo_status->rx_queue_status.header.status !=
3159 		HAL_REO_CMD_SUCCESS) &&
3160 		(reo_status->rx_queue_status.header.status !=
3161 		HAL_REO_CMD_DRAIN)) {
3162 		/* Should not happen normally. Just print error for now */
3163 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
3164 			    soc, reo_status->rx_queue_status.header.status,
3165 			    rx_tid->tid);
3166 	}
3167 }
3168 
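/**
 * dp_get_peer_vdev_roaming_in_progress() - check if roaming is in progress
 *                                          on the peer's vdev
 * @peer: DP peer handle
 *
 * Queries the ol_ops->is_roam_inprogress callback, when registered, with
 * the vdev id resolved from the peer MAC address.
 *
 * Return: true if roaming is in progress, false otherwise
 */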
3169 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
3170 {
3171 	struct ol_if_ops *ol_ops = NULL;
3172 	bool is_roaming = false;
3173 	uint8_t vdev_id = -1;
3174 	struct cdp_soc_t *soc;
3175 
3176 	if (!peer) {
3177 		dp_peer_info("Peer is NULL. No roaming possible");
3178 		return false;
3179 	}
3180 
3181 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
3182 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
3183 
3184 	if (ol_ops && ol_ops->is_roam_inprogress) {
3185 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
3186 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
3187 	}
3188 
3189 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
3190 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
3191 
3192 	return is_roaming;
3193 }
3194 
3195 #ifdef WLAN_FEATURE_11BE_MLO
3196 /**
3197  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
3198  *			     setup is necessary
3199  * @peer: DP peer handle
3200  *
3201  * Return: true - allow, false - disallow
3202  */
3203 static inline
3204 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3205 {
3206 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3207 		return false;
3208 
3209 	return true;
3210 }
3211 
3212 /**
3213  * dp_rx_tid_update_allow() - check if rx_tid update needed
3214  * @peer: DP peer handle
3215  *
3216  * Return: true - allow, false - disallow
3217  */
3218 static inline
3219 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3220 {
3221 	/* not as expected for MLO connection link peer */
3222 	if (IS_MLO_DP_LINK_PEER(peer)) {
3223 		QDF_BUG(0);
3224 		return false;
3225 	}
3226 
3227 	return true;
3228 }
3229 #else
3230 static inline
3231 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3232 {
3233 	return true;
3234 }
3235 
3236 static inline
3237 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3238 {
3239 	return true;
3240 }
3241 #endif
3242 
3243 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
3244 					 ba_window_size, uint32_t start_seq,
3245 					 bool bar_update)
3246 {
3247 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3248 	struct dp_soc *soc = peer->vdev->pdev->soc;
3249 	struct hal_reo_cmd_params params;
3250 
3251 	if (!dp_rx_tid_update_allow(peer)) {
3252 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3253 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3254 		return QDF_STATUS_E_FAILURE;
3255 	}
3256 
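	/* Update the BA window size (and optionally the SSN) of the REO HW
	 * queue descriptor through an UPDATE_RX_REO_QUEUE command
	 */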
3257 	qdf_mem_zero(&params, sizeof(params));
3258 
3259 	params.std.need_status = 1;
3260 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3261 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3262 	params.u.upd_queue_params.update_ba_window_size = 1;
3263 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3264 
3265 	if (start_seq < IEEE80211_SEQ_MAX) {
3266 		params.u.upd_queue_params.update_ssn = 1;
3267 		params.u.upd_queue_params.ssn = start_seq;
3268 	} else {
3269 	    dp_set_ssn_valid_flag(&params, 0);
3270 	}
3271 
3272 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3273 			    dp_rx_tid_update_cb, rx_tid)) {
3274 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3275 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3276 	}
3277 
3278 	rx_tid->ba_win_size = ba_window_size;
3279 
3280 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3281 		return QDF_STATUS_E_PERM;
3282 
3283 	if (!bar_update)
3284 		dp_peer_rx_reorder_queue_setup(soc, peer,
3285 					       tid, ba_window_size);
3286 
3287 	return QDF_STATUS_SUCCESS;
3288 }
3289 
3290 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3291 /**
3292  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3293  *                                    the deferred list
3294  * @soc: Datapath soc handle
3295  * @freedesc: REO DESC reference that needs to be freed
3296  *
3297  * Return: true if enqueued, else false
3298  */
3299 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3300 					   struct reo_desc_list_node *freedesc)
3301 {
3302 	struct reo_desc_deferred_freelist_node *desc;
3303 
3304 	if (!qdf_atomic_read(&soc->cmn_init_done))
3305 		return false;
3306 
3307 	desc = qdf_mem_malloc(sizeof(*desc));
3308 	if (!desc)
3309 		return false;
3310 
3311 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3312 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3313 	desc->hw_qdesc_vaddr_unaligned =
3314 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3315 	desc->free_ts = qdf_get_system_timestamp();
3316 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3317 
3318 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3319 	if (!soc->reo_desc_deferred_freelist_init) {
3320 		qdf_mem_free(desc);
3321 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3322 		return false;
3323 	}
3324 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3325 			     (qdf_list_node_t *)desc);
3326 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3327 
3328 	return true;
3329 }
3330 
3331 /**
3332  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
3333  *                            based on time threshold
3334  * @soc: Datapath soc handle
3335  *
3336  * Return: None
3337  */
3338 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3339 {
3340 	struct reo_desc_deferred_freelist_node *desc;
3341 	unsigned long curr_ts = qdf_get_system_timestamp();
3342 
3343 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3344 
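	/* Unmap and free descriptors from the head of the deferred list that
	 * have aged beyond REO_DESC_DEFERRED_FREE_MS
	 */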
3345 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3346 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3347 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3348 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3349 				      (qdf_list_node_t **)&desc);
3350 
3351 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3352 
3353 		qdf_mem_unmap_nbytes_single(soc->osdev,
3354 					    desc->hw_qdesc_paddr,
3355 					    QDF_DMA_BIDIRECTIONAL,
3356 					    desc->hw_qdesc_alloc_size);
3357 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3358 		qdf_mem_free(desc);
3359 
3360 		curr_ts = qdf_get_system_timestamp();
3361 	}
3362 
3363 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3364 }
3365 #else
3366 static inline bool
3367 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3368 			       struct reo_desc_list_node *freedesc)
3369 {
3370 	return false;
3371 }
3372 
3373 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3374 {
3375 }
3376 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3377 
3378 /**
3379  * dp_reo_desc_free() - Callback to free reo descriptor memory after
3380  * HW cache flush
3381  *
3382  * @soc: DP SOC handle
3383  * @cb_ctxt: Callback context
3384  * @reo_status: REO command status
3385  */
3386 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3387 	union hal_reo_status *reo_status)
3388 {
3389 	struct reo_desc_list_node *freedesc =
3390 		(struct reo_desc_list_node *)cb_ctxt;
3391 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3392 	unsigned long curr_ts = qdf_get_system_timestamp();
3393 
3394 	if ((reo_status->fl_cache_status.header.status !=
3395 		HAL_REO_CMD_SUCCESS) &&
3396 		(reo_status->fl_cache_status.header.status !=
3397 		HAL_REO_CMD_DRAIN)) {
3398 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
3399 			    soc, reo_status->rx_queue_status.header.status,
3400 			    freedesc->rx_tid.tid);
3401 	}
3402 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3403 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3404 		     rx_tid->tid);
3405 
3406 	/* REO desc is enqueued to be freed at a later point
3407 	 * in time, just free the freedesc alone and return
3408 	 */
3409 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3410 		goto out;
3411 
3412 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3413 
3414 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
3415 	qdf_mem_unmap_nbytes_single(soc->osdev,
3416 		rx_tid->hw_qdesc_paddr,
3417 		QDF_DMA_BIDIRECTIONAL,
3418 		rx_tid->hw_qdesc_alloc_size);
3419 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3420 out:
3421 	qdf_mem_free(freedesc);
3422 }
3423 
3424 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3425 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3426 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3427 {
3428 	if (dma_addr < 0x50000000)
3429 		return QDF_STATUS_E_FAILURE;
3430 	else
3431 		return QDF_STATUS_SUCCESS;
3432 }
3433 #else
3434 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3435 {
3436 	return QDF_STATUS_SUCCESS;
3437 }
3438 #endif
3439 
3440 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3441 				 uint32_t ba_window_size, uint32_t start_seq)
3442 {
3443 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3444 	struct dp_vdev *vdev = peer->vdev;
3445 	struct dp_soc *soc = vdev->pdev->soc;
3446 	uint32_t hw_qdesc_size;
3447 	uint32_t hw_qdesc_align;
3448 	int hal_pn_type;
3449 	void *hw_qdesc_vaddr;
3450 	uint32_t alloc_tries = 0;
3451 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3452 	struct dp_txrx_peer *txrx_peer;
3453 
3454 	if (!qdf_atomic_read(&peer->is_default_route_set))
3455 		return QDF_STATUS_E_FAILURE;
3456 
3457 	if (!dp_rx_tid_setup_allow(peer)) {
3458 		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
3459 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3460 		goto send_wmi_reo_cmd;
3461 	}
3462 
3463 	rx_tid->ba_win_size = ba_window_size;
3464 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3465 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3466 			start_seq, false);
3467 	rx_tid->delba_tx_status = 0;
3468 	rx_tid->ppdu_id_2k = 0;
3469 	rx_tid->num_of_addba_req = 0;
3470 	rx_tid->num_of_delba_req = 0;
3471 	rx_tid->num_of_addba_resp = 0;
3472 	rx_tid->num_addba_rsp_failed = 0;
3473 	rx_tid->num_addba_rsp_success = 0;
3474 	rx_tid->delba_tx_success_cnt = 0;
3475 	rx_tid->delba_tx_fail_cnt = 0;
3476 	rx_tid->statuscode = 0;
3477 
3478 	/* TODO: Allocating HW queue descriptors based on max BA window size
3479 	 * for all QOS TIDs so that same descriptor can be used later when
3480 	 * ADDBA request is received. This should be changed to allocate HW
3481 	 * queue descriptors based on BA window size being negotiated (0 for
3482 	 * non BA cases), and reallocate when BA window size changes and also
3483 	 * send WMI message to FW to change the REO queue descriptor in Rx
3484 	 * peer entry as part of dp_rx_tid_update.
3485 	 */
3486 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3487 					       ba_window_size, tid);
3488 
3489 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3490 	/* To avoid unnecessary extra allocation for alignment, try allocating
3491 	 * the exact size and see if we already have an aligned address.
3492 	 */
3493 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3494 
3495 try_desc_alloc:
3496 	rx_tid->hw_qdesc_vaddr_unaligned =
3497 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3498 
3499 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3500 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3501 			    soc, tid);
3502 		return QDF_STATUS_E_NOMEM;
3503 	}
3504 
3505 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3506 		hw_qdesc_align) {
3507 		/* Address allocated above is not aligned. Allocate extra
3508 		 * memory for alignment
3509 		 */
3510 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3511 		rx_tid->hw_qdesc_vaddr_unaligned =
3512 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3513 					hw_qdesc_align - 1);
3514 
3515 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3516 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3517 				    soc, tid);
3518 			return QDF_STATUS_E_NOMEM;
3519 		}
3520 
3521 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3522 			rx_tid->hw_qdesc_vaddr_unaligned,
3523 			hw_qdesc_align);
3524 
3525 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3526 			      soc, rx_tid->hw_qdesc_alloc_size,
3527 			      hw_qdesc_vaddr);
3528 
3529 	} else {
3530 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3531 	}
3532 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3533 
3534 	txrx_peer = dp_get_txrx_peer(peer);
3535 
3536 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3537 	 * Currently this is set based on htt indication
3538 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3539 	 */
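	/* Map the negotiated unicast security type to the PN check type
	 * programmed into the REO queue descriptor
	 */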
3540 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
3541 	case cdp_sec_type_tkip_nomic:
3542 	case cdp_sec_type_aes_ccmp:
3543 	case cdp_sec_type_aes_ccmp_256:
3544 	case cdp_sec_type_aes_gcmp:
3545 	case cdp_sec_type_aes_gcmp_256:
3546 		hal_pn_type = HAL_PN_WPA;
3547 		break;
3548 	case cdp_sec_type_wapi:
3549 		if (vdev->opmode == wlan_op_mode_ap)
3550 			hal_pn_type = HAL_PN_WAPI_EVEN;
3551 		else
3552 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3553 		break;
3554 	default:
3555 		hal_pn_type = HAL_PN_NONE;
3556 		break;
3557 	}
3558 
3559 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3560 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3561 		vdev->vdev_stats_id);
3562 
3563 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3564 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3565 		&(rx_tid->hw_qdesc_paddr));
3566 
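	/* If the DMA-mapped address fails the REO descriptor address check,
	 * free the buffer and retry the allocation; give up with E_NOMEM
	 * after 10 attempts.
	 */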
3567 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3568 			QDF_STATUS_SUCCESS) {
3569 		if (alloc_tries++ < 10) {
3570 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3571 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3572 			goto try_desc_alloc;
3573 		} else {
3574 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3575 				    soc, tid);
3576 			status = QDF_STATUS_E_NOMEM;
3577 			goto error;
3578 		}
3579 	}
3580 
3581 send_wmi_reo_cmd:
3582 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3583 		status = QDF_STATUS_E_PERM;
3584 		goto error;
3585 	}
3586 
3587 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3588 						tid, ba_window_size);
3589 	if (QDF_IS_STATUS_SUCCESS(status))
3590 		return status;
3591 
3592 error:
3593 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3594 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3595 		    QDF_STATUS_SUCCESS)
3596 			qdf_mem_unmap_nbytes_single(
3597 				soc->osdev,
3598 				rx_tid->hw_qdesc_paddr,
3599 				QDF_DMA_BIDIRECTIONAL,
3600 				rx_tid->hw_qdesc_alloc_size);
3601 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3602 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3603 		rx_tid->hw_qdesc_paddr = 0;
3604 	}
3605 	return status;
3606 }
3607 
3608 #ifdef DP_UMAC_HW_RESET_SUPPORT
3609 static
3610 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
3611 {
3612 	int tid;
3613 
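	/* Reset the REO HW rx queue of every TID that has a queue
	 * descriptor allocated.
	 */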
3614 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
3615 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3616 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
3617 
3618 		if (vaddr)
3619 			dp_reset_rx_reo_tid_queue(soc, vaddr,
3620 						  rx_tid->hw_qdesc_alloc_size);
3621 	}
3622 }
3623 
3624 void dp_reset_tid_q_setup(struct dp_soc *soc)
3625 {
3626 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
3627 }
3628 #endif
3629 #ifdef REO_DESC_DEFER_FREE
3630 /**
3631  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add the
3632  * desc back to the freelist and defer its deletion
3633  *
3634  * @soc: DP SOC handle
3635  * @desc: Base descriptor to be freed
3636  * @reo_status: REO command status
3637  */
3638 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3639 				 struct reo_desc_list_node *desc,
3640 				 union hal_reo_status *reo_status)
3641 {
3642 	desc->free_ts = qdf_get_system_timestamp();
3643 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3644 	qdf_list_insert_back(&soc->reo_desc_freelist,
3645 			     (qdf_list_node_t *)desc);
3646 }
3647 
3648 /**
3649  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued to
3650  * the REO cmd ring to avoid a REO hang
3651  *
3652  * @list_size: REO desc list size to be cleaned
3653  */
3654 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3655 {
3656 	unsigned long curr_ts = qdf_get_system_timestamp();
3657 
3658 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3659 		dp_err_log("%lu:freedesc number %d in freelist",
3660 			   curr_ts, *list_size);
3661 		/* limit the batch queue size */
3662 		*list_size = REO_DESC_FREELIST_SIZE;
3663 	}
3664 }
3665 #else
3666 /**
3667  * dp_reo_desc_clean_up() - If sending the REO cmd to flush the cache
3668  * fails, free the base REO desc anyway
3669  *
3670  * @soc: DP SOC handle
3671  * @desc: Base descriptor to be freed
3672  * @reo_status: REO command status
3673  */
3674 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3675 				 struct reo_desc_list_node *desc,
3676 				 union hal_reo_status *reo_status)
3677 {
3678 	if (reo_status) {
3679 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3680 		reo_status->fl_cache_status.header.status = 0;
3681 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3682 	}
3683 }
3684 
3685 /**
3686  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued to
3687  * the REO cmd ring to avoid a REO hang
3688  *
3689  * @list_size: REO desc list size to be cleaned
3690  */
3691 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3692 {
3693 }
3694 #endif
3695 
3696 /**
3697  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3698  * cmd and re-insert desc into free list if send fails.
3699  *
3700  * @soc: DP SOC handle
3701  * @desc: desc with resend update cmd flag set
3702  * @rx_tid: Desc RX tid associated with update cmd for resetting
3703  * valid field to 0 in h/w
3704  *
3705  * Return: QDF status
3706  */
3707 static QDF_STATUS
3708 dp_resend_update_reo_cmd(struct dp_soc *soc,
3709 			 struct reo_desc_list_node *desc,
3710 			 struct dp_rx_tid *rx_tid)
3711 {
3712 	struct hal_reo_cmd_params params;
3713 
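	/* Build the UPDATE_RX_REO_QUEUE cmd that clears the VLD bit of the
	 * queue descriptor referenced by this freelist entry.
	 */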
3714 	qdf_mem_zero(&params, sizeof(params));
3715 	params.std.need_status = 1;
3716 	params.std.addr_lo =
3717 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3718 	params.std.addr_hi =
3719 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3720 	params.u.upd_queue_params.update_vld = 1;
3721 	params.u.upd_queue_params.vld = 0;
3722 	desc->resend_update_reo_cmd = false;
3723 	/*
3724 	 * If the cmd send fails then set resend_update_reo_cmd flag
3725 	 * and insert the desc at the end of the free list to retry.
3726 	 */
3727 	if (dp_reo_send_cmd(soc,
3728 			    CMD_UPDATE_RX_REO_QUEUE,
3729 			    &params,
3730 			    dp_rx_tid_delete_cb,
3731 			    (void *)desc)
3732 	    != QDF_STATUS_SUCCESS) {
3733 		desc->resend_update_reo_cmd = true;
3734 		desc->free_ts = qdf_get_system_timestamp();
3735 		qdf_list_insert_back(&soc->reo_desc_freelist,
3736 				     (qdf_list_node_t *)desc);
3737 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3738 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3739 		return QDF_STATUS_E_FAILURE;
3740 	}
3741 
3742 	return QDF_STATUS_SUCCESS;
3743 }
3744 
3745 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3746 			 union hal_reo_status *reo_status)
3747 {
3748 	struct reo_desc_list_node *freedesc =
3749 		(struct reo_desc_list_node *)cb_ctxt;
3750 	uint32_t list_size;
3751 	struct reo_desc_list_node *desc;
3752 	unsigned long curr_ts = qdf_get_system_timestamp();
3753 	struct hal_reo_cmd_params params;
3754 
3755 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
3756 
3757 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
3758 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3759 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
3760 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
3761 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
3762 		return;
3763 	} else if (reo_status->rx_queue_status.header.status !=
3764 		HAL_REO_CMD_SUCCESS) {
3765 		/* Should not happen normally. Just print error for now */
3766 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
3767 			   reo_status->rx_queue_status.header.status,
3768 			   freedesc->rx_tid.tid);
3769 	}
3770 
3771 	dp_peer_info("%pK: rx_tid: %d status: %d",
3772 		     soc, freedesc->rx_tid.tid,
3773 		     reo_status->rx_queue_status.header.status);
3774 
3775 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3776 	freedesc->free_ts = curr_ts;
3777 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
3778 		(qdf_list_node_t *)freedesc, &list_size);
3779 
3780 	/* On the MCL path the desc is added back to reo_desc_freelist when a
3781 	 * REO FLUSH fails. This can grow the number of REO queue descs pending
3782 	 * in the free list beyond the REO_CMD_RING max size, flooding the REO
3783 	 * CMD ring and leaving the REO HW in an unexpected condition. Hence
3784 	 * the number of REO cmds in a batch operation needs to be limited.
3785 	 */
3786 	dp_reo_limit_clean_batch_sz(&list_size);
3787 
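	/* Drain the freelist: an entry is processed once the list has grown
	 * to REO_DESC_FREELIST_SIZE, the entry has aged past
	 * REO_DESC_FREE_DEFER_MS, or it is marked for an UPDATE cmd resend.
	 */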
3788 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
3789 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3790 		((list_size >= REO_DESC_FREELIST_SIZE) ||
3791 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
3792 		(desc->resend_update_reo_cmd && list_size))) {
3793 		struct dp_rx_tid *rx_tid;
3794 
3795 		qdf_list_remove_front(&soc->reo_desc_freelist,
3796 				(qdf_list_node_t **)&desc);
3797 		list_size--;
3798 		rx_tid = &desc->rx_tid;
3799 
3800 		/* First process descs with resend_update_reo_cmd set */
3801 		if (desc->resend_update_reo_cmd) {
3802 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
3803 			    QDF_STATUS_SUCCESS)
3804 				break;
3805 			else
3806 				continue;
3807 		}
3808 
3809 		/* Flush base descriptor */
3810 		qdf_mem_zero(&params, sizeof(params));
3811 		params.std.need_status = 1;
3812 		params.std.addr_lo =
3813 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
3814 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3815 		if (rx_tid->ba_win_size > 256)
3816 			params.u.fl_cache_params.flush_q_1k_desc = 1;
3817 		params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
3818 
3819 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
3820 							  CMD_FLUSH_CACHE,
3821 							  &params,
3822 							  dp_reo_desc_free,
3823 							  (void *)desc)) {
3824 			union hal_reo_status reo_status;
3825 			/*
3826 			 * If dp_reo_send_cmd() returns failure, the related TID
3827 			 * queue desc should be unmapped, and the local reo_desc,
3828 			 * together with the TID queue desc, freed accordingly.
3829 			 *
3830 			 * Invoke the desc_free function directly here to clean up.
3831 			 *
3832 			 * On the MCL path, add the desc back to the free desc
3833 			 * list instead and defer the deletion.
3834 			 */
3835 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
3836 				   rx_tid->tid);
3837 			dp_reo_desc_clean_up(soc, desc, &reo_status);
3838 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3839 			break;
3840 		}
3841 	}
3842 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3843 
3844 	dp_reo_desc_defer_free(soc);
3845 }
3846 
3847 /**
3848  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
3849  * @peer: Datapath peer handle
3850  * @tid: TID
3851  *
3852  * Return: 0 on success, error code on failure
3853  */
3854 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
3855 {
3856 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
3857 	struct dp_soc *soc = peer->vdev->pdev->soc;
3858 	union hal_reo_status reo_status;
3859 	struct hal_reo_cmd_params params;
3860 	struct reo_desc_list_node *freedesc =
3861 		qdf_mem_malloc(sizeof(*freedesc));
3862 
3863 	if (!freedesc) {
3864 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
3865 			    soc, tid);
3866 		qdf_assert(0);
3867 		return -ENOMEM;
3868 	}
3869 
3870 	freedesc->rx_tid = *rx_tid;
3871 	freedesc->resend_update_reo_cmd = false;
3872 
3873 	qdf_mem_zero(&params, sizeof(params));
3874 
3875 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
3876 
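	/* Invoke the delete callback directly with a synthesized SUCCESS
	 * status so that the descriptor is queued on the freelist and freed
	 * through the deferred flush path.
	 */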
3877 	reo_status.rx_queue_status.header.status = HAL_REO_CMD_SUCCESS;
3878 	dp_rx_tid_delete_cb(soc, freedesc, &reo_status);
3879 
3880 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3881 	rx_tid->hw_qdesc_alloc_size = 0;
3882 	rx_tid->hw_qdesc_paddr = 0;
3883 
3884 	return 0;
3885 }
3886 
3887 #ifdef DP_LFR
3888 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
3889 {
3890 	int tid;
3891 
3892 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
3893 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
3894 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
3895 			      tid, peer, peer->local_id);
3896 	}
3897 }
3898 #else
3899 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
3900 #endif
3901 
3902 #ifdef WLAN_FEATURE_11BE_MLO
3903 /**
3904  * dp_peer_rx_tids_init() - initialize each rx TID in the peer
3905  * @peer: peer pointer
3906  *
3907  * Return: None
3908  */
3909 static void dp_peer_rx_tids_init(struct dp_peer *peer)
3910 {
3911 	int tid;
3912 	struct dp_rx_tid *rx_tid;
3913 	struct dp_rx_tid_defrag *rx_tid_defrag;
3914 
3915 	if (!IS_MLO_DP_LINK_PEER(peer)) {
3916 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3917 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
3918 
3919 			rx_tid_defrag->array = &rx_tid_defrag->base;
3920 			rx_tid_defrag->defrag_timeout_ms = 0;
3921 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
3922 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
3923 			rx_tid_defrag->base.head = NULL;
3924 			rx_tid_defrag->base.tail = NULL;
3925 			rx_tid_defrag->tid = tid;
3926 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
3927 		}
3928 	}
3929 
3930 	/* If this is not the first assoc link peer,
3931 	 * do not initialize the rx_tids again.
3932 	 */
3933 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3934 		return;
3935 
3936 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3937 		rx_tid = &peer->rx_tid[tid];
3938 		rx_tid->tid = tid;
3939 		rx_tid->ba_win_size = 0;
3940 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3941 	}
3942 }
3943 #else
3944 static void dp_peer_rx_tids_init(struct dp_peer *peer)
3945 {
3946 	int tid;
3947 	struct dp_rx_tid *rx_tid;
3948 	struct dp_rx_tid_defrag *rx_tid_defrag;
3949 
3950 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3951 		rx_tid = &peer->rx_tid[tid];
3952 
3953 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
3954 		rx_tid->tid = tid;
3955 		rx_tid->ba_win_size = 0;
3956 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3957 
3958 		rx_tid_defrag->base.head = NULL;
3959 		rx_tid_defrag->base.tail = NULL;
3960 		rx_tid_defrag->tid = tid;
3961 		rx_tid_defrag->array = &rx_tid_defrag->base;
3962 		rx_tid_defrag->defrag_timeout_ms = 0;
3963 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
3964 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
3965 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
3966 	}
3967 }
3968 #endif
3969 
3970 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3971 {
3972 	dp_peer_rx_tids_init(peer);
3973 
3974 	peer->active_ba_session_cnt = 0;
3975 	peer->hw_buffer_size = 0;
3976 	peer->kill_256_sessions = 0;
3977 
3978 	/* Setup default (non-qos) rx tid queue */
3979 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
3980 
3981 	/* Setup rx tid queue for TID 0.
3982 	 * Other queues will be set up on receiving the first packet, which will
3983 	 * cause a NULL REO queue error
3984 	 */
3985 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
3986 
3987 	/*
3988 	 * Set up the rest of the TIDs to handle LFR
3989 	 */
3990 	dp_peer_setup_remaining_tids(peer);
3991 
3992 	/*
3993 	 * Set security defaults: no PN check, no security. The target may
3994 	 * send a HTT SEC_IND message to overwrite these defaults.
3995 	 */
3996 	if (peer->txrx_peer)
3997 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
3998 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
3999 				cdp_sec_type_none;
4000 }
4001 
4002 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4003 {
4004 	int tid;
4005 	uint32_t tid_delete_mask = 0;
4006 
4007 	if (!peer->txrx_peer)
4008 		return;
4009 
4010 	dp_info("Remove tids for peer: %pK", peer);
4011 
4012 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4013 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
4014 		struct dp_rx_tid_defrag *defrag_rx_tid =
4015 				&peer->txrx_peer->rx_tid[tid];
4016 
4017 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4018 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
4019 			/* Cleanup defrag related resource */
4020 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
4021 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
4022 		}
4023 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4024 
4025 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4026 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
4027 			dp_rx_tid_delete_wifi3(peer, tid);
4028 
4029 			tid_delete_mask |= (1 << tid);
4030 		}
4031 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4032 	}
4033 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
4034 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
4035 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
4036 			peer->vdev->pdev->pdev_id,
4037 			peer->vdev->vdev_id, peer->mac_addr.raw,
4038 			tid_delete_mask);
4039 	}
4040 #endif
4041 }
4042 
4043 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4044 {
4045 	enum wlan_op_mode vdev_opmode;
4046 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
4047 	struct dp_pdev *pdev = vdev->pdev;
4048 	struct dp_soc *soc = pdev->soc;
4049 
4050 	/* save vdev related members in case the vdev is freed */
4051 	vdev_opmode = vdev->opmode;
4052 
4053 	if (!IS_MLO_DP_MLD_PEER(peer))
4054 		dp_monitor_peer_tx_cleanup(vdev, peer);
4055 
4056 	/* cleanup the Rx reorder queues for this peer */
4057 	if (vdev_opmode != wlan_op_mode_monitor)
4058 		dp_peer_rx_cleanup(vdev, peer);
4059 
4060 	dp_peer_rx_tids_destroy(peer);
4061 
4062 	if (IS_MLO_DP_LINK_PEER(peer))
4063 		dp_link_peer_del_mld_peer(peer);
4064 	if (IS_MLO_DP_MLD_PEER(peer))
4065 		dp_mld_peer_deinit_link_peers_info(peer);
4066 
4067 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
4068 		     QDF_MAC_ADDR_SIZE);
4069 
4070 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
4071 		soc->cdp_soc.ol_ops->peer_unref_delete(
4072 				soc->ctrl_psoc,
4073 				vdev->pdev->pdev_id,
4074 				peer->mac_addr.raw, vdev_mac_addr,
4075 				vdev_opmode);
4076 }
4077 
4078 /**
4079  * dp_teardown_256_ba_sessions() - Teardown sessions using 256
4080  *                                window size when a request with
4081  *                                64 window size is received.
4082  *                                This is done as a WAR since HW can
4083  *                                have only one setting per peer (64 or 256).
4084  *                                For HKv2, we use per tid buffersize setting
4085  *                                for 0 to per_tid_basize_max_tid. For tid
4086  *                                more than per_tid_basize_max_tid we use HKv1
4087  *                                method.
4088  * @peer: Datapath peer
4089  *
4090  * Return: void
4091  */
4092 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
4093 {
4094 	uint8_t delba_rcode = 0;
4095 	int tid;
4096 	struct dp_rx_tid *rx_tid = NULL;
4097 
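	/* Starting from per_tid_basize_max_tid, send a DELBA on every active
	 * or in-progress BA session whose window exceeds 64, unless a DELBA
	 * is already pending for that TID.
	 */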
4098 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
4099 	for (; tid < DP_MAX_TIDS; tid++) {
4100 		rx_tid = &peer->rx_tid[tid];
4101 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4102 
4103 		if (rx_tid->ba_win_size <= 64) {
4104 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4105 			continue;
4106 		} else {
4107 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
4108 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4109 				/* send delba */
4110 				if (!rx_tid->delba_tx_status) {
4111 					rx_tid->delba_tx_retry++;
4112 					rx_tid->delba_tx_status = 1;
4113 					rx_tid->delba_rcode =
4114 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
4115 					delba_rcode = rx_tid->delba_rcode;
4116 
4117 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4118 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4119 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4120 							peer->vdev->pdev->soc->ctrl_psoc,
4121 							peer->vdev->vdev_id,
4122 							peer->mac_addr.raw,
4123 							tid, delba_rcode,
4124 							CDP_DELBA_REASON_NONE);
4125 				} else {
4126 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4127 				}
4128 			} else {
4129 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4130 			}
4131 		}
4132 	}
4133 }
4134 
4135 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
4136 				      uint8_t *peer_mac,
4137 				      uint16_t vdev_id,
4138 				      uint8_t tid, int status)
4139 {
4140 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4141 					(struct dp_soc *)cdp_soc,
4142 					peer_mac, 0, vdev_id,
4143 					DP_MOD_ID_CDP);
4144 	struct dp_rx_tid *rx_tid = NULL;
4145 
4146 	if (!peer) {
4147 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4148 		goto fail;
4149 	}
4150 	rx_tid = &peer->rx_tid[tid];
4151 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4152 	if (status) {
4153 		rx_tid->num_addba_rsp_failed++;
4154 		if (rx_tid->hw_qdesc_vaddr_unaligned)
4155 			dp_rx_tid_update_wifi3(peer, tid, 1,
4156 					       IEEE80211_SEQ_MAX, false);
4157 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4158 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4159 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
4160 
4161 		goto success;
4162 	}
4163 
4164 	rx_tid->num_addba_rsp_success++;
4165 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
4166 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4167 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
4168 			    cdp_soc, tid);
4169 		goto fail;
4170 	}
4171 
4172 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
4173 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4174 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
4175 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4176 		goto fail;
4177 	}
4178 
4179 	if (dp_rx_tid_update_wifi3(peer, tid,
4180 				   rx_tid->ba_win_size,
4181 				   rx_tid->startseqnum,
4182 				   false)) {
4183 		dp_err("Failed update REO SSN");
4184 	}
4185 
4186 	dp_info("tid %u window_size %u start_seq_num %u",
4187 		tid, rx_tid->ba_win_size,
4188 		rx_tid->startseqnum);
4189 
4190 	/* First Session */
4191 	if (peer->active_ba_session_cnt == 0) {
4192 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
4193 			peer->hw_buffer_size = 256;
4194 		else if (rx_tid->ba_win_size <= 1024 &&
4195 			 rx_tid->ba_win_size > 256)
4196 			peer->hw_buffer_size = 1024;
4197 		else
4198 			peer->hw_buffer_size = 64;
4199 	}
4200 
4201 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4202 
4203 	peer->active_ba_session_cnt++;
4204 
4205 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4206 
4207 	/* Kill any session having 256 buffer size
4208 	 * when 64 buffer size request is received.
4209 	 * Also, latch on to 64 as new buffer size.
4210 	 */
4211 	if (peer->kill_256_sessions) {
4212 		dp_teardown_256_ba_sessions(peer);
4213 		peer->kill_256_sessions = 0;
4214 	}
4215 
4216 success:
4217 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4218 	return QDF_STATUS_SUCCESS;
4219 
4220 fail:
4221 	if (peer)
4222 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4223 
4224 	return QDF_STATUS_E_FAILURE;
4225 }
4226 
4227 QDF_STATUS
4228 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4229 			     uint16_t vdev_id, uint8_t tid,
4230 			     uint8_t *dialogtoken, uint16_t *statuscode,
4231 			     uint16_t *buffersize, uint16_t *batimeout)
4232 {
4233 	struct dp_rx_tid *rx_tid = NULL;
4234 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4235 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4236 						       peer_mac, 0, vdev_id,
4237 						       DP_MOD_ID_CDP);
4238 
4239 	if (!peer) {
4240 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4241 		return QDF_STATUS_E_FAILURE;
4242 	}
4243 	rx_tid = &peer->rx_tid[tid];
4244 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4245 	rx_tid->num_of_addba_resp++;
4246 	/* setup ADDBA response parameters */
4247 	*dialogtoken = rx_tid->dialogtoken;
4248 	*statuscode = rx_tid->statuscode;
4249 	*buffersize = rx_tid->ba_win_size;
4250 	*batimeout  = 0;
4251 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4252 
4253 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4254 
4255 	return status;
4256 }
4257 
4258 /**
4259  * dp_check_ba_buffersize() - Check buffer size in request
4260  *                            and latch onto this size based on
4261  *                            size used in first active session.
4262  * @peer: Datapath peer
4263  * @tid: Tid
4264  * @buffersize: Block ack window size
4265  *
4266  * Return: void
4267  */
4268 static void dp_check_ba_buffersize(struct dp_peer *peer,
4269 				   uint16_t tid,
4270 				   uint16_t buffersize)
4271 {
4272 	struct dp_rx_tid *rx_tid = NULL;
4273 	struct dp_soc *soc = peer->vdev->pdev->soc;
4274 	uint16_t max_ba_window;
4275 
4276 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
4277 	dp_info("Input buffersize %d, max dp allowed %d",
4278 		buffersize, max_ba_window);
4279 	/* Adjust BA window size, restrict it to max DP allowed */
4280 	buffersize = QDF_MIN(buffersize, max_ba_window);
4281 
4282 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
4283 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4284 		soc->per_tid_basize_max_tid, tid, buffersize,
4285 		peer->hw_buffer_size);
4286 
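	/* For TIDs below per_tid_basize_max_tid the HW supports a per-TID
	 * buffer size, so the (capped) requested window is used directly.
	 * For the remaining TIDs the peer-wide hw_buffer_size latched by the
	 * first active session constrains the negotiated window.
	 */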
4287 	rx_tid = &peer->rx_tid[tid];
4288 	if (soc->per_tid_basize_max_tid &&
4289 	    tid < soc->per_tid_basize_max_tid) {
4290 		rx_tid->ba_win_size = buffersize;
4291 		goto out;
4292 	} else {
4293 		if (peer->active_ba_session_cnt == 0) {
4294 			rx_tid->ba_win_size = buffersize;
4295 		} else {
4296 			if (peer->hw_buffer_size == 64) {
4297 				if (buffersize <= 64)
4298 					rx_tid->ba_win_size = buffersize;
4299 				else
4300 					rx_tid->ba_win_size = peer->hw_buffer_size;
4301 			} else if (peer->hw_buffer_size == 256) {
4302 				if (buffersize > 64) {
4303 					rx_tid->ba_win_size = buffersize;
4304 				} else {
4305 					rx_tid->ba_win_size = buffersize;
4306 					peer->hw_buffer_size = 64;
4307 					peer->kill_256_sessions = 1;
4308 				}
4309 			} else if (buffersize <= 1024) {
4310 				/*
4311 				 * Above checks are only for HK V2
4312 				 * Set incoming buffer size for others
4313 				 */
4314 				rx_tid->ba_win_size = buffersize;
4315 			} else {
4316 				dp_err("Invalid buffer size %d", buffersize);
4317 				qdf_assert_always(0);
4318 			}
4319 		}
4320 	}
4321 
4322 out:
4323 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
4324 		rx_tid->ba_win_size,
4325 		peer->hw_buffer_size,
4326 		peer->kill_256_sessions);
4327 }
4328 
4329 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
4330 					uint8_t *peer_mac, uint16_t vdev_id,
4331 					uint8_t tid, uint16_t buffersize)
4332 {
4333 	struct dp_rx_tid *rx_tid = NULL;
4334 	struct dp_peer *peer;
4335 
4336 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4337 					      peer_mac, 0, vdev_id,
4338 					      DP_MOD_ID_CDP);
4339 	if (!peer) {
4340 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4341 		return QDF_STATUS_E_FAILURE;
4342 	}
4343 
4344 	rx_tid = &peer->rx_tid[tid];
4345 
4346 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4347 	rx_tid->ba_win_size = buffersize;
4348 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4349 
4350 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
4351 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
4352 
4353 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4354 
4355 	return QDF_STATUS_SUCCESS;
4356 }
4357 
4358 #define DP_RX_BA_SESSION_DISABLE  1
4359 
4360 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4361 				  uint8_t *peer_mac,
4362 				  uint16_t vdev_id,
4363 				  uint8_t dialogtoken,
4364 				  uint16_t tid, uint16_t batimeout,
4365 				  uint16_t buffersize,
4366 				  uint16_t startseqnum)
4367 {
4368 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4369 	struct dp_rx_tid *rx_tid = NULL;
4370 	struct dp_peer *peer;
4371 
4372 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4373 					      peer_mac,
4374 					      0, vdev_id,
4375 					      DP_MOD_ID_CDP);
4376 
4377 	if (!peer) {
4378 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4379 		return QDF_STATUS_E_FAILURE;
4380 	}
4381 	rx_tid = &peer->rx_tid[tid];
4382 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4383 	rx_tid->num_of_addba_req++;
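	/* If a BA session is already active on this TID with an HW queue
	 * set up, tear down the existing session state before handling the
	 * new ADDBA request.
	 */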
4384 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4385 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
4386 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4387 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4388 		peer->active_ba_session_cnt--;
4389 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4390 			      cdp_soc, tid);
4391 	}
4392 
4393 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4394 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4395 		status = QDF_STATUS_E_FAILURE;
4396 		goto fail;
4397 	}
4398 
4399 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4400 		dp_peer_info("%pK: disable BA session",
4401 			     cdp_soc);
4402 
4403 		buffersize = 1;
4404 	} else if (rx_tid->rx_ba_win_size_override) {
4405 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4406 			     rx_tid->rx_ba_win_size_override);
4407 
4408 		buffersize = rx_tid->rx_ba_win_size_override;
4409 	} else {
4410 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4411 			     buffersize);
4412 	}
4413 
4414 	dp_check_ba_buffersize(peer, tid, buffersize);
4415 
4416 	if (dp_rx_tid_setup_wifi3(peer, tid,
4417 	    rx_tid->ba_win_size, startseqnum)) {
4418 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4419 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4420 		status = QDF_STATUS_E_FAILURE;
4421 		goto fail;
4422 	}
4423 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4424 
4425 	rx_tid->dialogtoken = dialogtoken;
4426 	rx_tid->startseqnum = startseqnum;
4427 
4428 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4429 		rx_tid->statuscode = rx_tid->userstatuscode;
4430 	else
4431 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4432 
4433 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4434 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4435 
4436 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4437 
4438 fail:
4439 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4440 
4441 	return status;
4442 }
4443 
4444 QDF_STATUS
4445 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4446 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4447 {
4448 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4449 					(struct dp_soc *)cdp_soc,
4450 					peer_mac, 0, vdev_id,
4451 					DP_MOD_ID_CDP);
4452 	struct dp_rx_tid *rx_tid;
4453 
4454 	if (!peer) {
4455 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4456 		return QDF_STATUS_E_FAILURE;
4457 	}
4458 
4459 	rx_tid = &peer->rx_tid[tid];
4460 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4461 	rx_tid->userstatuscode = statuscode;
4462 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4463 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4464 
4465 	return QDF_STATUS_SUCCESS;
4466 }
4467 
4468 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4469 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4470 {
4471 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4472 	struct dp_rx_tid *rx_tid;
4473 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4474 					(struct dp_soc *)cdp_soc,
4475 					peer_mac, 0, vdev_id,
4476 					DP_MOD_ID_CDP);
4477 
4478 	if (!peer) {
4479 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4480 		return QDF_STATUS_E_FAILURE;
4481 	}
4482 	rx_tid = &peer->rx_tid[tid];
4483 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4484 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4485 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4486 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4487 		status = QDF_STATUS_E_FAILURE;
4488 		goto fail;
4489 	}
4490 	/* TODO: See if we can delete the existing REO queue descriptor and
4491 	 * replace it with a new one without queue extension descriptors to save
4492 	 * memory
4493 	 */
4494 	rx_tid->delba_rcode = reasoncode;
4495 	rx_tid->num_of_delba_req++;
4496 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4497 
4498 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4499 	peer->active_ba_session_cnt--;
4500 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4501 fail:
4502 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4503 
4504 	return status;
4505 }
4506 
4507 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4508 				 uint16_t vdev_id,
4509 				 uint8_t tid, int status)
4510 {
4511 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4512 	struct dp_rx_tid *rx_tid = NULL;
4513 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4514 					(struct dp_soc *)cdp_soc,
4515 					peer_mac, 0, vdev_id,
4516 					DP_MOD_ID_CDP);
4517 
4518 	if (!peer) {
4519 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4520 		return QDF_STATUS_E_FAILURE;
4521 	}
4522 	rx_tid = &peer->rx_tid[tid];
4523 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4524 	if (status) {
4525 		rx_tid->delba_tx_fail_cnt++;
4526 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4527 			rx_tid->delba_tx_retry = 0;
4528 			rx_tid->delba_tx_status = 0;
4529 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4530 		} else {
4531 			rx_tid->delba_tx_retry++;
4532 			rx_tid->delba_tx_status = 1;
4533 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4534 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4535 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4536 					peer->vdev->pdev->soc->ctrl_psoc,
4537 					peer->vdev->vdev_id,
4538 					peer->mac_addr.raw, tid,
4539 					rx_tid->delba_rcode,
4540 					CDP_DELBA_REASON_NONE);
4541 		}
4542 		goto end;
4543 	} else {
4544 		rx_tid->delba_tx_success_cnt++;
4545 		rx_tid->delba_tx_retry = 0;
4546 		rx_tid->delba_tx_status = 0;
4547 	}
4548 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4549 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4550 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4551 		peer->active_ba_session_cnt--;
4552 	}
4553 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4554 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4555 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4556 	}
4557 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4558 
4559 end:
4560 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4561 
4562 	return ret;
4563 }
4564 
4565 QDF_STATUS
4566 dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
4567 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4568 		      uint32_t *rx_pn)
4569 {
4570 	struct dp_pdev *pdev;
4571 	int i;
4572 	uint8_t pn_size;
4573 	struct hal_reo_cmd_params params;
4574 	struct dp_peer *peer = NULL;
4575 	struct dp_vdev *vdev = NULL;
4576 	struct dp_soc *soc = NULL;
4577 
4578 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
4579 					      peer_mac, 0, vdev_id,
4580 					      DP_MOD_ID_CDP);
4581 
4582 	if (!peer) {
4583 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4584 		return QDF_STATUS_E_FAILURE;
4585 	}
4586 
4587 	vdev = peer->vdev;
4588 
4589 	if (!vdev) {
4590 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
4591 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4592 		return QDF_STATUS_E_FAILURE;
4593 	}
4594 
4595 	pdev = vdev->pdev;
4596 	soc = pdev->soc;
4597 	qdf_mem_zero(&params, sizeof(params));
4598 
4599 	params.std.need_status = 1;
4600 	params.u.upd_queue_params.update_pn_valid = 1;
4601 	params.u.upd_queue_params.update_pn_size = 1;
4602 	params.u.upd_queue_params.update_pn = 1;
4603 	params.u.upd_queue_params.update_pn_check_needed = 1;
4604 	params.u.upd_queue_params.update_svld = 1;
4605 	params.u.upd_queue_params.svld = 0;
4606 
4607 	switch (sec_type) {
4608 	case cdp_sec_type_tkip_nomic:
4609 	case cdp_sec_type_aes_ccmp:
4610 	case cdp_sec_type_aes_ccmp_256:
4611 	case cdp_sec_type_aes_gcmp:
4612 	case cdp_sec_type_aes_gcmp_256:
4613 		params.u.upd_queue_params.pn_check_needed = 1;
4614 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
4615 		pn_size = 48;
4616 		break;
4617 	case cdp_sec_type_wapi:
4618 		params.u.upd_queue_params.pn_check_needed = 1;
4619 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
4620 		pn_size = 128;
4621 		if (vdev->opmode == wlan_op_mode_ap) {
4622 			params.u.upd_queue_params.pn_even = 1;
4623 			params.u.upd_queue_params.update_pn_even = 1;
4624 		} else {
4625 			params.u.upd_queue_params.pn_uneven = 1;
4626 			params.u.upd_queue_params.update_pn_uneven = 1;
4627 		}
4628 		break;
4629 	default:
4630 		params.u.upd_queue_params.pn_check_needed = 0;
4631 		pn_size = 0;
4632 		break;
4633 	}
4634 
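	/* Program the PN parameters into the REO queue descriptor of every
	 * TID that already has an HW queue set up; TIDs without a queue are
	 * only logged and skipped.
	 */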
4636 	for (i = 0; i < DP_MAX_TIDS; i++) {
4637 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4638 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4639 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4640 			params.std.addr_lo =
4641 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4642 			params.std.addr_hi =
4643 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4644 
4645 			if (pn_size) {
4646 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
4647 					     soc, i, rx_pn[3], rx_pn[2],
4648 					     rx_pn[1], rx_pn[0]);
4649 				params.u.upd_queue_params.update_pn_valid = 1;
4650 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
4651 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
4652 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
4653 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
4654 			}
4655 			rx_tid->pn_size = pn_size;
4656 			if (dp_reo_send_cmd(soc,
4657 					    CMD_UPDATE_RX_REO_QUEUE,
4658 					    &params, dp_rx_tid_update_cb,
4659 					    rx_tid)) {
4660 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
4661 					   "tid %d desc %pK", rx_tid->tid,
4662 					   (void *)(rx_tid->hw_qdesc_paddr));
4663 				DP_STATS_INC(soc,
4664 					     rx.err.reo_cmd_send_fail, 1);
4665 			}
4666 		} else {
4667 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
4668 		}
4669 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4670 	}
4671 
4672 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4673 
4674 	return QDF_STATUS_SUCCESS;
4675 }
4676 
4677 QDF_STATUS
4678 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4679 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
4680 			  bool is_unicast)
4681 {
4682 	struct dp_peer *peer =
4683 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
4684 						       peer_mac, 0, vdev_id,
4685 						       DP_MOD_ID_CDP);
4686 	int sec_index;
4687 
4688 	if (!peer) {
4689 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4690 		return QDF_STATUS_E_FAILURE;
4691 	}
4692 
4693 	if (!peer->txrx_peer) {
4694 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4695 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
4696 		return QDF_STATUS_E_FAILURE;
4697 	}
4698 
4699 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
4700 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4701 		     is_unicast ? "ucast" : "mcast", sec_type);
4702 
4703 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
4704 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
4705 
4706 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4707 
4708 	return QDF_STATUS_SUCCESS;
4709 }
4710 
4711 void
4712 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
4713 		      enum cdp_sec_type sec_type, int is_unicast,
4714 		      u_int32_t *michael_key,
4715 		      u_int32_t *rx_pn)
4716 {
4717 	struct dp_peer *peer;
4718 	struct dp_txrx_peer *txrx_peer;
4719 	int sec_index;
4720 
4721 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4722 	if (!peer) {
4723 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
4724 			    peer_id);
4725 		return;
4726 	}
4727 	txrx_peer = dp_get_txrx_peer(peer);
4728 	if (!txrx_peer) {
4729 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
4730 			    peer_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4731 		return;
4732 	}
4733 
4734 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
4735 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4736 			  is_unicast ? "ucast" : "mcast", sec_type);
4737 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
4738 
4739 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
4740 #ifdef notyet /* TODO: See if this is required for defrag support */
4741 	/* michael key only valid for TKIP, but for simplicity,
4742 	 * copy it anyway
4743 	 */
4744 	qdf_mem_copy(
4745 		&peer->txrx_peer->security[sec_index].michael_key[0],
4746 		michael_key,
4747 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
4748 #ifdef BIG_ENDIAN_HOST
4749 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
4750 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
4751 #endif /* BIG_ENDIAN_HOST */
4752 #endif
4753 
4754 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
4755 	if (sec_type != cdp_sec_type_wapi) {
4756 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
4757 	} else {
4758 		for (i = 0; i < DP_MAX_TIDS; i++) {
4759 			/*
4760 			 * Setting PN valid bit for WAPI sec_type,
4761 			 * since WAPI PN has to be started with predefined value
4762 			 */
4763 			peer->tids_last_pn_valid[i] = 1;
4764 			qdf_mem_copy(
4765 				(u_int8_t *) &peer->tids_last_pn[i],
4766 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
4767 			peer->tids_last_pn[i].pn128[1] =
4768 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
4769 			peer->tids_last_pn[i].pn128[0] =
4770 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
4771 		}
4772 	}
4773 #endif
4774 	/* TODO: Update HW TID queue with PN check parameters (pn type for
4775 	 * all security types and last pn for WAPI) once REO command API
4776 	 * is available
4777 	 */
4778 
4779 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4780 }
4781 
4782 #ifdef QCA_PEER_EXT_STATS
4783 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
4784 					 struct dp_txrx_peer *txrx_peer)
4785 {
4786 	uint8_t tid, ctx_id;
4787 
4788 	if (!soc || !txrx_peer) {
4789 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
4790 		return QDF_STATUS_E_INVAL;
4791 	}
4792 
4793 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4794 		return QDF_STATUS_SUCCESS;
4795 
4796 	/*
4797 	 * Allocate memory for peer extended stats.
4798 	 */
4799 	txrx_peer->delay_stats =
4800 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
4801 	if (!txrx_peer->delay_stats) {
4802 		dp_err("Peer extended stats obj alloc failed!!");
4803 		return QDF_STATUS_E_NOMEM;
4804 	}
4805 
4806 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
4807 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
4808 			struct cdp_delay_tx_stats *tx_delay =
4809 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
4810 			struct cdp_delay_rx_stats *rx_delay =
4811 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
4812 
4813 			dp_hist_init(&tx_delay->tx_swq_delay,
4814 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
4815 			dp_hist_init(&tx_delay->hwtx_delay,
4816 				     CDP_HIST_TYPE_HW_COMP_DELAY);
4817 			dp_hist_init(&rx_delay->to_stack_delay,
4818 				     CDP_HIST_TYPE_REAP_STACK);
4819 		}
4820 	}
4821 
4822 	return QDF_STATUS_SUCCESS;
4823 }
4824 
4825 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
4826 				     struct dp_txrx_peer *txrx_peer)
4827 {
4828 	if (!txrx_peer) {
4829 		dp_warn("peer_ext dealloc failed due to NULL peer object");
4830 		return;
4831 	}
4832 
4833 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4834 		return;
4835 
4836 	if (!txrx_peer->delay_stats)
4837 		return;
4838 
4839 	qdf_mem_free(txrx_peer->delay_stats);
4840 	txrx_peer->delay_stats = NULL;
4841 }
4842 
4843 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
4844 {
4845 	if (txrx_peer->delay_stats)
4846 		qdf_mem_zero(txrx_peer->delay_stats,
4847 			     sizeof(struct dp_peer_delay_stats));
4848 }
4849 #endif
4850 
4851 #ifdef WLAN_PEER_JITTER
4852 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
4853 					  struct dp_txrx_peer *txrx_peer)
4854 {
4855 	if (!pdev || !txrx_peer) {
4856 		dp_warn("Null pdev or peer");
4857 		return QDF_STATUS_E_INVAL;
4858 	}
4859 
4860 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
4861 		return QDF_STATUS_SUCCESS;
4862 
4863 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4864 		/*
4865 		 * Allocate memory on per tid basis when nss is enabled
4866 		 */
4867 		txrx_peer->jitter_stats =
4868 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
4869 					* DP_MAX_TIDS);
4870 	} else {
4871 		/*
4872 		 * Allocate memory on per tid per ring basis
4873 		 */
4874 		txrx_peer->jitter_stats =
4875 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
4876 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
4877 	}
4878 
4879 	if (!txrx_peer->jitter_stats) {
4880 		dp_warn("Jitter stats obj alloc failed!!");
4881 		return QDF_STATUS_E_NOMEM;
4882 	}
4883 
4884 	return QDF_STATUS_SUCCESS;
4885 }
4886 
4887 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
4888 				      struct dp_txrx_peer *txrx_peer)
4889 {
4890 	if (!pdev || !txrx_peer) {
4891 		dp_warn("Null pdev or peer");
4892 		return;
4893 	}
4894 
4895 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
4896 		return;
4897 
4898 	if (txrx_peer->jitter_stats) {
4899 		qdf_mem_free(txrx_peer->jitter_stats);
4900 		txrx_peer->jitter_stats = NULL;
4901 	}
4902 }
4903 
4904 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
4905 {
4906 	struct cdp_peer_tid_stats *jitter_stats = NULL;
4907 
4908 	if (!txrx_peer) {
4909 		dp_warn("Null peer");
4910 		return;
4911 	}
4912 
4913 	if (!wlan_cfg_is_peer_jitter_stats_enabled(txrx_peer->
4914 						   vdev->
4915 						   pdev->soc->wlan_cfg_ctx))
4916 		return;
4917 
4918 	jitter_stats = txrx_peer->jitter_stats;
4919 	if (!jitter_stats)
4920 		return;
4921 
4922 	if (wlan_cfg_get_dp_pdev_nss_enabled(txrx_peer->
4923 					     vdev->pdev->wlan_cfg_ctx))
4924 		qdf_mem_zero(jitter_stats,
4925 			     sizeof(struct cdp_peer_tid_stats) *
4926 			     DP_MAX_TIDS);
4927 
4928 	else
4929 		qdf_mem_zero(jitter_stats,
4930 			     sizeof(struct cdp_peer_tid_stats) *
4931 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
4933 }
4934 #endif
4935 
4936 QDF_STATUS
4937 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
4938 			uint8_t tid, uint16_t win_sz)
4939 {
4940 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
4941 	struct dp_peer *peer;
4942 	struct dp_rx_tid *rx_tid;
4943 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4944 
4945 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4946 
4947 	if (!peer) {
4948 		dp_peer_err("%pK: Couldn't find peer from ID %d",
4949 			    soc, peer_id);
4950 		return QDF_STATUS_E_FAILURE;
4951 	}
4952 
4953 	qdf_assert_always(tid < DP_MAX_TIDS);
4954 
4955 	rx_tid = &peer->rx_tid[tid];
4956 
4957 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
4958 		if (!rx_tid->delba_tx_status) {
4959 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
4960 				     soc, peer_id, tid, win_sz);
4961 
4962 			qdf_spin_lock_bh(&rx_tid->tid_lock);
4963 
4964 			rx_tid->delba_tx_status = 1;
4965 
4966 			rx_tid->rx_ba_win_size_override =
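			/* Override the BA window used for the re-negotiated
			 * session, clamped to at most 63 entries.
			 */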
4967 			    qdf_min((uint16_t)63, win_sz);
4968 
4969 			rx_tid->delba_rcode =
4970 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
4971 
4972 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4973 
4974 			if (soc->cdp_soc.ol_ops->send_delba)
4975 				soc->cdp_soc.ol_ops->send_delba(
4976 					peer->vdev->pdev->soc->ctrl_psoc,
4977 					peer->vdev->vdev_id,
4978 					peer->mac_addr.raw,
4979 					tid,
4980 					rx_tid->delba_rcode,
4981 					CDP_DELBA_REASON_NONE);
4982 		}
4983 	} else {
4984 		dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid);
4985 		status = QDF_STATUS_E_FAILURE;
4986 	}
4987 
4988 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4989 
4990 	return status;
4991 }
4992 
4993 #ifdef DP_PEER_EXTENDED_API
4994 /**
4995  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
4996  * @soc: DP soc handle
4997  * @txrx_peer: Core txrx_peer handle
4998  * @set_bw: enum of bandwidth to be set for this peer connection
4999  *
5000  * Return: None
5001  */
5002 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
5003 			   enum cdp_peer_bw set_bw)
5004 {
5005 	if (!txrx_peer)
5006 		return;
5007 
5008 	txrx_peer->bw = set_bw;
5009 
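	/* Wider bandwidths (160/320 MHz) use mpdu_retry_threshold_2 from the
	 * SoC config; all other bandwidths use mpdu_retry_threshold_1.
	 */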
5010 	switch (set_bw) {
5011 	case CDP_160_MHZ:
5012 	case CDP_320_MHZ:
5013 		txrx_peer->mpdu_retry_threshold =
5014 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
5015 		break;
5016 	case CDP_20_MHZ:
5017 	case CDP_40_MHZ:
5018 	case CDP_80_MHZ:
5019 	default:
5020 		txrx_peer->mpdu_retry_threshold =
5021 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
5022 		break;
5023 	}
5024 
5025 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
5026 		txrx_peer->peer_id, txrx_peer->bw,
5027 		txrx_peer->mpdu_retry_threshold);
5028 }
5029 
5030 #ifdef WLAN_FEATURE_11BE_MLO
5031 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5032 			    struct ol_txrx_desc_type *sta_desc)
5033 {
5034 	struct dp_peer *peer;
5035 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5036 
5037 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5038 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5039 
5040 	if (!peer)
5041 		return QDF_STATUS_E_FAULT;
5042 
5043 	qdf_spin_lock_bh(&peer->peer_info_lock);
5044 	peer->state = OL_TXRX_PEER_STATE_CONN;
5045 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5046 
5047 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5048 
5049 	dp_rx_flush_rx_cached(peer, false);
5050 
5051 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5052 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
5053 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
5054 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
5055 		peer->mld_peer->state = peer->state;
5056 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
5057 		dp_rx_flush_rx_cached(peer->mld_peer, false);
5058 	}
5059 
5060 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5061 
5062 	return QDF_STATUS_SUCCESS;
5063 }
5064 
5065 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5066 				enum ol_txrx_peer_state state)
5067 {
5068 	struct dp_peer *peer;
5069 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5070 
5071 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5072 				       DP_MOD_ID_CDP);
5073 	if (!peer) {
5074 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
5075 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5076 		return QDF_STATUS_E_FAILURE;
5077 	}
5078 	peer->state = state;
5079 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5080 
5081 	if (peer->txrx_peer)
5082 		peer->txrx_peer->authorize = peer->authorize;
5083 
5084 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
5085 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5086 		     peer->state);
5087 
5088 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5089 		peer->mld_peer->state = peer->state;
5090 		peer->mld_peer->txrx_peer->authorize = peer->authorize;
5091 		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
5092 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
5093 			     peer->mld_peer->state);
5094 	}
5095 
5096 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5097 	 * Decrement it here.
5098 	 */
5099 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5100 
5101 	return QDF_STATUS_SUCCESS;
5102 }
5103 #else
5104 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5105 			    struct ol_txrx_desc_type *sta_desc)
5106 {
5107 	struct dp_peer *peer;
5108 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5109 
5110 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5111 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5112 
5113 	if (!peer)
5114 		return QDF_STATUS_E_FAULT;
5115 
5116 	qdf_spin_lock_bh(&peer->peer_info_lock);
5117 	peer->state = OL_TXRX_PEER_STATE_CONN;
5118 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5119 
5120 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5121 
5122 	dp_rx_flush_rx_cached(peer, false);
5123 
5124 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5125 
5126 	return QDF_STATUS_SUCCESS;
5127 }
5128 
5129 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5130 				enum ol_txrx_peer_state state)
5131 {
5132 	struct dp_peer *peer;
5133 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5134 
5135 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5136 				       DP_MOD_ID_CDP);
5137 	if (!peer) {
5138 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
5139 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5140 		return QDF_STATUS_E_FAILURE;
5141 	}
5142 	peer->state = state;
5143 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5144 
5145 	if (peer->txrx_peer)
5146 		peer->txrx_peer->authorize = peer->authorize;
5147 
5148 	dp_info("peer %pK state %d", peer, peer->state);
5149 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5150 	 * Decrement it here.
5151 	 */
5152 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5153 
5154 	return QDF_STATUS_SUCCESS;
5155 }
5156 #endif
5157 
5158 QDF_STATUS
5159 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5160 	      struct qdf_mac_addr peer_addr)
5161 {
5162 	struct dp_peer *peer;
5163 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5164 
5165 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
5166 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5167 	if (!peer || !peer->valid)
5168 		return QDF_STATUS_E_FAULT;
5169 
5170 	dp_clear_peer_internal(soc, peer);
5171 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5172 	return QDF_STATUS_SUCCESS;
5173 }
5174 
5175 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5176 			 uint8_t *vdev_id)
5177 {
5178 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5179 	struct dp_peer *peer =
5180 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5181 				       DP_MOD_ID_CDP);
5182 
5183 	if (!peer)
5184 		return QDF_STATUS_E_FAILURE;
5185 
5186 	dp_info("peer %pK vdev %pK vdev id %d",
5187 		peer, peer->vdev, peer->vdev->vdev_id);
5188 	*vdev_id = peer->vdev->vdev_id;
5189 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5190 	 * Decrement it here.
5191 	 */
5192 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5193 
5194 	return QDF_STATUS_SUCCESS;
5195 }
5196 
5197 struct cdp_vdev *
5198 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
5199 			 struct qdf_mac_addr peer_addr)
5200 {
5201 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5202 	struct dp_peer *peer = NULL;
5203 	struct cdp_vdev *vdev = NULL;
5204 
5205 	if (!pdev) {
5206 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
5207 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
5208 		return NULL;
5209 	}
5210 
5211 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
5212 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
5213 	if (!peer) {
5214 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5215 			  "Peer not found for peer_addr: "QDF_MAC_ADDR_FMT,
5216 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
5217 		return NULL;
5218 	}
5219 
5220 	vdev = (struct cdp_vdev *)peer->vdev;
5221 
5222 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5223 	return vdev;
5224 }
5225 
5226 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
5227 {
5228 	struct dp_peer *peer = peer_handle;
5229 
5230 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
5231 	return (struct cdp_vdev *)peer->vdev;
5232 }
5233 
5234 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
5235 {
5236 	struct dp_peer *peer = peer_handle;
5237 	uint8_t *mac;
5238 
5239 	mac = peer->mac_addr.raw;
5240 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5241 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
5242 	return peer->mac_addr.raw;
5243 }
5244 
5245 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5246 		      uint8_t *peer_mac)
5247 {
5248 	enum ol_txrx_peer_state peer_state;
5249 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5250 	struct cdp_peer_info peer_info = { 0 };
5251 	struct dp_peer *peer;
5252 	struct dp_peer *tgt_peer;
5253 
5254 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
5255 				 false, CDP_WILD_PEER_TYPE);
5256 
5257 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
5258 
5259 	if (!peer)
5260 		return OL_TXRX_PEER_STATE_INVALID;
5261 
5262 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
5263 
5264 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
5265 	peer_state = tgt_peer->state;
5266 
5267 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5268 
5269 	return peer_state;
5270 }
5271 
5272 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
5273 {
5274 	int i;
5275 
5276 	/* point the freelist to the first ID */
5277 	pdev->local_peer_ids.freelist = 0;
5278 
5279 	/* link each ID to the next one */
5280 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
5281 		pdev->local_peer_ids.pool[i] = i + 1;
5282 		pdev->local_peer_ids.map[i] = NULL;
5283 	}
5284 
5285 	/* link the last ID to itself, to mark the end of the list */
5286 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
5287 	pdev->local_peer_ids.pool[i] = i;
5288 
5289 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
5290 	DP_TRACE(INFO, "Peer pool init");
5291 }
5292 
5293 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5294 {
5295 	int i;
5296 
5297 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5298 	i = pdev->local_peer_ids.freelist;
5299 	if (pdev->local_peer_ids.pool[i] == i) {
5300 		/* the list is empty, except for the list-end marker */
5301 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5302 	} else {
5303 		/* take the head ID and advance the freelist */
5304 		peer->local_id = i;
5305 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5306 		pdev->local_peer_ids.map[i] = peer;
5307 	}
5308 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5309 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5310 }
5311 
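/**
 * dp_local_peer_id_free() - Return a peer's local peer id to the pool
 * @pdev: DP pdev handle
 * @peer: DP peer handle
 *
 * Return: None
 */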
5312 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5313 {
5314 	int i = peer->local_id;
5315 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5316 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5317 		return;
5318 	}
5319 
5320 	/* put this ID on the head of the freelist */
5321 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5322 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5323 	pdev->local_peer_ids.freelist = i;
5324 	pdev->local_peer_ids.map[i] = NULL;
5325 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5326 }
5327 
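/**
 * dp_find_peer_exist_on_vdev() - Check if a peer exists on the given vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev to search
 * @peer_addr: MAC address of the peer
 *
 * Return: true if a peer with the given MAC address exists on the vdev,
 *	   false otherwise
 */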
5328 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5329 				uint8_t vdev_id, uint8_t *peer_addr)
5330 {
5331 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5332 	struct dp_peer *peer = NULL;
5333 
5334 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5335 				      DP_MOD_ID_CDP);
5336 	if (!peer)
5337 		return false;
5338 
5339 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5340 
5341 	return true;
5342 }
5343 
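/**
 * dp_find_peer_exist_on_other_vdev() - Check if a peer exists on another vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev to exclude from the search
 * @peer_addr: MAC address of the peer
 * @max_bssid: number of vdev ids to check
 *
 * Return: true if a peer with the given MAC address exists on any vdev
 *	   other than @vdev_id, false otherwise
 */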
5344 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5345 				      uint8_t vdev_id, uint8_t *peer_addr,
5346 				      uint16_t max_bssid)
5347 {
5348 	int i;
5349 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5350 	struct dp_peer *peer = NULL;
5351 
5352 	for (i = 0; i < max_bssid; i++) {
5353 		/* Need to check vdevs other than the vdev_id */
5354 		if (vdev_id == i)
5355 			continue;
5356 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5357 					      DP_MOD_ID_CDP);
5358 		if (peer) {
5359 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
5360 			       QDF_MAC_ADDR_REF(peer_addr), i);
5361 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5362 			return true;
5363 		}
5364 	}
5365 
5366 	return false;
5367 }
5368 
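/**
 * dp_set_peer_as_tdls_peer() - Mark or unmark a peer as a TDLS peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @val: true to mark the peer as a TDLS peer, false otherwise
 *
 * Return: None
 */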
5369 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5370 			      uint8_t *peer_mac, bool val)
5371 {
5372 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5373 	struct dp_peer *peer = NULL;
5374 
5375 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5376 				      DP_MOD_ID_CDP);
5377 	if (!peer) {
5378 		dp_err("Failed to find peer for: " QDF_MAC_ADDR_FMT,
5379 		       QDF_MAC_ADDR_REF(peer_mac));
5380 		return;
5381 	}
5382 
5383 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
5384 		val, QDF_MAC_ADDR_REF(peer_mac));
5385 	peer->is_tdls_peer = val;
5386 
5387 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5388 }
5389 #endif
5390 
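/**
 * dp_find_peer_exist() - Check if a peer exists on the soc
 * @soc_hdl: CDP soc handle
 * @pdev_id: id of the pdev
 * @peer_addr: MAC address of the peer
 *
 * Return: true if a peer with the given MAC address exists on any vdev,
 *	   false otherwise
 */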
5391 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5392 			uint8_t *peer_addr)
5393 {
5394 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5395 	struct dp_peer *peer = NULL;
5396 
5397 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5398 				      DP_MOD_ID_CDP);
5399 	if (peer) {
5400 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5401 		return true;
5402 	}
5403 
5404 	return false;
5405 }
5406 
5407 #ifdef IPA_OFFLOAD
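/**
 * dp_peer_get_rxtid_stats_ipa() - Request REO queue stats for a peer's rx TIDs
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: callback invoked with the queue stats for each TID
 *
 * For each data TID (and the non-QoS TID) that has a hardware queue
 * descriptor, a GET_QUEUE_STATS REO command is sent with the TID and peer id
 * packed into the callback context, followed by a no-invalidate cache flush
 * so the descriptor in memory is updated.
 *
 * Return: number of stats commands successfully sent
 */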
5408 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
5409 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
5410 {
5411 	struct dp_soc *soc = peer->vdev->pdev->soc;
5412 	struct hal_reo_cmd_params params;
5413 	int i;
5414 	int stats_cmd_sent_cnt = 0;
5415 	QDF_STATUS status;
5416 	uint16_t peer_id = peer->peer_id;
5417 	unsigned long comb_peer_id_tid;
5418 	struct dp_rx_tid *rx_tid;
5419 
5420 	if (!dp_stats_cmd_cb)
5421 		return stats_cmd_sent_cnt;
5422 
5423 	qdf_mem_zero(&params, sizeof(params));
5424 	for (i = 0; i < DP_MAX_TIDS; i++) {
5425 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5426 			continue;
5427 
5428 		rx_tid = &peer->rx_tid[i];
5429 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5430 			params.std.need_status = 1;
5431 			params.std.addr_lo =
5432 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5433 			params.std.addr_hi =
5434 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5435 			params.u.stats_params.clear = 1;
5436 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
5437 					    | peer_id);
5438 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
5439 						 &params, dp_stats_cmd_cb,
5440 						 (void *)comb_peer_id_tid);
5441 			if (QDF_IS_STATUS_SUCCESS(status))
5442 				stats_cmd_sent_cnt++;
5443 
5444 			/* Flush the REO descriptor from the HW cache so the
5445 			 * stats in descriptor memory are updated (debug aid).
5446 			 */
5447 			qdf_mem_zero(&params, sizeof(params));
5448 			params.std.need_status = 0;
5449 			params.std.addr_lo =
5450 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5451 			params.std.addr_hi =
5452 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5453 			params.u.fl_cache_params.flush_no_inval = 1;
5454 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5455 					NULL);
5456 		}
5457 	}
5458 
5459 	return stats_cmd_sent_cnt;
5460 }
5461 
5462 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
5463 
5464 #endif
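
/**
 * dp_peer_rxtid_stats() - Request REO queue stats for a peer's rx TIDs
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: callback invoked with the queue stats for each TID
 * @cb_ctxt: callback context; if NULL, the rx TID itself is passed instead
 *
 * Return: number of stats commands successfully sent
 */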
5465 int dp_peer_rxtid_stats(struct dp_peer *peer,
5466 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5467 			void *cb_ctxt)
5468 {
5469 	struct dp_soc *soc = peer->vdev->pdev->soc;
5470 	struct hal_reo_cmd_params params;
5471 	int i;
5472 	int stats_cmd_sent_cnt = 0;
5473 	QDF_STATUS status;
5474 	struct dp_rx_tid *rx_tid;
5475 
5476 	if (!dp_stats_cmd_cb)
5477 		return stats_cmd_sent_cnt;
5478 
5479 	qdf_mem_zero(&params, sizeof(params));
5480 	for (i = 0; i < DP_MAX_TIDS; i++) {
5481 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5482 			continue;
5483 
5484 		rx_tid = &peer->rx_tid[i];
5485 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5486 			params.std.need_status = 1;
5487 			params.std.addr_lo =
5488 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5489 			params.std.addr_hi =
5490 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5491 
5492 			if (cb_ctxt) {
5493 				status = dp_reo_send_cmd(
5494 						soc, CMD_GET_QUEUE_STATS,
5495 						&params, dp_stats_cmd_cb,
5496 						cb_ctxt);
5497 			} else {
5498 				status = dp_reo_send_cmd(
5499 						soc, CMD_GET_QUEUE_STATS,
5500 						&params, dp_stats_cmd_cb,
5501 						rx_tid);
5502 			}
5503 
5504 			if (QDF_IS_STATUS_SUCCESS(status))
5505 				stats_cmd_sent_cnt++;
5506 
5507 
5508 			/* Flush the REO descriptor from the HW cache so the
5509 			 * stats in descriptor memory are updated (debug aid).
5510 			 */
5511 			qdf_mem_zero(&params, sizeof(params));
5512 			params.std.need_status = 0;
5513 			params.std.addr_lo =
5514 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5515 			params.std.addr_hi =
5516 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5517 			params.u.fl_cache_params.flush_no_inval = 1;
5518 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5519 					NULL);
5520 		}
5521 	}
5522 
5523 	return stats_cmd_sent_cnt;
5524 }
5525 
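/**
 * dp_set_michael_key() - Store the Michael (TKIP MIC) key for a peer
 * @soc: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: pointer to the key material
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the peer
 *	   is not found
 */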
5526 QDF_STATUS
5527 dp_set_michael_key(struct cdp_soc_t *soc,
5528 		   uint8_t vdev_id,
5529 		   uint8_t *peer_mac,
5530 		   bool is_unicast, uint32_t *key)
5531 {
5532 	uint8_t sec_index = is_unicast ? 1 : 0;
5533 	struct dp_peer *peer =
5534 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5535 						       peer_mac, 0, vdev_id,
5536 						       DP_MOD_ID_CDP);
5537 
5538 	if (!peer) {
5539 		dp_peer_err("%pK: peer not found", soc);
5540 		return QDF_STATUS_E_FAILURE;
5541 	}
5542 
5543 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
5544 		     key, IEEE80211_WEP_MICLEN);
5545 
5546 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5547 
5548 	return QDF_STATUS_SUCCESS;
5549 }
5550 
5551 
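/**
 * dp_vdev_bss_peer_ref_n_get() - Get the bss peer of a vdev with a reference
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @mod_id: module id of the caller taking the reference
 *
 * Return: bss peer with a reference held, or NULL if no bss peer is found
 *	   or the reference cannot be taken
 */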
5552 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
5553 					   struct dp_vdev *vdev,
5554 					   enum dp_mod_id mod_id)
5555 {
5556 	struct dp_peer *peer = NULL;
5557 
5558 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5559 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5560 		if (peer->bss_peer)
5561 			break;
5562 	}
5563 
5564 	if (!peer) {
5565 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5566 		return NULL;
5567 	}
5568 
5569 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
5570 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5571 		return peer;
5572 	}
5573 
5574 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
5575 	return NULL;
5576 }
5577 
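/**
 * dp_sta_vdev_self_peer_ref_n_get() - Get the self peer of a STA vdev with
 *	a reference
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @mod_id: module id of the caller taking the reference
 *
 * Return: self peer with a reference held, or NULL if the vdev is not in
 *	   STA mode, no self peer is found, or the reference cannot be taken
 */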
5578 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
5579 						struct dp_vdev *vdev,
5580 						enum dp_mod_id mod_id)
5581 {
5582 	struct dp_peer *peer;
5583 
5584 	if (vdev->opmode != wlan_op_mode_sta)
5585 		return NULL;
5586 
5587 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5588 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5589 		if (peer->sta_self_peer)
5590 			break;
5591 	}
5592 
5593 	if (!peer) {
5594 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5595 		return NULL;
5596 	}
5597 
5598 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
5599 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5600 		return peer;
5601 	}
5602 
5603 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
5604 	return NULL;
5605 }
5606 
5607 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
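/**
 * dp_dump_rx_reo_queue_info() - REO command callback to dump an rx REO
 *	queue descriptor
 * @soc: DP soc handle
 * @cb_ctxt: callback context, the rx TID whose queue was flushed
 * @reo_status: REO command status
 *
 * Return: None
 */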
5608 void dp_dump_rx_reo_queue_info(
5609 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
5610 {
5611 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
5612 
5613 	if (!rx_tid)
5614 		return;
5615 
5616 	if (reo_status->fl_cache_status.header.status !=
5617 		HAL_REO_CMD_SUCCESS) {
5618 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
5619 			  reo_status->fl_cache_status.header.status);
5620 		return;
5621 	}
5622 	qdf_spin_lock_bh(&rx_tid->tid_lock);
5623 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
5624 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
5625 }
5626 
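/**
 * dp_send_cache_flush_for_rx_tid() - Flush the REO queue descriptors of a
 *	peer's rx TIDs from the HW cache
 * @soc: DP soc handle
 * @peer: DP peer handle
 *
 * Each flush completion invokes dp_dump_rx_reo_queue_info() to dump the
 * descriptor contents from DDR.
 *
 * Return: None
 */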
5627 void dp_send_cache_flush_for_rx_tid(
5628 	struct dp_soc *soc, struct dp_peer *peer)
5629 {
5630 	int i;
5631 	struct dp_rx_tid *rx_tid;
5632 	struct hal_reo_cmd_params params;
5633 
5634 	if (!peer) {
5635 		dp_err_rl("Peer is NULL");
5636 		return;
5637 	}
5638 
5639 	for (i = 0; i < DP_MAX_TIDS; i++) {
5640 		rx_tid = &peer->rx_tid[i];
5643 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5644 		if (rx_tid->hw_qdesc_vaddr_aligned) {
5645 			qdf_mem_zero(&params, sizeof(params));
5646 			params.std.need_status = 1;
5647 			params.std.addr_lo =
5648 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5649 			params.std.addr_hi =
5650 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5651 			params.u.fl_cache_params.flush_no_inval = 0;
5652 
5653 			if (rx_tid->ba_win_size > 256)
5654 				params.u.fl_cache_params.flush_q_1k_desc = 1;
5655 			params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
5656 
5657 			if (QDF_STATUS_SUCCESS !=
5658 				dp_reo_send_cmd(
5659 					soc, CMD_FLUSH_CACHE,
5660 					&params, dp_dump_rx_reo_queue_info,
5661 					(void *)rx_tid)) {
5662 				dp_err_rl("cache flush send failed tid %d",
5663 					  rx_tid->tid);
5664 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
5665 				break;
5666 			}
5667 		}
5668 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5669 	}
5670 }
5671 
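/**
 * dp_get_rx_reo_queue_info() - Dump the rx REO queue descriptors of the bss
 *	peer of a vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev
 *
 * Looks up the bss peer of the vdev and triggers a cache flush and dump of
 * its rx REO queue descriptors.
 *
 * Return: None
 */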
5672 void dp_get_rx_reo_queue_info(
5673 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
5674 {
5675 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5676 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5677 						     DP_MOD_ID_GENERIC_STATS);
5678 	struct dp_peer *peer = NULL;
5679 
5680 	if (!vdev) {
5681 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
5682 		goto failed;
5683 	}
5684 
5685 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
5686 
5687 	if (!peer) {
5688 		dp_err_rl("Peer is NULL");
5689 		goto failed;
5690 	}
5691 	dp_send_cache_flush_for_rx_tid(soc, peer);
5692 failed:
5693 	if (peer)
5694 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
5695 	if (vdev)
5696 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
5697 }
5698 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
5699 
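/**
 * dp_peer_flush_frags() - Flush rx fragments held for a peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 *
 * Removes every TID of the peer from the defrag waitlist and flushes any
 * queued reorder fragments.
 *
 * Return: None
 */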
5700 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5701 			 uint8_t *peer_mac)
5702 {
5703 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5704 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
5705 							      vdev_id,
5706 							      DP_MOD_ID_CDP);
5707 	struct dp_txrx_peer *txrx_peer;
5708 	uint8_t tid;
5709 	struct dp_rx_tid_defrag *defrag_rx_tid;
5710 
5711 	if (!peer)
5712 		return;
5713 
5714 	if (!peer->txrx_peer)
5715 		goto fail;
5716 
5717 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
5718 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
5719 
5720 	txrx_peer = peer->txrx_peer;
5721 
5722 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
5723 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
5724 
5725 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
5726 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
5727 		dp_rx_reorder_flush_frag(txrx_peer, tid);
5728 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
5729 	}
5730 fail:
5731 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5732 }
5733 
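/**
 * dp_peer_find_by_id_valid() - Check if a peer id maps to a valid peer
 * @soc: DP soc handle
 * @peer_id: peer id to check
 *
 * Return: true if the peer id maps to a valid peer, false otherwise
 */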
5734 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
5735 {
5736 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
5737 						     DP_MOD_ID_HTT);
5738 
5739 	if (peer) {
5740 		/*
5741 		 * Decrement the peer ref which is taken as part of
5742 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
5743 		 */
5744 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5745 
5746 		return true;
5747 	}
5748 
5749 	return false;
5750 }
5751 
5752 qdf_export_symbol(dp_peer_find_by_id_valid);
5753