/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <qdf_module.h>
#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif
#ifdef BYPASS_OL_OPS
#include <target_if_dp.h>
#endif

#ifdef REO_QDESC_HISTORY
#define REO_QDESC_HISTORY_SIZE 512
uint64_t reo_qdesc_history_idx;
struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
#endif
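
/*
 * Example (illustrative sketch, not part of the driver): the history ring
 * above is indexed with "idx & (REO_QDESC_HISTORY_SIZE - 1)", which is only
 * equivalent to "idx % REO_QDESC_HISTORY_SIZE" because the size is a power
 * of two. A minimal standalone model of this indexing scheme:
 *
 *	#include <stdint.h>
 *
 *	#define HISTORY_SIZE 512	// must stay a power of two
 *
 *	static uint64_t history_idx;
 *	static int history[HISTORY_SIZE];
 *
 *	static void history_add(int event)
 *	{
 *		// Monotonic counter; masking wraps it into the ring.
 *		uint32_t idx = ++history_idx & (HISTORY_SIZE - 1);
 *
 *		history[idx] = event;
 *	}
 *
 * If HISTORY_SIZE were not a power of two, the mask would skip slots and
 * alias entries; a build-time check such as
 * "_Static_assert((HISTORY_SIZE & (HISTORY_SIZE - 1)) == 0, "pow2")"
 * guards against that.
 */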

#ifdef FEATURE_AST
#ifdef BYPASS_OL_OPS
/*
 * dp_add_wds_entry_wrapper() - Add a new AST entry for the wds station
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of the ast node
 * @flags: wds or hmwds
 * @type: type from enum cdp_txrx_ast_entry_type
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry in the fw.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	QDF_STATUS status;

	status = target_if_add_wds_entry(soc->ctrl_psoc,
					 peer->vdev->vdev_id,
					 peer->mac_addr.raw,
					 dest_macaddr,
					 WMI_HOST_WDS_FLAG_STATIC,
					 type);

	return qdf_status_to_os_return(status);
}

/*
 * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of the ast node
 * @flags: wds or hmwds
 *
 * This API is used to update the peer mac address for the AST entry
 * in the fw.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	QDF_STATUS status;

	status = target_if_update_wds_entry(soc->ctrl_psoc,
					    peer->vdev->vdev_id,
					    dest_macaddr,
					    peer->mac_addr.raw,
					    WMI_HOST_WDS_FLAG_STATIC);

	return qdf_status_to_os_return(status);
}

/*
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of the ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: flag to indicate if the entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from the fw.
 *
 * Return: None
 */
static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
				     uint8_t vdev_id,
				     uint8_t *wds_macaddr,
				     uint8_t type,
				     uint8_t delete_in_fw)
{
	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
				wds_macaddr, type, delete_in_fw);
}
#else
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
					soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					peer->peer_id,
					dest_macaddr,
					peer->mac_addr.raw,
					flags,
					type);

	return status;
}

static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				dest_macaddr,
				peer->mac_addr.raw,
				flags);

	return status;
}

static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
				     uint8_t vdev_id,
				     uint8_t *wds_macaddr,
				     uint8_t type,
				     uint8_t delete_in_fw)
{
	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						vdev_id,
						wds_macaddr,
						type,
						delete_in_fw);
}
#endif
#endif
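
/*
 * Example (illustrative sketch, not part of the driver): the wrappers above
 * follow a common pattern where the same function name is compiled against
 * one of two backends, selected at build time, so callers never see the
 * #ifdef. A minimal standalone model of the pattern (USE_DIRECT_BACKEND is
 * a hypothetical build flag standing in for BYPASS_OL_OPS):
 *
 *	#include <stdio.h>
 *
 *	#ifdef USE_DIRECT_BACKEND
 *	static int send_cmd(int cmd)
 *	{
 *		printf("direct: %d\n", cmd);	// call the target directly
 *		return 0;
 *	}
 *	#else
 *	static int send_cmd(int cmd)
 *	{
 *		printf("via ops table: %d\n", cmd); // go through callbacks
 *		return 0;
 *	}
 *	#endif
 *
 * Callers simply invoke send_cmd(); whether direct target_if calls or the
 * cdp_soc.ol_ops callback table is used is decided once, at compile time.
 */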

#ifdef FEATURE_WDS
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	/* If peer map v2 is enabled, the ast entry is not freed here;
	 * it is supposed to be freed in the unmap event (after we
	 * receive the delete confirmation from the target).
	 *
	 * If peer_id is invalid, we did not get the peer map event
	 * for the peer; only in that case is the ast entry freed
	 * from here.
	 */

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
		return true;

	return false;
}
#else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	return false;
}

void dp_soc_wds_attach(struct dp_soc *soc)
{
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

#ifdef QCA_SUPPORT_WDS_EXTENDED
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_txrx_peer *txrx_peer;

	if (!vdev->wds_ext_enabled)
		return false;

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return false;

	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&txrx_peer->wds_ext.init))
		return true;

	return false;
}
#else
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	return false;
}
#endif
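
/*
 * Example (illustrative sketch, not part of the driver): the check above
 * gates on an atomically managed init bit so that readers never observe a
 * half-initialized WDS-ext peer. A minimal standalone model using C11
 * atomics (the driver itself uses the qdf_atomic_* wrappers):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	#define INIT_BIT 0
 *
 *	struct ext_peer {
 *		atomic_uint state;	// bit 0: initialized
 *	};
 *
 *	static void ext_peer_mark_init(struct ext_peer *p)
 *	{
 *		atomic_fetch_or(&p->state, 1u << INIT_BIT);
 *	}
 *
 *	static bool ext_peer_is_init(struct ext_peer *p)
 *	{
 *		return atomic_load(&p->state) & (1u << INIT_BIT);
 *	}
 *
 * Writers publish with a single atomic OR; readers test the bit without
 * taking any lock, which mirrors the qdf_atomic_test_bit() usage above.
 */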

#ifdef REO_QDESC_HISTORY
static inline void
dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
			    enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
static inline void
dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
				 enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = desc->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
#endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)

#else
#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc)

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
#endif

static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	dp_peer_debug("Setting SSN valid bit to %d",
		      valid);
}

QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
{
	uint32_t max_ast_index;

	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/* allocate ast_table for ast entry to ast_index map */
	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
	soc->ast_table = qdf_mem_malloc(max_ast_index *
					sizeof(struct dp_ast_entry *));
	if (!soc->ast_table) {
		dp_peer_err("%pK: ast_table memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peer_id;
	/* allocate the peer ID -> peer object map */
	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		dp_peer_err("%pK: peer map memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);

	qdf_spinlock_create(&soc->peer_map_lock);
	return QDF_STATUS_SUCCESS;
}

#define DP_AST_HASH_LOAD_MULT  2
#define DP_AST_HASH_LOAD_SHIFT 0

static inline uint32_t
dp_peer_find_hash_index(struct dp_soc *soc,
			union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;

	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
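
/*
 * Example (illustrative sketch, not part of the driver): the index above is
 * an XOR fold of the three 16-bit halves of the MAC address, further folded
 * down by idx_bits and masked to the table size. A standalone model (the
 * byte packing here is illustrative; the driver's union dp_align_mac_addr
 * defines the actual layout):
 *
 *	#include <stdint.h>
 *
 *	struct hash_cfg {
 *		uint32_t mask;		// table_size - 1 (power of two)
 *		uint32_t idx_bits;	// log2(table_size)
 *	};
 *
 *	static uint32_t mac_hash(const struct hash_cfg *cfg,
 *				 const uint8_t mac[6])
 *	{
 *		// Fold the 48-bit address into 16 bits, two bytes at a time.
 *		uint32_t index = (uint32_t)(mac[0] | mac[1] << 8) ^
 *				 (uint32_t)(mac[2] | mac[3] << 8) ^
 *				 (uint32_t)(mac[4] | mac[5] << 8);
 *
 *		index ^= index >> cfg->idx_bits;	// mix high bits down
 *		return index & cfg->mask;		// clamp to table size
 *	}
 *
 * For cfg->mask = 2047 and cfg->idx_bits = 11 (a 2048-bin table), every MAC
 * address maps to a bin in [0, 2047], with all six bytes contributing to
 * the bin choice.
 */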

/*
 * dp_peer_find_hash_find() - returns legacy or mlo link peer from
 *			      peer_hash_table matching vdev_id and mac_address
 * @soc: soc handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL on failure
 */
struct dp_peer *dp_peer_find_hash_find(
				struct dp_soc *soc, uint8_t *peer_mac_addr,
				int mac_addr_is_aligned, uint8_t vdev_id,
				enum dp_mod_id mod_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_peer *peer;

	if (!soc->peer_hash.bins)
		return NULL;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
			/* take peer reference before returning */
			if (dp_peer_get_ref(soc, peer, mod_id) !=
						QDF_STATUS_SUCCESS)
				peer = NULL;

			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return NULL; /* failure */
}

qdf_export_symbol(dp_peer_find_hash_find);
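
/*
 * Example (illustrative sketch): every successful dp_peer_find_hash_find()
 * call hands back a reference taken under mod_id, and the caller must drop
 * it with dp_peer_unref_delete() using the same mod_id. A typical caller,
 * assuming DP_MOD_ID_CDP and valid soc/peer_mac/vdev_id values:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	// ... use peer; the reference keeps it from being freed ...
 *
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *
 * Forgetting the unref leaks the peer object; unref'ing with a different
 * mod_id corrupts the per-module reference accounting.
 */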

#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
 * @soc: soc handle
 *
 * Return: none
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}

	if (soc->arch_ops.mlo_peer_find_hash_detach)
		soc->arch_ops.mlo_peer_find_hash_detach(soc);
}

/*
 * dp_peer_find_hash_attach() - allocate memory for peer_hash table
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);

	if (soc->arch_ops.mlo_peer_find_hash_attach &&
	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
			QDF_STATUS_SUCCESS)) {
		dp_peer_find_hash_detach(soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
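
/*
 * Example (worked numbers, illustrative): assuming max_peers = 1024 and
 * DP_PEER_HASH_LOAD_MULT = 2, DP_PEER_HASH_LOAD_SHIFT = 0 (the same load
 * factors used for the AST hash above), the sizing logic computes
 *
 *	hash_elems = (1024 * 2) >> 0 = 2048
 *	log2       = dp_log2_ceil(2048) = 11
 *	hash_elems = 1 << 11 = 2048
 *	mask       = 2047, idx_bits = 11
 *
 * i.e. roughly two bins per peer, rounded up to a power of two so the hash
 * index can be computed with a mask instead of a modulo.
 */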

/*
 * dp_peer_find_hash_add() - add peer to peer_hash_table
 * @soc: soc handle
 * @peer: peer handle (link or mld peer)
 *
 * Return: none
 */
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		qdf_spin_lock_bh(&soc->peer_hash_lock);

		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
							DP_MOD_ID_CONFIG))) {
			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return;
		}

		/*
		 * It is important to add the new peer at the tail of
		 * the peer list with the bin index. Together with having
		 * the hash_find function search from head to tail,
		 * this ensures that if two entries with the same MAC address
		 * are stored, the one added first will be found first.
		 */
		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
				  hash_list_elem);

		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_add)
			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}

/*
 * dp_peer_find_hash_remove() - remove peer from peer_hash_table
 * @soc: soc handle
 * @peer: peer handle
 *
 * Return: none
 */
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);

	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		/* Check that the bin is not empty before delete */
		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

		qdf_spin_lock_bh(&soc->peer_hash_lock);
		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
			      hash_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		QDF_ASSERT(found);
		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
			     hash_list_elem);

		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_remove)
			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}
#else
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);
	return QDF_STATUS_SUCCESS;
}

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}
}

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
		return;
	}

	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index.  Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);

	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the bin is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);

	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}
#endif /* WLAN_FEATURE_11BE_MLO */
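
/*
 * Example (illustrative sketch, not part of the driver): the peer hash is
 * an array of TAILQ bins; entries are inserted at the tail and searched
 * from the head, so of two entries with the same MAC the older one wins.
 * A minimal standalone model with <sys/queue.h>:
 *
 *	#include <sys/queue.h>
 *	#include <string.h>
 *
 *	struct entry {
 *		unsigned char mac[6];
 *		TAILQ_ENTRY(entry) link;
 *	};
 *
 *	TAILQ_HEAD(bin, entry);
 *
 *	static struct entry *bin_find(struct bin *b, const unsigned char *mac)
 *	{
 *		struct entry *e;
 *
 *		TAILQ_FOREACH(e, b, link)	// head-to-tail: oldest first
 *			if (!memcmp(e->mac, mac, 6))
 *				return e;
 *		return NULL;
 *	}
 *
 *	static void bin_add(struct bin *b, struct entry *e)
 *	{
 *		TAILQ_INSERT_TAIL(b, e, link);	// newest goes last
 *	}
 *
 * The insert-at-tail / search-from-head pairing is what gives the
 * "first added is found first" guarantee called out in the comments above.
 */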

/*
 * dp_peer_vdev_list_add() - add peer into vdev's peer list
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer: peer handle
 *
 * Return: none
 */
void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	/* only link peers will be added to the vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return;
	}

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	vdev->num_peers++;
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
 * @soc: SoC handle
 * @vdev: VDEV handle
 * @peer: peer handle
 *
 * Return: none
 */
void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer)
{
	uint8_t found = 0;
	struct dp_peer *tmppeer = NULL;

	/* only link peers will be added to the vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
			     peer_list_elem);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		vdev->num_peers--;
	} else {
		/* Ignoring the remove operation as the peer was not found */
		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK",
			      soc, peer, vdev, &peer->vdev->peer_list);
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
 * @soc: SoC handle
 * @peer: peer handle
 * @txrx_peer: txrx peer handle
 *
 * Return: None
 */
void dp_txrx_peer_attach_add(struct dp_soc *soc,
			     struct dp_peer *peer,
			     struct dp_txrx_peer *txrx_peer)
{
	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->txrx_peer = txrx_peer;
	txrx_peer->bss_peer = peer->bss_peer;

	if (peer->peer_id == HTT_INVALID_PEER) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	txrx_peer->peer_id = peer->peer_id;

	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);

	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
 * @soc: SoC handle
 * @peer: peer handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->peer_id = peer_id;

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
		if (peer->txrx_peer)
			peer->txrx_peer->peer_id = peer_id;
	} else {
		/* A peer map event arrived for a peer_id which is
		 * already mapped; this is not expected.
		 */
		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
		       soc->peer_id_to_obj_map[peer_id]);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_assert_always(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
 * @soc: SoC handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id)
{
	struct dp_peer *peer = NULL;

	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = soc->peer_id_to_obj_map[peer_id];
	peer->peer_id = HTT_INVALID_PEER;
	if (peer->txrx_peer)
		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
	soc->peer_id_to_obj_map[peer_id] = NULL;
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
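
/*
 * Example (illustrative sketch, not part of the driver): the peer_id map
 * owns one reference per mapped id; dp_peer_find_id_to_obj_add() takes it,
 * and dp_peer_find_id_to_obj_remove() drops it. A minimal refcount model
 * (free_obj is a hypothetical destructor):
 *
 *	#include <assert.h>
 *
 *	struct obj {
 *		int refs;
 *	};
 *
 *	static struct obj *map[64];	// id -> object
 *
 *	static void map_add(int id, struct obj *o)
 *	{
 *		assert(!map[id]);	// double map is a fatal bug
 *		o->refs++;		// the map holds its own reference
 *		map[id] = o;
 *	}
 *
 *	static void map_remove(int id)
 *	{
 *		struct obj *o = map[id];
 *
 *		map[id] = NULL;
 *		if (--o->refs == 0)
 *			free_obj(o);	// hypothetical destructor
 *	}
 *
 * In the driver the same pairing is enforced with dp_peer_get_ref() /
 * dp_peer_unref_delete() under peer_map_lock, and the double-map case is
 * trapped with qdf_assert_always().
 */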

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	int log2, hash_elems, i;

	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
	hash_elems = 1 << log2;

	soc->mec_hash.mask = hash_elems - 1;
	soc->mec_hash.idx_bits = log2;

	dp_peer_info("%pK: max mec index: %d",
		     soc, DP_PEER_MAX_MEC_IDX);

	/* allocate an array of TAILQ mec object lists */
	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
					    sizeof(TAILQ_HEAD(anonymous_tail_q,
							      dp_mec_entry)));

	if (!soc->mec_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->mec_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: MEC hash
 */
static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->mec_hash.idx_bits;
	index &= soc->mec_hash.mask;
	return index;
}

struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}

/**
 * dp_peer_mec_hash_add() - Add MEC entry into hash table
 * @soc: SoC handle
 * @mecentry: MEC entry
 *
 * This function adds the MEC entry into the SoC MEC hash table
 *
 * Return: None
 */
static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
					struct dp_mec_entry *mecentry)
{
	uint32_t index;

	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
	qdf_spin_lock_bh(&soc->mec_lock);
	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
	qdf_spin_unlock_bh(&soc->mec_lock);
}

QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
					 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	dp_peer_mec_hash_add(soc, mecentry);

	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}

void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) *free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}

void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
{
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) *free_list = ptr;

	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
			   mecentry_next) {
		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
		qdf_mem_free(mecentry);
		qdf_atomic_dec(&soc->mec_cnt);
		DP_STATS_INC(soc, mec.deleted, 1);
	}
}
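
/*
 * Example (illustrative sketch): dp_peer_mec_detach_entry() and
 * dp_peer_mec_free_list() split deletion into two phases so that memory is
 * never freed while the hash lock is held: entries are first moved onto a
 * caller-provided free list under the lock, then freed after it is dropped.
 * The calling pattern looks roughly like:
 *
 *	TAILQ_HEAD(, dp_mec_entry) free_list;
 *
 *	TAILQ_INIT(&free_list);
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	// walk the hash bins and detach stale entries onto free_list
 *	dp_peer_mec_detach_entry(soc, mecentry, &free_list);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 *
 *	// qdf_mem_free() runs outside the spinlock
 *	dp_peer_mec_free_list(soc, &free_list);
 *
 * Keeping qdf_mem_free() outside the BH spinlock bounds the time the lock
 * is held and avoids allocator work in the locked region.
 */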

/**
 * dp_peer_mec_hash_detach() - Free MEC Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}

void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
#else
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
#endif

#ifdef FEATURE_AST
#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_peer_exist_on_pdev() - check if a peer with the mac address exists
 *			     on the pdev
 * @soc: Datapath SOC handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac address aligned
 * @pdev: Datapath PDEV handle
 *
 * Return: true if the peer is found, else false
 */
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);

	if (found)
		return found;

	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
					  mac_addr_is_aligned, DP_VDEV_ALL,
					  DP_MOD_ID_CDP);
	if (peer) {
		if (peer->vdev->pdev == pdev)
			found = true;
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return found;
}
#else
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return found;
}
#endif /* WLAN_FEATURE_11BE_MLO */

/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
		     soc, hash_elems, max_ast_idx);

	/* allocate an array of TAILQ ast entry lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);

	/* Call the callbacks to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	if (!soc->ast_hash.bins)
		return;

	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				soc->num_ast_entries--;
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
	soc->ast_hash.bins = NULL;
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to the table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function removes the AST entry from the soc AST hash table.
 * It assumes the caller has taken the ast lock to protect access to the table.
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	if (soc->ast_offload_support && !soc->host_ast_db_enable)
		return;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the bin is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);

	if (found)
		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
 *				       within a vdev
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @vdev_id: vdev id
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((vdev_id == ase->vdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
 *				       within a pdev
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @pdev_id: pdev id
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 *				 across the whole soc
 * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	if (!soc->ast_hash.bins)
		return NULL;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type type;
	struct dp_peer *peer;
	struct dp_peer *old_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (is_wds)
		type = CDP_TXRX_AST_TYPE_WDS;
	else
		type = CDP_TXRX_AST_TYPE_STATIC;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
	if (!peer) {
		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
			     soc, peer_id,
			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
		type = CDP_TXRX_AST_TYPE_MLD;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if (type != CDP_TXRX_AST_TYPE_STATIC &&
		    type != CDP_TXRX_AST_TYPE_MLD &&
		    type != CDP_TXRX_AST_TYPE_SELF) {
			status = QDF_STATUS_E_BUSY;
			goto fail;
		}
	}

	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, vdev->vdev_id, type,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/*
	 * In an MLO scenario, the same mac address may appear as both a
	 * link mac address and the MLD mac address. A duplicate AST map
	 * needs to be handled for the non-MLD types.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
			      hw_peer_id, vdev_id,
			      QDF_MAC_ADDR_REF(mac_addr));

		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						   DP_MOD_ID_AST);
		if (!old_peer) {
			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
				     soc, ast_entry->peer_id,
				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
			qdf_spin_unlock_bh(&soc->ast_lock);
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
		dp_peer_free_ast_entry(soc, ast_entry);
		if (old_peer)
			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
	}

	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));
	if (!ast_entry) {
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_ASSERT(0);
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->type = type;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		break;
	case CDP_TXRX_AST_TYPE_MLD:
		break;
	default:
		dp_peer_alert("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	ast_entry->ast_idx = hw_peer_id;
	ast_entry->ast_hash_value = ast_hash;
	ast_entry->peer_id = peer_id;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
			  ase_list_elem);

	qdf_spin_unlock_bh(&soc->ast_lock);
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return status;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	if (!peer)
		return QDF_STATUS_E_INVAL;

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases, such as an Auth attack on a repeater,
		 * the number of ast_entries falling in the same hash
		 * bucket can exceed the max_skid length supported by HW
		 * in the root AP. In these cases the FW returns the
		 * hw_peer_id (ast_index) as 0xffff, indicating HW could
		 * not add the entry in its table. The host has to delete
		 * the entry from its table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
					soc->ctrl_psoc, peer->peer_id,
					hw_peer_id, vdev_id,
					mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}
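
/*
 * Example (worked scenario, illustrative): dp_peer_map_ast() must tolerate
 * hw_peer_id == HTT_INVALID_PEER (0xffff). If station churn piles many MACs
 * into one hardware hash bucket, the bucket exceeds the root AP's max_skid
 * and the FW reports 0xffff instead of a real AST index. The host-side
 * contract then reduces to:
 *
 *	if (hw_peer_id == HTT_INVALID_PEER) {
 *		// entry never made it into HW: unlink and free the host
 *		// copy, fire any registered callback with
 *		// CDP_TXRX_AST_DELETED, and return QDF_STATUS_E_INVAL
 *	} else {
 *		// normal map: record ast_idx and publish the entry in
 *		// soc->ast_table[hw_peer_id]
 *	}
 *
 * so the host table never holds an entry the hardware cannot match on.
 */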

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id, DP_MOD_ID_AST);
	if (peer) {
		err = dp_peer_add_ast(soc, peer,
				      &param->mac_addr.raw[0],
				      param->type,
				      param->flags);

		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
					param->type, err, false);

		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}
	qdf_mem_free(cookie);
}
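
/*
 * Example (illustrative sketch): dp_peer_free_hmwds_cb() is the second half
 * of a delete-then-re-add handshake: dp_peer_add_ast() parks the parameters
 * of the new entry in a heap-allocated cookie, arms this callback on the
 * old entry, and the callback re-adds the entry once the target confirms
 * CDP_TXRX_AST_DELETED. The generic shape of the pattern (DELETED_OK and
 * add_entry are hypothetical stand-ins):
 *
 *	#include <stdlib.h>
 *
 *	struct pending_add {	// cookie: everything needed to retry
 *		unsigned char mac[6];
 *		unsigned int flags;
 *	};
 *
 *	static void on_deleted(void *cookie, int status)
 *	{
 *		struct pending_add *p = cookie;
 *
 *		if (status == DELETED_OK)		// hypothetical status
 *			add_entry(p->mac, p->flags);	// hypothetical re-add
 *		free(p);	// cookie is freed on every path
 *	}
 *
 * Note that the cookie is freed on every exit path of the real callback as
 * well, including the early return for status != CDP_TXRX_AST_DELETED.
 */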

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type, from enum cdp_txrx_ast_entry_type
 * @flags: AST entry flags (wds or hmwds)
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;
	int status = 0;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_INVAL;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If the AST entry already exists, just return from here.
	 * An ast entry with the same mac address can exist on different
	 * radios; if ast_override support is enabled, search by pdev in
	 * this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, the peer is added
			 * to the list before the static AST entry is
			 * created, so allow an AST entry of STATIC type
			 * even if the peer is present.
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* HMWDS_SEC entries can be added for the same mac address;
		 * do not check for an existing entry.
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}

			/* An add for an HMWDS entry cannot be ignored if
			 * there is already an AST entry with the same mac
			 * address:
			 *
			 * if an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which takes care of adding the HMWDS
			 * ast entry on delete confirmation from the target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return QDF_STATUS_E_NOMEM;
				}

				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				/* Call the saved callback */
				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
				}
				return QDF_STATUS_E_AGAIN;
			}

			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->peer_id = HTT_INVALID_PEER;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
		break;
	case CDP_TXRX_AST_TYPE_DA:
		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
							  DP_MOD_ID_AST);
		if (!vap_bss_peer) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			qdf_mem_free(ast_entry);
			return QDF_STATUS_E_FAILURE;
		}
		peer = vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
1946 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1947 	}
1948 
1949 	ast_entry->is_active = TRUE;
1950 	DP_STATS_INC(soc, ast.added, 1);
1951 	soc->num_ast_entries++;
1952 	dp_peer_ast_hash_add(soc, ast_entry);
1953 
1954 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1955 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1956 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1957 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1958 		status = dp_add_wds_entry_wrapper(soc,
1959 						  peer,
1960 						  mac_addr,
1961 						  flags,
1962 						  ast_entry->type);
1963 
1964 	if (vap_bss_peer)
1965 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1966 
1967 	qdf_spin_unlock_bh(&soc->ast_lock);
1968 	return qdf_status_from_os_return(status);
1969 }
1970 
1971 qdf_export_symbol(dp_peer_add_ast);
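
/*
 * Illustrative usage (a hedged sketch, not part of the driver): a WDS
 * source-port learning path could create a WDS AST entry as below;
 * "ta_peer", "wds_src_mac" and "flags" are assumptions for the example.
 * Note dp_peer_add_ast() takes soc->ast_lock internally, so the caller
 * must not hold it.
 *
 *	QDF_STATUS ret;
 *
 *	ret = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
 *			      CDP_TXRX_AST_TYPE_WDS, flags);
 *	if (QDF_IS_STATUS_ERROR(ret))
 *		dp_peer_err("WDS AST add failed");
 */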
1972 
1973 /*
1974  * dp_peer_free_ast_entry() - Free up the ast entry memory
1975  * @soc: SoC handle
1976  * @ast_entry: Address search entry
1977  *
1978  * This API is used to free up the memory associated with
1979  * AST entry.
1980  *
1981  * Return: None
1982  */
1983 void dp_peer_free_ast_entry(struct dp_soc *soc,
1984 			    struct dp_ast_entry *ast_entry)
1985 {
1986 	/*
1987 	 * NOTE: Ensure that call to this API is done
1988 	 * after soc->ast_lock is taken
1989 	 */
1990 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1991 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1992 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1993 
1994 	ast_entry->callback = NULL;
1995 	ast_entry->cookie = NULL;
1996 
1997 	DP_STATS_INC(soc, ast.deleted, 1);
1998 	dp_peer_ast_hash_remove(soc, ast_entry);
1999 	dp_peer_ast_cleanup(soc, ast_entry);
2000 	qdf_mem_free(ast_entry);
2001 	soc->num_ast_entries--;
2002 }
2003 
2004 /*
2005  * dp_peer_unlink_ast_entry() - Unlink AST entry from peer and hash lists
2006  * @soc: SoC handle
2007  * @ast_entry: Address search entry
2008  * @peer: peer
2009  *
2010  * This API is used to remove/unlink AST entry from the peer list
2011  * and hash list.
2012  *
2013  * Return: None
2014  */
2015 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2016 			      struct dp_ast_entry *ast_entry,
2017 			      struct dp_peer *peer)
2018 {
2019 	if (!peer) {
2020 		dp_info_rl("NULL peer");
2021 		return;
2022 	}
2023 
2024 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
2025 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2026 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2027 			  ast_entry->type);
2028 		return;
2029 	}
2030 	/*
2031 	 * NOTE: Ensure that call to this API is done
2032 	 * after soc->ast_lock is taken
2033 	 */
2034 
2035 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
2036 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
2037 
2038 	if (ast_entry == peer->self_ast_entry)
2039 		peer->self_ast_entry = NULL;
2040 
2041 	/*
2042 	 * release the reference only if it is mapped
2043 	 * to ast_table
2044 	 */
2045 	if (ast_entry->is_mapped)
2046 		soc->ast_table[ast_entry->ast_idx] = NULL;
2047 
2048 	ast_entry->peer_id = HTT_INVALID_PEER;
2049 }
2050 
2051 /*
2052  * dp_peer_del_ast() - Delete and free AST entry
2053  * @soc: SoC handle
2054  * @ast_entry: AST entry of the node
2055  *
2056  * This function removes the AST entry from peer and soc tables
2057  * It assumes caller has taken the ast lock to protect the access to these
2058  * tables
2059  *
2060  * Return: None
2061  */
2062 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2063 {
2064 	struct dp_peer *peer = NULL;
2065 
2066 	if (soc->ast_offload_support)
2067 		return;
2068 
2069 	if (!ast_entry) {
2070 		dp_info_rl("NULL AST entry");
2071 		return;
2072 	}
2073 
2074 	if (ast_entry->delete_in_progress) {
2075 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2076 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2077 			  ast_entry->type);
2078 		return;
2079 	}
2080 
2081 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2082 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2083 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2084 
2085 	ast_entry->delete_in_progress = true;
2086 
2087 	/* In teardown, del ast is called after setting the logical delete
2088 	 * state; use __dp_peer_get_ref_by_id to get the reference
2089 	 * irrespective of state.
2090 	 */
2091 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2092 				       DP_MOD_ID_AST);
2093 
2094 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2095 
2096 	/* Remove SELF and STATIC entries in teardown itself */
2097 	if (!ast_entry->next_hop)
2098 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2099 
2100 	if (ast_entry->is_mapped)
2101 		soc->ast_table[ast_entry->ast_idx] = NULL;
2102 
2103 	/* If peer map v2 is enabled, we do not free the ast entry
2104 	 * here; it is supposed to be freed in the unmap event (after
2105 	 * we receive delete confirmation from the target).
2106 	 *
2107 	 * If peer_id is invalid, we did not get the peer map event
2108 	 * for the peer; free the ast entry here only in this case.
2109 	 */
2110 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2111 		goto end;
2112 
2113 	/* For a WDS secondary entry, ast_entry->next_hop would be set,
2114 	 * so unlinking has to be done explicitly here.
2115 	 * As this entry is not a mapped entry, an unmap notification from
2116 	 * FW will not come. Hence unlinking is done right here.
2117 	 */
2118 
2119 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2120 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2121 
2122 	dp_peer_free_ast_entry(soc, ast_entry);
2123 
2124 end:
2125 	if (peer)
2126 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2127 }
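
/*
 * Illustrative deletion flow (a hedged sketch): dp_peer_del_ast()
 * expects the caller to hold soc->ast_lock, so a typical
 * lookup-and-delete sequence would look like the below; "wds_mac" is
 * an assumption for the example.
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, wds_mac);
 *	if (ast_entry)
 *		dp_peer_del_ast(soc, ast_entry);
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */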
2128 
2129 /*
2130  * dp_peer_update_ast() - Update AST entry for a roamed peer
2131  * @soc: SoC handle
2132  * @peer: peer to which ast node belongs
2133  * @ast_entry: AST entry of the node
2134  * @flags: wds or hmwds
2135  *
2136  * This function updates the AST entry to point at the roamed peer in
2137  * the soc tables. It assumes the caller has taken the ast lock to
2138  * protect the access to these tables.
2139  *
2140  * Return: 0 if the ast entry is updated successfully
2141  *         -1 on failure
2142  */
2143 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2144 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2145 {
2146 	int ret = -1;
2147 	struct dp_peer *old_peer;
2148 
2149 	if (soc->ast_offload_support)
2150 		return QDF_STATUS_E_INVAL;
2151 
2152 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2153 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2154 		      peer->vdev->vdev_id, flags,
2155 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2156 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2157 
2158 	/* Do not send an AST update in the below cases:
2159 	 *  1) AST entry delete has already been triggered
2160 	 *  2) Peer delete is already triggered
2161 	 *  3) We did not get the HTT map for the create event
2162 	 */
2163 	if (ast_entry->delete_in_progress ||
2164 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2165 	    !ast_entry->is_mapped)
2166 		return ret;
2167 
2168 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2169 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2170 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2171 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2172 		return 0;
2173 
2174 	/*
2175 	 * Avoids flood of WMI update messages sent to FW for same peer.
2176 	 */
2177 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2178 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2179 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2180 	    (ast_entry->is_active))
2181 		return 0;
2182 
2183 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2184 					 DP_MOD_ID_AST);
2185 	if (!old_peer)
2186 		return 0;
2187 
2188 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2189 
2190 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2191 
2192 	ast_entry->peer_id = peer->peer_id;
2193 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2194 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2195 	ast_entry->vdev_id = peer->vdev->vdev_id;
2196 	ast_entry->is_active = TRUE;
2197 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2198 
2199 	ret = dp_update_wds_entry_wrapper(soc,
2200 					  peer,
2201 					  ast_entry->mac_addr.raw,
2202 					  flags);
2203 
2204 	return ret;
2205 }
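
/*
 * Illustrative roaming update (a hedged sketch): like the delete path,
 * dp_peer_update_ast() must run under soc->ast_lock; "new_peer",
 * "wds_mac" and "flags" are assumptions for the example.
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, wds_mac);
 *	if (ast_entry)
 *		dp_peer_update_ast(soc, new_peer, ast_entry, flags);
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */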
2206 
2207 /*
2208  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
2209  * @soc: SoC handle
2210  * @ast_entry: AST entry of the node
2211  *
2212  * This function gets the pdev_id from the ast entry.
2213  *
2214  * Return: (uint8_t) pdev_id
2215  */
2216 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2217 				struct dp_ast_entry *ast_entry)
2218 {
2219 	return ast_entry->pdev_id;
2220 }
2221 
2222 /*
2223  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
2224  * @soc: SoC handle
2225  * @ast_entry: AST entry of the node
2226  *
2227  * This function gets the next hop from the ast entry.
2228  *
2229  * Return: (uint8_t) next_hop
2230  */
2231 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2232 				struct dp_ast_entry *ast_entry)
2233 {
2234 	return ast_entry->next_hop;
2235 }
2236 
2237 /*
2238  * dp_peer_ast_set_type() - set type in the ast entry
2239  * @soc: SoC handle
2240  * @ast_entry: AST entry of the node
2241  * @type: AST entry type to be set
2242  *
2243  * This function sets the type in the ast entry.
2244  * Return: None
2245  */
2246 void dp_peer_ast_set_type(struct dp_soc *soc,
2247 				struct dp_ast_entry *ast_entry,
2248 				enum cdp_txrx_ast_entry_type type)
2249 {
2250 	ast_entry->type = type;
2251 }
2252 
2253 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2254 			      struct dp_ast_entry *ast_entry,
2255 			      struct dp_peer *peer)
2256 {
2257 	bool delete_in_fw = false;
2258 
2259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2260 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2261 		  __func__, ast_entry->type, ast_entry->pdev_id,
2262 		  ast_entry->vdev_id,
2263 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2264 		  ast_entry->next_hop, ast_entry->peer_id);
2265 
2266 	/*
2267 	 * If the peer state is logical delete, the peer is about to be
2268 	 * torn down with a peer delete command to firmware,
2269 	 * which will clean up all the wds ast entries.
2270 	 * So, no need to send an explicit wds ast delete to firmware.
2271 	 */
2272 	if (ast_entry->next_hop) {
2273 		if (peer && dp_peer_state_cmp(peer,
2274 					      DP_PEER_STATE_LOGICAL_DELETE))
2275 			delete_in_fw = false;
2276 		else
2277 			delete_in_fw = true;
2278 
2279 		dp_del_wds_entry_wrapper(soc,
2280 					 ast_entry->vdev_id,
2281 					 ast_entry->mac_addr.raw,
2282 					 ast_entry->type,
2283 					 delete_in_fw);
2284 	}
2285 }
2286 #else
2287 void dp_peer_free_ast_entry(struct dp_soc *soc,
2288 			    struct dp_ast_entry *ast_entry)
2289 {
2290 }
2291 
2292 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2293 			      struct dp_ast_entry *ast_entry,
2294 			      struct dp_peer *peer)
2295 {
2296 }
2297 
2298 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2299 			     struct dp_ast_entry *ase)
2300 {
2301 }
2302 
2303 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2304 						     uint8_t *ast_mac_addr,
2305 						     uint8_t vdev_id)
2306 {
2307 	return NULL;
2308 }
2309 
2310 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2311 			   struct dp_peer *peer,
2312 			   uint8_t *mac_addr,
2313 			   enum cdp_txrx_ast_entry_type type,
2314 			   uint32_t flags)
2315 {
2316 	return QDF_STATUS_E_FAILURE;
2317 }
2318 
2319 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2320 {
2321 }
2322 
2323 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2324 			struct dp_ast_entry *ast_entry, uint32_t flags)
2325 {
2326 	return 1;
2327 }
2328 
2329 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2330 					       uint8_t *ast_mac_addr)
2331 {
2332 	return NULL;
2333 }
2334 
2335 static inline
2336 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2337 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2338 				    uint8_t vdev_id, uint16_t ast_hash,
2339 				    uint8_t is_wds)
2340 {
2341 	return QDF_STATUS_SUCCESS;
2342 }
2343 
2344 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2345 						     uint8_t *ast_mac_addr,
2346 						     uint8_t pdev_id)
2347 {
2348 	return NULL;
2349 }
2350 
2351 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2352 {
2353 	return QDF_STATUS_SUCCESS;
2354 }
2355 
2356 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2357 					 struct dp_peer *peer,
2358 					 uint8_t *mac_addr,
2359 					 uint16_t hw_peer_id,
2360 					 uint8_t vdev_id,
2361 					 uint16_t ast_hash,
2362 					 uint8_t is_wds)
2363 {
2364 	return QDF_STATUS_SUCCESS;
2365 }
2366 
2367 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2368 {
2369 }
2370 
2371 void dp_peer_ast_set_type(struct dp_soc *soc,
2372 				struct dp_ast_entry *ast_entry,
2373 				enum cdp_txrx_ast_entry_type type)
2374 {
2375 }
2376 
2377 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2378 				struct dp_ast_entry *ast_entry)
2379 {
2380 	return 0xff;
2381 }
2382 
2383 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2384 				 struct dp_ast_entry *ast_entry)
2385 {
2386 	return 0xff;
2387 }
2388 
2389 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2390 			      struct dp_ast_entry *ast_entry,
2391 			      struct dp_peer *peer)
2392 {
2393 }
2394 #endif
2395 
2396 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2397 void dp_peer_ast_send_multi_wds_del(
2398 		struct dp_soc *soc, uint8_t vdev_id,
2399 		struct peer_del_multi_wds_entries *wds_list)
2400 {
2401 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2402 
2403 	if (cdp_soc && cdp_soc->ol_ops &&
2404 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2405 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2406 							  vdev_id, wds_list);
2407 }
2408 #endif
2409 
2410 #ifdef FEATURE_WDS
2411 /**
2412  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2413  * @soc: soc handle
2414  * @peer: peer handle
2415  *
2416  * Free all the wds ast entries associated with peer
2417  *
2418  * Return: Number of wds ast entries freed
2419  */
2420 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2421 					     struct dp_peer *peer)
2422 {
2423 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2424 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2425 	uint32_t num_ast = 0;
2426 
2427 	TAILQ_INIT(&ast_local_list);
2428 	qdf_spin_lock_bh(&soc->ast_lock);
2429 
2430 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2431 		if (ast_entry->next_hop)
2432 			num_ast++;
2433 
2434 		if (ast_entry->is_mapped)
2435 			soc->ast_table[ast_entry->ast_idx] = NULL;
2436 
2437 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2438 		DP_STATS_INC(soc, ast.deleted, 1);
2439 		dp_peer_ast_hash_remove(soc, ast_entry);
2440 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2441 				  ase_list_elem);
2442 		soc->num_ast_entries--;
2443 	}
2444 
2445 	qdf_spin_unlock_bh(&soc->ast_lock);
2446 
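	/* Invoke the saved free callbacks only after dropping ast_lock,
	 * since a callback may re-enter AST APIs that take the same lock.
	 */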
2447 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2448 			   temp_ast_entry) {
2449 		if (ast_entry->callback)
2450 			ast_entry->callback(soc->ctrl_psoc,
2451 					    dp_soc_to_cdp_soc(soc),
2452 					    ast_entry->cookie,
2453 					    CDP_TXRX_AST_DELETED);
2454 
2455 		qdf_mem_free(ast_entry);
2456 	}
2457 
2458 	return num_ast;
2459 }
2460 /**
2461  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2462  * @soc: soc handle
2463  * @peer: peer handle
2464  * @free_wds_count: number of wds entries freed by FW with peer delete
2465  *
2466  * Free all the wds ast entries associated with the peer and compare
2467  * the count with the value received from firmware.
2468  *
2469  * Return: None
2470  */
2471 static void
2472 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2473 			  uint32_t free_wds_count)
2474 {
2475 	uint32_t wds_deleted = 0;
2476 
2477 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2478 		return;
2479 
2480 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2481 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2482 	    (free_wds_count != wds_deleted)) {
2483 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2484 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not the same as the number deleted by host = %d",
2485 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2486 			 free_wds_count, wds_deleted);
2487 	}
2488 }
2489 
2490 #else
2491 static void
2492 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2493 			  uint32_t free_wds_count)
2494 {
2495 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2496 
2497 	qdf_spin_lock_bh(&soc->ast_lock);
2498 
2499 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2500 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2501 
2502 		if (ast_entry->is_mapped)
2503 			soc->ast_table[ast_entry->ast_idx] = NULL;
2504 
2505 		dp_peer_free_ast_entry(soc, ast_entry);
2506 	}
2507 
2508 	peer->self_ast_entry = NULL;
2509 	qdf_spin_unlock_bh(&soc->ast_lock);
2510 }
2511 #endif
2512 
2513 /**
2514  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2515  * @soc: soc handle
2516  * @peer: peer handle
2517  * @vdev_id: vdev_id
2518  * @mac_addr: mac address of the AST entry to search and delete
2519  *
2520  * Find the ast entry from the peer list using the mac address and free
2521  * the entry.
2522  *
2523  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2524  */
2525 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2526 					 struct dp_peer *peer,
2527 					 uint8_t vdev_id,
2528 					 uint8_t *mac_addr)
2529 {
2530 	struct dp_ast_entry *ast_entry;
2531 	void *cookie = NULL;
2532 	txrx_ast_free_cb cb = NULL;
2533 
2534 	/*
2535 	 * release the reference only if it is mapped
2536 	 * to ast_table
2537 	 */
2538 
2539 	qdf_spin_lock_bh(&soc->ast_lock);
2540 
2541 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2542 	if (!ast_entry) {
2543 		qdf_spin_unlock_bh(&soc->ast_lock);
2544 		return QDF_STATUS_E_NOENT;
2545 	} else if (ast_entry->is_mapped) {
2546 		soc->ast_table[ast_entry->ast_idx] = NULL;
2547 	}
2548 
2549 	cb = ast_entry->callback;
2550 	cookie = ast_entry->cookie;
2551 
2553 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2554 
2555 	dp_peer_free_ast_entry(soc, ast_entry);
2556 
2557 	qdf_spin_unlock_bh(&soc->ast_lock);
2558 
2559 	if (cb) {
2560 		cb(soc->ctrl_psoc,
2561 		   dp_soc_to_cdp_soc(soc),
2562 		   cookie,
2563 		   CDP_TXRX_AST_DELETED);
2564 	}
2565 
2566 	return QDF_STATUS_SUCCESS;
2567 }
2568 
2569 void dp_peer_find_hash_erase(struct dp_soc *soc)
2570 {
2571 	int i, j;
2572 
2573 	/*
2574 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2575 	 * it's known that the soc is no longer in use.
2576 	 */
2577 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2578 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2579 			struct dp_peer *peer, *peer_next;
2580 
2581 			/*
2582 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2583 			 * memory access violation after peer is freed
2584 			 */
2585 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2586 				hash_list_elem, peer_next) {
2587 				/*
2588 				 * Don't remove the peer from the hash table -
2589 				 * that would modify the list we are currently
2590 				 * traversing, and it's not necessary anyway.
2591 				 */
2592 				/*
2593 				 * Artificially adjust the peer's ref count to
2594 				 * 1, so it will get deleted by
2595 				 * dp_peer_unref_delete.
2596 				 */
2597 				/* set to zero */
2598 				qdf_atomic_init(&peer->ref_cnt);
2599 				for (j = 0; j < DP_MOD_ID_MAX; j++)
2600 					qdf_atomic_init(&peer->mod_refs[j]);
2601 				/* incr to one */
2602 				qdf_atomic_inc(&peer->ref_cnt);
2603 				qdf_atomic_inc(&peer->mod_refs
2604 						[DP_MOD_ID_CONFIG]);
2605 				dp_peer_unref_delete(peer,
2606 						     DP_MOD_ID_CONFIG);
2607 			}
2608 		}
2609 	}
2610 }
2611 
2612 void dp_peer_ast_table_detach(struct dp_soc *soc)
2613 {
2614 	if (soc->ast_table) {
2615 		qdf_mem_free(soc->ast_table);
2616 		soc->ast_table = NULL;
2617 	}
2618 }
2619 
2620 /*
2621  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2622  * @soc: soc handle
2623  *
2624  * Return: None
2625  */
2626 void dp_peer_find_map_detach(struct dp_soc *soc)
2627 {
2628 	if (soc->peer_id_to_obj_map) {
2629 		qdf_mem_free(soc->peer_id_to_obj_map);
2630 		soc->peer_id_to_obj_map = NULL;
2631 		qdf_spinlock_destroy(&soc->peer_map_lock);
2632 	}
2633 }
2634 
2635 #ifndef AST_OFFLOAD_ENABLE
2636 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2637 {
2638 	QDF_STATUS status;
2639 
2640 	status = dp_peer_find_map_attach(soc);
2641 	if (!QDF_IS_STATUS_SUCCESS(status))
2642 		return status;
2643 
2644 	status = dp_peer_find_hash_attach(soc);
2645 	if (!QDF_IS_STATUS_SUCCESS(status))
2646 		goto map_detach;
2647 
2648 	status = dp_peer_ast_table_attach(soc);
2649 	if (!QDF_IS_STATUS_SUCCESS(status))
2650 		goto hash_detach;
2651 
2652 	status = dp_peer_ast_hash_attach(soc);
2653 	if (!QDF_IS_STATUS_SUCCESS(status))
2654 		goto ast_table_detach;
2655 
2656 	status = dp_peer_mec_hash_attach(soc);
2657 	if (QDF_IS_STATUS_SUCCESS(status)) {
2658 		dp_soc_wds_attach(soc);
2659 		return status;
2660 	}
2661 
2662 	dp_peer_ast_hash_detach(soc);
2663 ast_table_detach:
2664 	dp_peer_ast_table_detach(soc);
2665 hash_detach:
2666 	dp_peer_find_hash_detach(soc);
2667 map_detach:
2668 	dp_peer_find_map_detach(soc);
2669 
2670 	return status;
2671 }
2672 #else
2673 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2674 {
2675 	QDF_STATUS status;
2676 
2677 	status = dp_peer_find_map_attach(soc);
2678 	if (!QDF_IS_STATUS_SUCCESS(status))
2679 		return status;
2680 
2681 	status = dp_peer_find_hash_attach(soc);
2682 	if (!QDF_IS_STATUS_SUCCESS(status))
2683 		goto map_detach;
2684 
2685 	return status;
2686 map_detach:
2687 	dp_peer_find_map_detach(soc);
2688 
2689 	return status;
2690 }
2691 #endif
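
/*
 * Illustrative pairing (a hedged sketch): dp_peer_find_attach() is
 * expected to be matched by dp_peer_find_detach() on the soc teardown
 * path, e.g.
 *
 *	if (dp_peer_find_attach(soc) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	...
 *	dp_peer_find_detach(soc);
 */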
2692 
2693 #ifdef IPA_OFFLOAD
2694 /*
2695  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
2696  * @soc: soc handle
2697  * @cb_ctxt: combination of peer_id and tid
2698  * @reo_status: reo status
2699  *
2700  * Return: None
2701  */
2702 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2703 				       union hal_reo_status *reo_status)
2704 {
2705 	struct dp_peer *peer = NULL;
2706 	struct dp_rx_tid *rx_tid = NULL;
2707 	unsigned long comb_peer_id_tid;
2708 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
2709 	uint16_t tid;
2710 	uint16_t peer_id;
2711 
2712 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2713 		dp_err("REO stats failure %d\n",
2714 		       queue_status->header.status);
2715 		return;
2716 	}
2717 	comb_peer_id_tid = (unsigned long)cb_ctxt;
2718 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
2719 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
2720 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
2721 	if (!peer)
2722 		return;
2723 	rx_tid  = &peer->rx_tid[tid];
2724 
2725 	if (!rx_tid) {
2726 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2727 		return;
2728 	}
2729 
2730 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
2731 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
2732 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2733 }
2734 
2735 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
2736 #endif
2737 
2738 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2739 	union hal_reo_status *reo_status)
2740 {
2741 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2742 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2743 
2744 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2745 		return;
2746 
2747 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2748 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2749 			       queue_status->header.status, rx_tid->tid);
2750 		return;
2751 	}
2752 
2753 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2754 		       "ssn: %d\n"
2755 		       "curr_idx  : %d\n"
2756 		       "pn_31_0   : %08x\n"
2757 		       "pn_63_32  : %08x\n"
2758 		       "pn_95_64  : %08x\n"
2759 		       "pn_127_96 : %08x\n"
2760 		       "last_rx_enq_tstamp : %08x\n"
2761 		       "last_rx_deq_tstamp : %08x\n"
2762 		       "rx_bitmap_31_0     : %08x\n"
2763 		       "rx_bitmap_63_32    : %08x\n"
2764 		       "rx_bitmap_95_64    : %08x\n"
2765 		       "rx_bitmap_127_96   : %08x\n"
2766 		       "rx_bitmap_159_128  : %08x\n"
2767 		       "rx_bitmap_191_160  : %08x\n"
2768 		       "rx_bitmap_223_192  : %08x\n"
2769 		       "rx_bitmap_255_224  : %08x\n",
2770 		       rx_tid->tid,
2771 		       queue_status->ssn, queue_status->curr_idx,
2772 		       queue_status->pn_31_0, queue_status->pn_63_32,
2773 		       queue_status->pn_95_64, queue_status->pn_127_96,
2774 		       queue_status->last_rx_enq_tstamp,
2775 		       queue_status->last_rx_deq_tstamp,
2776 		       queue_status->rx_bitmap_31_0,
2777 		       queue_status->rx_bitmap_63_32,
2778 		       queue_status->rx_bitmap_95_64,
2779 		       queue_status->rx_bitmap_127_96,
2780 		       queue_status->rx_bitmap_159_128,
2781 		       queue_status->rx_bitmap_191_160,
2782 		       queue_status->rx_bitmap_223_192,
2783 		       queue_status->rx_bitmap_255_224);
2784 
2785 	DP_PRINT_STATS(
2786 		       "curr_mpdu_cnt      : %d\n"
2787 		       "curr_msdu_cnt      : %d\n"
2788 		       "fwd_timeout_cnt    : %d\n"
2789 		       "fwd_bar_cnt        : %d\n"
2790 		       "dup_cnt            : %d\n"
2791 		       "frms_in_order_cnt  : %d\n"
2792 		       "bar_rcvd_cnt       : %d\n"
2793 		       "mpdu_frms_cnt      : %d\n"
2794 		       "msdu_frms_cnt      : %d\n"
2795 		       "total_byte_cnt     : %d\n"
2796 		       "late_recv_mpdu_cnt : %d\n"
2797 		       "win_jump_2k        : %d\n"
2798 		       "hole_cnt           : %d\n",
2799 		       queue_status->curr_mpdu_cnt,
2800 		       queue_status->curr_msdu_cnt,
2801 		       queue_status->fwd_timeout_cnt,
2802 		       queue_status->fwd_bar_cnt,
2803 		       queue_status->dup_cnt,
2804 		       queue_status->frms_in_order_cnt,
2805 		       queue_status->bar_rcvd_cnt,
2806 		       queue_status->mpdu_frms_cnt,
2807 		       queue_status->msdu_frms_cnt,
2808 		       queue_status->total_cnt,
2809 		       queue_status->late_recv_mpdu_cnt,
2810 		       queue_status->win_jump_2k,
2811 		       queue_status->hole_cnt);
2812 
2813 	DP_PRINT_STATS("Addba Req          : %d\n"
2814 			"Addba Resp         : %d\n"
2815 			"Addba Resp success : %d\n"
2816 			"Addba Resp failed  : %d\n"
2817 			"Delba Req received : %d\n"
2818 			"Delba Tx success   : %d\n"
2819 			"Delba Tx Fail      : %d\n"
2820 			"BA window size     : %d\n"
2821 			"Pn size            : %d\n",
2822 			rx_tid->num_of_addba_req,
2823 			rx_tid->num_of_addba_resp,
2824 			rx_tid->num_addba_rsp_success,
2825 			rx_tid->num_addba_rsp_failed,
2826 			rx_tid->num_of_delba_req,
2827 			rx_tid->delba_tx_success_cnt,
2828 			rx_tid->delba_tx_fail_cnt,
2829 			rx_tid->ba_win_size,
2830 			rx_tid->pn_size);
2831 }
2832 
2833 #ifdef REO_SHARED_QREF_TABLE_EN
2834 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2835 					struct dp_peer *peer)
2836 {
2837 	uint8_t tid;
2838 
2839 	if (peer->peer_id > soc->max_peer_id)
2840 		return;
2841 	if (IS_MLO_DP_LINK_PEER(peer))
2842 		return;
2843 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2844 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2845 			hal_reo_shared_qaddr_write(soc->hal_soc,
2846 						   peer->peer_id, tid, 0);
2847 	}
2848 }
2849 #endif
2850 
2851 /*
2852  * dp_peer_find_add_id() - map peer_id with peer
2853  * @soc: soc handle
2854  * @peer_mac_addr: peer mac address
2855  * @peer_id: peer id to be mapped
2856  * @hw_peer_id: HW ast index
2857  * @vdev_id: vdev_id
2858  * @peer_type: peer type (link or MLD)
2859  *
2860  * Return: peer on success
2861  *         NULL on failure
2862  */
2863 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2864 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2865 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2866 {
2867 	struct dp_peer *peer;
2868 	struct cdp_peer_info peer_info = { 0 };
2869 
2870 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2871 	/* check if there's already a peer object with this MAC address */
2872 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2873 				 false, peer_type);
2874 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2875 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2876 		    soc, peer, peer_id, vdev_id,
2877 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2878 
2879 	if (peer) {
2880 		/* peer's ref count was already incremented by
2881 		 * peer_find_hash_find
2882 		 */
2883 		dp_peer_info("%pK: ref_cnt: %d", soc,
2884 			     qdf_atomic_read(&peer->ref_cnt));
2885 
2886 		/*
2887 		 * If the peer is in logical delete, CP triggered the delete
2888 		 * before the map was received; ignore this event.
2889 		 */
2890 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2891 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2892 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2893 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2894 				 vdev_id);
2895 			return NULL;
2896 		}
2897 
2898 		if (peer->peer_id == HTT_INVALID_PEER) {
2899 			if (!IS_MLO_DP_MLD_PEER(peer))
2900 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2901 								   peer_id);
2902 		} else {
2903 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2904 			QDF_ASSERT(0);
2905 			return NULL;
2906 		}
2907 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2908 		if (soc->arch_ops.dp_partner_chips_map)
2909 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2910 
2911 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2912 		return peer;
2913 	}
2914 
2915 	return NULL;
2916 }
2917 
2918 #ifdef WLAN_FEATURE_11BE_MLO
2919 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2920 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2921 					 uint16_t peer_id)
2922 {
2923 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2924 }
2925 #else
2926 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2927 					 uint16_t peer_id)
2928 {
2929 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2930 }
2931 #endif
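
/*
 * Worked example (a hedged sketch; the shift value is an assumption
 * used purely for illustration): if the ML-peer-valid shift were 13,
 * a FW peer_id of 5 would yield dp_gen_ml_peer_id() == (5 | (1 << 13))
 * == 0x2005, so MLD peer ids never collide with link peer ids in the
 * peer_id_to_obj_map.
 */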
2932 
2933 QDF_STATUS
2934 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2935 			   uint8_t *peer_mac_addr,
2936 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2937 			   struct dp_mlo_link_info *mlo_link_info)
2938 {
2939 	struct dp_peer *peer = NULL;
2940 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2941 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2942 	uint8_t vdev_id = 0;
2943 	uint8_t is_wds = 0;
2944 	int i;
2945 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2946 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2947 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2948 	struct dp_soc *primary_soc;
2949 
2950 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2951 					       NULL, peer_mac_addr,
2952 					       1, peer_id, ml_peer_id, 0,
2953 					       vdev_id);
2954 
2955 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2956 		soc, peer_id, ml_peer_id,
2957 		QDF_MAC_ADDR_REF(peer_mac_addr));
2958 
2959 	/* Get corresponding vdev ID for the peer based
2960 	 * on chip ID obtained from mlo peer_map event
2961 	 */
2962 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2963 		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
2964 			vdev_id = mlo_link_info[i].vdev_id;
2965 			break;
2966 		}
2967 	}
2968 
2969 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2970 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2971 	if (peer) {
2972 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2973 		    qdf_mem_cmp(peer->mac_addr.raw,
2974 				peer->vdev->mld_mac_addr.raw,
2975 				QDF_MAC_ADDR_SIZE) != 0) {
2976 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2977 			peer->bss_peer = 1;
2978 			if (peer->txrx_peer)
2979 				peer->txrx_peer->bss_peer = 1;
2980 		}
2981 
2982 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2983 			peer->vdev->bss_ast_hash = ast_hash;
2984 			peer->vdev->bss_ast_idx = hw_peer_id;
2985 		}
2986 
2987 		/* Add an ast entry in case the self ast entry was
2988 		 * deleted due to a DP CP sync issue.
2989 		 *
2990 		 * self_ast_entry is modified in the peer create
2991 		 * and peer unmap paths, which cannot run in
2992 		 * parallel with peer map, so no lock is needed
2993 		 * before referring to it.
2994 		 */
2995 		if (!peer->self_ast_entry) {
2996 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2997 				QDF_MAC_ADDR_REF(peer_mac_addr));
2998 			dp_peer_add_ast(soc, peer,
2999 					peer_mac_addr,
3000 					type, 0);
3001 		}
3002 		/* If peer setup and hence rx_tid setup got called
3003 		 * before htt peer map then Qref write to LUT did not
3004 		 * happen in rx_tid setup as peer_id was invalid.
3005 		 * So defer Qref write to peer map handler. Check if
3006 		 * rx_tid qdesc for tid 0 is already setup and perform
3007 		 * qref write to LUT for Tid 0 and 16.
3008 		 *
3009 		 * Peer map could be obtained on assoc link, hence
3010 		 * change to primary link's soc.
3011 		 */
3012 		primary_soc = peer->vdev->pdev->soc;
3013 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
3014 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
3015 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3016 						   ml_peer_id,
3017 						   0,
3018 						   peer->rx_tid[0].hw_qdesc_paddr);
3019 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3020 						   ml_peer_id,
3021 						   DP_NON_QOS_TID,
3022 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3023 		}
3024 	}
3025 
3026 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3027 			      vdev_id, ast_hash, is_wds);
3028 
3029 	/*
3030 	 * If AST offload and host AST DB is enabled, populate AST entries on
3031 	 * host based on mlo peer map event from FW
3032 	 */
3033 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3034 		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
3035 					 hw_peer_id, vdev_id,
3036 					 ast_hash, is_wds);
3037 	}
3038 
3039 	return err;
3040 }
3041 #endif
3042 
3043 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3044 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
3045 			      uint8_t *peer_mac_addr)
3046 {
3047 	struct dp_vdev *vdev = NULL;
3048 
3049 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
3050 	if (vdev) {
3051 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
3052 				QDF_MAC_ADDR_SIZE) == 0) {
3053 			vdev->roaming_peer_status =
3054 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
3055 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
3056 				     QDF_MAC_ADDR_SIZE);
3057 		}
3058 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
3059 	}
3060 }
3061 #endif
3062 
3063 #ifdef WLAN_SUPPORT_PPEDS
3064 static void
3065 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3066 				     bool peer_map)
3067 {
3068 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
3069 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3070 								   peer_map);
3071 }
3072 #else
3073 static void
3074 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3075 				     bool peer_map)
3076 {
3077 }
3078 #endif
3079 
3080 /**
3081  * dp_rx_peer_map_handler() - handle peer map event from firmware
3082  * @soc: generic soc handle
3083  * @peer_id: peer_id from firmware
3084  * @hw_peer_id: ast index for this peer
3085  * @vdev_id: vdev ID
3086  * @peer_mac_addr: mac address of the peer
3087  * @ast_hash: ast hash value
3088  * @is_wds: flag to indicate peer map event for WDS ast entry
3089  *
3090  * Associate the peer_id that firmware provided with the peer entry
3091  * and update the ast table in the host with the hw_peer_id.
3092  *
3093  * Return: QDF_STATUS code
3094  */
3096 QDF_STATUS
3097 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
3098 		       uint16_t hw_peer_id, uint8_t vdev_id,
3099 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
3100 		       uint8_t is_wds)
3101 {
3102 	struct dp_peer *peer = NULL;
3103 	struct dp_vdev *vdev = NULL;
3104 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
3105 	QDF_STATUS err = QDF_STATUS_SUCCESS;
3106 
3107 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
3108 					       NULL, peer_mac_addr, 1, peer_id,
3109 					       0, 0, vdev_id);
3110 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
3111 		soc, peer_id, hw_peer_id,
3112 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
3113 
3114 	/* For a peer map event for a WDS ast entry, get the peer
3115 	 * from the obj map.
3116 	 */
3117 	if (is_wds) {
3118 		if (!soc->ast_offload_support) {
3119 			peer = dp_peer_get_ref_by_id(soc, peer_id,
3120 						     DP_MOD_ID_HTT);
3121 
3122 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
3123 					      hw_peer_id,
3124 					      vdev_id, ast_hash, is_wds);
3125 			if (peer)
3126 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3127 		}
3128 	} else {
3129 		/*
3130 		 * It's the responsibility of the CP and FW to ensure
3131 		 * that peer is created successfully. Ideally DP should
3132 		 * not hit the below condition for directly associated
3133 		 * peers.
3134 		 */
3135 		if ((!soc->ast_offload_support) &&
3136 		    (hw_peer_id >=
3137 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
3138 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
3139 			qdf_assert_always(0);
3140 		}
3141 
3142 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
3143 					   hw_peer_id, vdev_id,
3144 					   CDP_LINK_PEER_TYPE);
3145 
3146 		if (peer) {
3147 			bool peer_map = true;
3148 
3149 			/* Updating ast_hash and ast_idx in peer level */
3150 			peer->ast_hash = ast_hash;
3151 			peer->ast_idx = hw_peer_id;
3152 			vdev = peer->vdev;
3153 			/* Only check for STA Vdev and peer is not for TDLS */
3154 			if (wlan_op_mode_sta == vdev->opmode &&
3155 			    !peer->is_tdls_peer) {
3156 				if (qdf_mem_cmp(peer->mac_addr.raw,
3157 						vdev->mac_addr.raw,
3158 						QDF_MAC_ADDR_SIZE) != 0) {
3159 					dp_info("%pK: STA vdev bss_peer", soc);
3160 					peer->bss_peer = 1;
3161 					if (peer->txrx_peer)
3162 						peer->txrx_peer->bss_peer = 1;
3163 				}
3164 
3165 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
3166 					ast_hash, hw_peer_id);
3167 				vdev->bss_ast_hash = ast_hash;
3168 				vdev->bss_ast_idx = hw_peer_id;
3169 
3170 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3171 								     peer_map);
3172 			}
3173 
3174 			/* Add an ast entry in case the self ast entry was
3175 			 * deleted due to a DP CP sync issue.
3176 			 *
3177 			 * self_ast_entry is modified in the peer create
3178 			 * and peer unmap paths, which cannot run in
3179 			 * parallel with peer map, so no lock is needed
3180 			 * before referring to it.
3181 			 */
3182 			if (!soc->ast_offload_support &&
3183 				!peer->self_ast_entry) {
3184 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
3185 					QDF_MAC_ADDR_REF(peer_mac_addr));
3186 				dp_peer_add_ast(soc, peer,
3187 						peer_mac_addr,
3188 						type, 0);
3189 			}
3190 
3191 			/* If peer setup and hence rx_tid setup got called
3192 			 * before htt peer map then Qref write to LUT did
3193 			 * not happen in rx_tid setup as peer_id was invalid.
3194 			 * So defer Qref write to peer map handler. Check if
3195 			 * rx_tid qdesc for tid 0 is already set up and
3196 			 * perform qref write to LUT for Tid 0 and 16.
3197 			 */
3198 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3199 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
3200 			    !IS_MLO_DP_LINK_PEER(peer)) {
3201 				hal_reo_shared_qaddr_write(soc->hal_soc,
3202 							   peer_id,
3203 							   0,
3204 							   peer->rx_tid[0].hw_qdesc_paddr);
3205 				hal_reo_shared_qaddr_write(soc->hal_soc,
3206 							   peer_id,
3207 							   DP_NON_QOS_TID,
3208 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3209 			}
3210 		}
3211 
3212 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3213 				      vdev_id, ast_hash, is_wds);
3214 	}
3215 
3216 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
3217 
3218 	/*
3219 	 * If AST offload and host AST DB is enabled, populate AST entries on
3220 	 * host based on peer map event from FW
3221 	 */
3222 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3223 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
3224 					 hw_peer_id, vdev_id,
3225 					 ast_hash, is_wds);
3226 	}
3227 
3228 	return err;
3229 }
3230 
3231 /**
3232  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
3233  * @soc: generic soc handle
3234  * @peer_id: peer_id from firmware
3235  * @vdev_id: vdev ID
3236  * @mac_addr: mac address of the peer or wds entry
3237  * @is_wds: flag to indicate peer unmap event for WDS ast entry
3238  * @free_wds_count: number of wds entries freed by FW with peer delete
3239  *
3240  * Return: None
3241  */
3242 void
3243 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3244 			 uint8_t vdev_id, uint8_t *mac_addr,
3245 			 uint8_t is_wds, uint32_t free_wds_count)
3246 {
3247 	struct dp_peer *peer;
3248 	struct dp_vdev *vdev = NULL;
3249 
3250 	/*
3251 	 * If FW AST offload is enabled and host AST DB is enabled,
3252 	 * the AST entries are created during peer map from FW.
3253 	 */
3254 	if (soc->ast_offload_support && is_wds) {
3255 		if (!soc->host_ast_db_enable)
3256 			return;
3257 	}
3258 
3259 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3260 
3261 	/*
3262 	 * Currently peer IDs are assigned for vdevs as well as peers.
3263 	 * If the peer ID is for a vdev, then the peer pointer stored
3264 	 * in peer_id_to_obj_map will be NULL.
3265 	 */
3266 	if (!peer) {
3267 		dp_err("Received unmap event for invalid peer_id %u",
3268 		       peer_id);
3269 		return;
3270 	}
3271 
3272 	/* If V2 peer map messages are enabled, the AST entry has to be
3273 	 * freed here.
3274 	 */
3275 	if (is_wds) {
3276 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3277 						   mac_addr)) {
3278 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3279 			return;
3280 		}
3281 
3282 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3283 			  peer, peer->peer_id,
3284 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3285 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3286 			  is_wds);
3287 
3288 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3289 		return;
3290 	}
3291 
3292 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3293 
3294 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3295 					       peer, mac_addr, 0, peer_id,
3296 					       0, 0, vdev_id);
3297 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3298 		soc, peer_id, peer);
3299 
3300 	/* Clear entries in Qref LUT */
3301 	/* TODO: Check if this is to be called from
3302 	 * dp_peer_delete for MLO case if there is race between
3303 	 * new peer id assignment and still not having received
3304 	 * peer unmap for MLD peer with same peer id.
3305 	 */
3306 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3307 
3308 	vdev = peer->vdev;
3309 
3310 	/* only if peer is in STA mode and not tdls peer */
3311 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3312 		bool peer_map = false;
3313 
3314 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3315 	}
3316 
3317 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3318 
3319 	if (soc->arch_ops.dp_partner_chips_unmap)
3320 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3321 
3322 	peer->peer_id = HTT_INVALID_PEER;
3323 
3324 	/*
3325 	 * Reset ast flow mapping table
3326 	 */
3327 	if (!soc->ast_offload_support)
3328 		dp_peer_reset_flowq_map(peer);
3329 
3330 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3331 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3332 				peer_id, vdev_id, mac_addr);
3333 	}
3334 
3335 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3336 
3337 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3338 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3339 	/*
3340 	 * Remove a reference to the peer.
3341 	 * If there are no more references, delete the peer object.
3342 	 */
3343 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3344 }
3345 
3346 #ifdef WLAN_FEATURE_11BE_MLO
3347 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3348 {
3349 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3350 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3351 	uint8_t vdev_id = DP_VDEV_ALL;
3352 	uint8_t is_wds = 0;
3353 
3354 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3355 					       NULL, mac_addr, 0, peer_id,
3356 					       0, 0, vdev_id);
3357 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3358 		soc, peer_id);
3359 
3360 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3361 				 mac_addr, is_wds,
3362 				 DP_PEER_WDS_COUNT_INVALID);
3363 }
3364 #endif
3365 
3366 #ifndef AST_OFFLOAD_ENABLE
3367 void
3368 dp_peer_find_detach(struct dp_soc *soc)
3369 {
3370 	dp_soc_wds_detach(soc);
3371 	dp_peer_find_map_detach(soc);
3372 	dp_peer_find_hash_detach(soc);
3373 	dp_peer_ast_hash_detach(soc);
3374 	dp_peer_ast_table_detach(soc);
3375 	dp_peer_mec_hash_detach(soc);
3376 }
3377 #else
3378 void
3379 dp_peer_find_detach(struct dp_soc *soc)
3380 {
3381 	dp_peer_find_map_detach(soc);
3382 	dp_peer_find_hash_detach(soc);
3383 }
3384 #endif
3385 
3386 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
3387 	union hal_reo_status *reo_status)
3388 {
3389 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
3390 
3391 	if ((reo_status->rx_queue_status.header.status !=
3392 		HAL_REO_CMD_SUCCESS) &&
3393 		(reo_status->rx_queue_status.header.status !=
3394 		HAL_REO_CMD_DRAIN)) {
3395 		/* Should not happen normally. Just print error for now */
3396 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
3397 			    soc, reo_status->rx_queue_status.header.status,
3398 			    rx_tid->tid);
3399 	}
3400 }
3401 
3402 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
3403 {
3404 	struct ol_if_ops *ol_ops = NULL;
3405 	bool is_roaming = false;
3406 	uint8_t vdev_id = -1;
3407 	struct cdp_soc_t *soc;
3408 
3409 	if (!peer) {
3410 		dp_peer_info("Peer is NULL. No roaming possible");
3411 		return false;
3412 	}
3413 
3414 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
3415 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
3416 
3417 	if (ol_ops && ol_ops->is_roam_inprogress) {
3418 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
3419 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
3420 	}
3421 
3422 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
3423 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
3424 
3425 	return is_roaming;
3426 }
3427 
3428 #ifdef WLAN_FEATURE_11BE_MLO
3429 /**
3430  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
3431  *			     setup is necessary
3432  * @peer: DP peer handle
3433  *
3434  * Return: true - allow, false - disallow
3435  */
3436 static inline
3437 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3438 {
3439 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3440 		return false;
3441 
3442 	return true;
3443 }
3444 
3445 /**
3446  * dp_rx_tid_update_allow() - check if rx_tid update needed
3447  * @peer: DP peer handle
3448  *
3449  * Return: true - allow, false - disallow
3450  */
3451 static inline
3452 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3453 {
3454 	/* not as expected for MLO connection link peer */
3455 	if (IS_MLO_DP_LINK_PEER(peer)) {
3456 		QDF_BUG(0);
3457 		return false;
3458 	}
3459 
3460 	return true;
3461 }
3462 #else
3463 static inline
3464 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3465 {
3466 	return true;
3467 }
3468 
3469 static inline
3470 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3471 {
3472 	return true;
3473 }
3474 #endif
3475 
3476 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
3477 					 ba_window_size, uint32_t start_seq,
3478 					 bool bar_update)
3479 {
3480 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3481 	struct dp_soc *soc = peer->vdev->pdev->soc;
3482 	struct hal_reo_cmd_params params;
3483 
3484 	if (!dp_rx_tid_update_allow(peer)) {
3485 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3486 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3487 		return QDF_STATUS_E_FAILURE;
3488 	}
3489 
3490 	qdf_mem_zero(&params, sizeof(params));
3491 
3492 	params.std.need_status = 1;
3493 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3494 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3495 	params.u.upd_queue_params.update_ba_window_size = 1;
3496 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3497 
3498 	if (start_seq < IEEE80211_SEQ_MAX) {
3499 		params.u.upd_queue_params.update_ssn = 1;
3500 		params.u.upd_queue_params.ssn = start_seq;
3501 	} else {
3502 		dp_set_ssn_valid_flag(&params, 0);
3503 	}
3504 
3505 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3506 			    dp_rx_tid_update_cb, rx_tid)) {
3507 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3508 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3509 	}
3510 
3511 	rx_tid->ba_win_size = ba_window_size;
3512 
3513 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3514 		return QDF_STATUS_E_PERM;
3515 
3516 	if (!bar_update)
3517 		dp_peer_rx_reorder_queue_setup(soc, peer,
3518 					       tid, ba_window_size);
3519 
3520 	return QDF_STATUS_SUCCESS;
3521 }
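
/*
 * Illustrative caller (a hedged sketch): on an ADDBA-style BA window
 * change for @tid with buffer size "buf_sz" and start sequence "ssn"
 * (both assumptions for the example), the reorder queue would be
 * updated via:
 *
 *	status = dp_rx_tid_update_wifi3(peer, tid, buf_sz, ssn, false);
 */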
3522 
3523 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3524 /*
3525  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3526  *                                    the deferred list
3527  * @soc: Datapath soc handle
3528  * @freedesc: REO DESC reference that needs to be freed
3529  *
3530  * Return: true if enqueued, else false
3531  */
3532 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3533 					   struct reo_desc_list_node *freedesc)
3534 {
3535 	struct reo_desc_deferred_freelist_node *desc;
3536 
3537 	if (!qdf_atomic_read(&soc->cmn_init_done))
3538 		return false;
3539 
3540 	desc = qdf_mem_malloc(sizeof(*desc));
3541 	if (!desc)
3542 		return false;
3543 
3544 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3545 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3546 	desc->hw_qdesc_vaddr_unaligned =
3547 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3548 	desc->free_ts = qdf_get_system_timestamp();
3549 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3550 
3551 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3552 	if (!soc->reo_desc_deferred_freelist_init) {
3553 		qdf_mem_free(desc);
3554 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3555 		return false;
3556 	}
3557 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3558 			     (qdf_list_node_t *)desc);
3559 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3560 
3561 	return true;
3562 }
3563 
3564 /*
3565  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
3566  *                            based on time threshold
3567  * @soc: Datapath soc handle
3568  *
3569  * Return: None
3571  */
3572 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3573 {
3574 	struct reo_desc_deferred_freelist_node *desc;
3575 	unsigned long curr_ts = qdf_get_system_timestamp();
3576 
3577 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3578 
3579 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3580 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3581 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3582 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3583 				      (qdf_list_node_t **)&desc);
3584 
3585 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3586 
3587 		qdf_mem_unmap_nbytes_single(soc->osdev,
3588 					    desc->hw_qdesc_paddr,
3589 					    QDF_DMA_BIDIRECTIONAL,
3590 					    desc->hw_qdesc_alloc_size);
3591 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3592 		qdf_mem_free(desc);
3593 
3594 		curr_ts = qdf_get_system_timestamp();
3595 	}
3596 
3597 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3598 }
3599 #else
3600 static inline bool
3601 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3602 			       struct reo_desc_list_node *freedesc)
3603 {
3604 	return false;
3605 }
3606 
3607 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3608 {
3609 }
3610 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
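
/* Illustrative sketch (not compiled): the deferred freelist above is a
 * timestamped FIFO - entries are reclaimed only once they have aged past
 * REO_DESC_DEFERRED_FREE_MS. A minimal standalone version of the same
 * pattern, with hypothetical names, looks like this.
 */
#if 0
#include <stdlib.h>

struct deferred_node {
	struct deferred_node *next;
	unsigned long free_ts;		/* enqueue timestamp (ms) */
	void *payload;
};

/* Reap every node older than threshold_ms; stop at the first young node,
 * since nodes are enqueued at the tail in timestamp order.
 */
static void deferred_reap(struct deferred_node **head, unsigned long now_ms,
			  unsigned long threshold_ms)
{
	while (*head && now_ms > (*head)->free_ts + threshold_ms) {
		struct deferred_node *n = *head;

		*head = n->next;
		free(n->payload);
		free(n);
	}
}
#endif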
3611 
3612 /*
3613  * dp_reo_desc_free() - Callback to free the REO descriptor memory
3614  * after the HW cache flush completes
3615  *
3616  * @soc: DP SOC handle
3617  * @cb_ctxt: Callback context
3618  * @reo_status: REO command status
3619  */
3620 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3621 	union hal_reo_status *reo_status)
3622 {
3623 	struct reo_desc_list_node *freedesc =
3624 		(struct reo_desc_list_node *)cb_ctxt;
3625 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3626 	unsigned long curr_ts = qdf_get_system_timestamp();
3627 
3628 	if ((reo_status->fl_cache_status.header.status !=
3629 		HAL_REO_CMD_SUCCESS) &&
3630 		(reo_status->fl_cache_status.header.status !=
3631 		HAL_REO_CMD_DRAIN)) {
3632 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
3633 			    soc, reo_status->fl_cache_status.header.status,
3634 			    freedesc->rx_tid.tid);
3635 	}
3636 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3637 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3638 		     rx_tid->tid);
3639 
3640 	/* REO desc is enqueued to be freed at a later point
3641 	 * in time, just free the freedesc alone and return
3642 	 */
3643 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3644 		goto out;
3645 
3646 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3647 
3648 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
3649 	qdf_mem_unmap_nbytes_single(soc->osdev,
3650 		rx_tid->hw_qdesc_paddr,
3651 		QDF_DMA_BIDIRECTIONAL,
3652 		rx_tid->hw_qdesc_alloc_size);
3653 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3654 out:
3655 	qdf_mem_free(freedesc);
3656 }
3657 
3658 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3659 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3660 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3661 {
3662 	if (dma_addr < 0x50000000)
3663 		return QDF_STATUS_E_FAILURE;
3664 	else
3665 		return QDF_STATUS_SUCCESS;
3666 }
3667 #else
3668 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3669 {
3670 	return QDF_STATUS_SUCCESS;
3671 }
3672 #endif
3673 
3674 /*
3675  * dp_rx_tid_setup_wifi3() - Setup receive TID state
3676  * @peer: Datapath peer handle
3677  * @tid: TID
3678  * @ba_window_size: BlockAck window size
3679  * @start_seq: Starting sequence number
3680  *
3681  * Return: QDF_STATUS code
3682  */
3683 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3684 				 uint32_t ba_window_size, uint32_t start_seq)
3685 {
3686 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3687 	struct dp_vdev *vdev = peer->vdev;
3688 	struct dp_soc *soc = vdev->pdev->soc;
3689 	uint32_t hw_qdesc_size;
3690 	uint32_t hw_qdesc_align;
3691 	int hal_pn_type;
3692 	void *hw_qdesc_vaddr;
3693 	uint32_t alloc_tries = 0;
3694 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3695 	struct dp_txrx_peer *txrx_peer;
3696 
3697 	if (!qdf_atomic_read(&peer->is_default_route_set))
3698 		return QDF_STATUS_E_FAILURE;
3699 
3700 	if (!dp_rx_tid_setup_allow(peer)) {
3701 		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
3702 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3703 		goto send_wmi_reo_cmd;
3704 	}
3705 
3706 	rx_tid->ba_win_size = ba_window_size;
3707 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3708 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3709 			start_seq, false);
3710 	rx_tid->delba_tx_status = 0;
3711 	rx_tid->ppdu_id_2k = 0;
3712 	rx_tid->num_of_addba_req = 0;
3713 	rx_tid->num_of_delba_req = 0;
3714 	rx_tid->num_of_addba_resp = 0;
3715 	rx_tid->num_addba_rsp_failed = 0;
3716 	rx_tid->num_addba_rsp_success = 0;
3717 	rx_tid->delba_tx_success_cnt = 0;
3718 	rx_tid->delba_tx_fail_cnt = 0;
3719 	rx_tid->statuscode = 0;
3720 
3721 	/* TODO: Allocating HW queue descriptors based on max BA window size
3722 	 * for all QOS TIDs so that same descriptor can be used later when
3723 	 * ADDBA request is received. This should be changed to allocate HW
3724 	 * queue descriptors based on BA window size being negotiated (0 for
3725 	 * non BA cases), and reallocate when BA window size changes and also
3726 	 * send WMI message to FW to change the REO queue descriptor in Rx
3727 	 * peer entry as part of dp_rx_tid_update.
3728 	 */
3729 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3730 					       ba_window_size, tid);
3731 
3732 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3733 	/* To avoid unnecessary extra allocation for alignment, try allocating
3734 	 * exact size and see if we already have aligned address.
3735 	 */
3736 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3737 
3738 try_desc_alloc:
3739 	rx_tid->hw_qdesc_vaddr_unaligned =
3740 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3741 
3742 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3743 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3744 			    soc, tid);
3745 		return QDF_STATUS_E_NOMEM;
3746 	}
3747 
3748 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3749 		hw_qdesc_align) {
3750 		/* Address allocated above is not aligned. Allocate extra
3751 		 * memory for alignment
3752 		 */
3753 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3754 		rx_tid->hw_qdesc_vaddr_unaligned =
3755 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3756 					hw_qdesc_align - 1);
3757 
3758 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3759 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3760 				    soc, tid);
3761 			return QDF_STATUS_E_NOMEM;
3762 		}
3763 
3764 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3765 			rx_tid->hw_qdesc_vaddr_unaligned,
3766 			hw_qdesc_align);
3767 
3768 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3769 			      soc, rx_tid->hw_qdesc_alloc_size,
3770 			      hw_qdesc_vaddr);
3771 
3772 	} else {
3773 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3774 	}
3775 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3776 
3777 	txrx_peer = dp_get_txrx_peer(peer);
3778 
3779 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3780 	 * Currently this is set based on htt indication
3781 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3782 	 */
3783 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
3784 	case cdp_sec_type_tkip_nomic:
3785 	case cdp_sec_type_aes_ccmp:
3786 	case cdp_sec_type_aes_ccmp_256:
3787 	case cdp_sec_type_aes_gcmp:
3788 	case cdp_sec_type_aes_gcmp_256:
3789 		hal_pn_type = HAL_PN_WPA;
3790 		break;
3791 	case cdp_sec_type_wapi:
3792 		if (vdev->opmode == wlan_op_mode_ap)
3793 			hal_pn_type = HAL_PN_WAPI_EVEN;
3794 		else
3795 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3796 		break;
3797 	default:
3798 		hal_pn_type = HAL_PN_NONE;
3799 		break;
3800 	}
3801 
3802 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3803 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3804 		vdev->vdev_stats_id);
3805 
3806 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3807 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3808 		&(rx_tid->hw_qdesc_paddr));
3809 
3810 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3811 			QDF_STATUS_SUCCESS) {
3812 		if (alloc_tries++ < 10) {
3813 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3814 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3815 			goto try_desc_alloc;
3816 		} else {
3817 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3818 				    soc, tid);
3819 			status = QDF_STATUS_E_NOMEM;
3820 			goto error;
3821 		}
3822 	}
3823 
3824 send_wmi_reo_cmd:
3825 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3826 		status = QDF_STATUS_E_PERM;
3827 		goto error;
3828 	}
3829 
3830 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3831 						tid, ba_window_size);
3832 	if (QDF_IS_STATUS_SUCCESS(status))
3833 		return status;
3834 
3835 error:
3836 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3837 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3838 		    QDF_STATUS_SUCCESS)
3839 			qdf_mem_unmap_nbytes_single(
3840 				soc->osdev,
3841 				rx_tid->hw_qdesc_paddr,
3842 				QDF_DMA_BIDIRECTIONAL,
3843 				rx_tid->hw_qdesc_alloc_size);
3844 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3845 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3846 		rx_tid->hw_qdesc_paddr = 0;
3847 	}
3848 	return status;
3849 }
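
/* Illustrative sketch (not compiled): the descriptor allocation above
 * first tries the exact size and, only if the returned pointer is not
 * naturally aligned, re-allocates with (align - 1) extra bytes and rounds
 * the pointer up (the qdf_align() step), keeping the unaligned pointer
 * around for the eventual free. The same trick in standalone C
 * (align must be a power of two):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

static void *alloc_aligned(size_t size, size_t align, void **unaligned)
{
	void *p = malloc(size);

	if (!p)
		return NULL;
	if (((uintptr_t)p % align) == 0) {
		*unaligned = p;			/* already aligned, no waste */
		return p;
	}
	free(p);
	p = malloc(size + align - 1);		/* over-allocate for alignment */
	if (!p)
		return NULL;
	*unaligned = p;				/* pass this to free() later */
	return (void *)(((uintptr_t)p + align - 1) &
			~(uintptr_t)(align - 1));
}
#endif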
3850 
3851 #ifdef DP_UMAC_HW_RESET_SUPPORT
3852 static
3853 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
3854 {
3855 	int tid;
3856 
3857 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
3858 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3859 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
3860 
3861 		if (vaddr)
3862 			dp_reset_rx_reo_tid_queue(soc, vaddr,
3863 						  rx_tid->hw_qdesc_alloc_size);
3864 	}
3865 }
3866 
3867 void dp_reset_tid_q_setup(struct dp_soc *soc)
3868 {
3869 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
3870 }
3871 #endif
3872 #ifdef REO_DESC_DEFER_FREE
3873 /*
3874  * dp_reo_desc_clean_up() - If cmd to flush base desc fails add
3875  * desc back to freelist and defer the deletion
3876  *
3877  * @soc: DP SOC handle
3878  * @desc: Base descriptor to be freed
3879  * @reo_status: REO command status
3880  */
3881 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3882 				 struct reo_desc_list_node *desc,
3883 				 union hal_reo_status *reo_status)
3884 {
3885 	desc->free_ts = qdf_get_system_timestamp();
3886 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3887 	qdf_list_insert_back(&soc->reo_desc_freelist,
3888 			     (qdf_list_node_t *)desc);
3889 }
3890 
3891 /*
3892  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3893  * queued to the cmd ring to avoid a REO hang
3894  *
3895  * @list_size: REO desc list size to be cleaned
3896  */
3897 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3898 {
3899 	unsigned long curr_ts = qdf_get_system_timestamp();
3900 
3901 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3902 		dp_err_log("%lu:freedesc number %d in freelist",
3903 			   curr_ts, *list_size);
3904 		/* limit the batch queue size */
3905 		*list_size = REO_DESC_FREELIST_SIZE;
3906 	}
3907 }
3908 #else
3909 /*
3910  * dp_reo_desc_clean_up() - If sending the REO command to flush the
3911  * cache fails, free the base REO desc anyway
3912  *
3913  * @soc: DP SOC handle
3914  * @desc: Base descriptor to be freed
3915  * @reo_status: REO command status
3916  */
3917 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3918 				 struct reo_desc_list_node *desc,
3919 				 union hal_reo_status *reo_status)
3920 {
3921 	if (reo_status) {
3922 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3923 		reo_status->fl_cache_status.header.status = 0;
3924 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3925 	}
3926 }
3927 
3928 /*
3929  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
3930  * queued to the cmd ring to avoid a REO hang
3931  *
3932  * @list_size: REO desc list size to be cleaned
3933  */
3934 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3935 {
3936 }
3937 #endif
3938 
3939 /*
3940  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3941  * cmd and re-insert desc into free list if send fails.
3942  *
3943  * @soc: DP SOC handle
3944  * @desc: desc with resend update cmd flag set
3945  * @rx_tid: Desc RX tid associated with update cmd for resetting
3946  * valid field to 0 in h/w
3947  *
3948  * Return: QDF status
3949  */
3950 static QDF_STATUS
3951 dp_resend_update_reo_cmd(struct dp_soc *soc,
3952 			 struct reo_desc_list_node *desc,
3953 			 struct dp_rx_tid *rx_tid)
3954 {
3955 	struct hal_reo_cmd_params params;
3956 
3957 	qdf_mem_zero(&params, sizeof(params));
3958 	params.std.need_status = 1;
3959 	params.std.addr_lo =
3960 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3961 	params.std.addr_hi =
3962 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3963 	params.u.upd_queue_params.update_vld = 1;
3964 	params.u.upd_queue_params.vld = 0;
3965 	desc->resend_update_reo_cmd = false;
3966 	/*
3967 	 * If the cmd send fails then set resend_update_reo_cmd flag
3968 	 * and insert the desc at the end of the free list to retry.
3969 	 */
3970 	if (dp_reo_send_cmd(soc,
3971 			    CMD_UPDATE_RX_REO_QUEUE,
3972 			    &params,
3973 			    dp_rx_tid_delete_cb,
3974 			    (void *)desc)
3975 	    != QDF_STATUS_SUCCESS) {
3976 		desc->resend_update_reo_cmd = true;
3977 		desc->free_ts = qdf_get_system_timestamp();
3978 		qdf_list_insert_back(&soc->reo_desc_freelist,
3979 				     (qdf_list_node_t *)desc);
3980 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3981 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3982 		return QDF_STATUS_E_FAILURE;
3983 	}
3984 
3985 	return QDF_STATUS_SUCCESS;
3986 }
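
/* Illustrative sketch (not compiled): the helper above is a
 * "send or park for retry" pattern - clear the retry flag, attempt the
 * send, and on failure re-flag the node and push it to the back of the
 * list so a later callback pass retries it. Condensed, with hypothetical
 * names:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct retry_node {
	struct retry_node *next;
	bool needs_resend;
};

struct retry_list {
	struct retry_node *head, *tail;
};

static void retry_list_push_back(struct retry_list *l, struct retry_node *n)
{
	n->next = NULL;
	if (l->tail)
		l->tail->next = n;
	else
		l->head = n;
	l->tail = n;
}

/* try_send() stands in for dp_reo_send_cmd(); returns 0 on success */
static int send_or_requeue(struct retry_list *l, struct retry_node *n,
			   int (*try_send)(struct retry_node *))
{
	n->needs_resend = false;
	if (try_send(n) != 0) {
		n->needs_resend = true;	/* retried on a later pass */
		retry_list_push_back(l, n);
		return -1;
	}
	return 0;
}
#endif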
3987 
3988 /*
3989  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
3990  * after deleting the entries (ie., setting valid=0)
3991  *
3992  * @soc: DP SOC handle
3993  * @cb_ctxt: Callback context
3994  * @reo_status: REO command status
3995  */
3996 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3997 			 union hal_reo_status *reo_status)
3998 {
3999 	struct reo_desc_list_node *freedesc =
4000 		(struct reo_desc_list_node *)cb_ctxt;
4001 	uint32_t list_size;
4002 	struct reo_desc_list_node *desc;
4003 	unsigned long curr_ts = qdf_get_system_timestamp();
4004 	uint32_t desc_size, tot_desc_size;
4005 	struct hal_reo_cmd_params params;
4006 	bool flush_failure = false;
4007 
4008 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
4009 
4010 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
4011 		qdf_mem_zero(reo_status, sizeof(*reo_status));
4012 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
4013 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
4014 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
4015 		return;
4016 	} else if (reo_status->rx_queue_status.header.status !=
4017 		HAL_REO_CMD_SUCCESS) {
4018 		/* Should not happen normally. Just print error for now */
4019 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
4020 			   reo_status->rx_queue_status.header.status,
4021 			   freedesc->rx_tid.tid);
4022 	}
4023 
4024 	dp_peer_info("%pK: rx_tid: %d status: %d",
4025 		     soc, freedesc->rx_tid.tid,
4026 		     reo_status->rx_queue_status.header.status);
4027 
4028 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4029 	freedesc->free_ts = curr_ts;
4030 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
4031 		(qdf_list_node_t *)freedesc, &list_size);
4032 
4033 	/* The MCL path adds the desc back to reo_desc_freelist when a REO
4034 	 * FLUSH fails. This can leave more pending REO queues in the free
4035 	 * list than the REO_CMD_RING max size, flooding the REO CMD ring
4036 	 * and putting REO HW in an unexpected state, so the number of REO
4037 	 * cmds issued in one batch operation must be limited.
4038 	 */
4039 	dp_reo_limit_clean_batch_sz(&list_size);
4040 
4041 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
4042 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
4043 		((list_size >= REO_DESC_FREELIST_SIZE) ||
4044 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
4045 		(desc->resend_update_reo_cmd && list_size))) {
4046 		struct dp_rx_tid *rx_tid;
4047 
4048 		qdf_list_remove_front(&soc->reo_desc_freelist,
4049 				(qdf_list_node_t **)&desc);
4050 		list_size--;
4051 		rx_tid = &desc->rx_tid;
4052 
4053 		/* First process descs with resend_update_reo_cmd set */
4054 		if (desc->resend_update_reo_cmd) {
4055 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
4056 			    QDF_STATUS_SUCCESS)
4057 				break;
4058 			else
4059 				continue;
4060 		}
4061 
4062 		/* Flush and invalidate REO descriptor from HW cache: Base and
4063 		 * extension descriptors should be flushed separately */
4064 		if (desc->pending_ext_desc_size)
4065 			tot_desc_size = desc->pending_ext_desc_size;
4066 		else
4067 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
4068 		/* Get base descriptor size by passing non-qos TID */
4069 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
4070 						   DP_NON_QOS_TID);
4071 
4072 		/* Flush reo extension descriptors */
4073 		while ((tot_desc_size -= desc_size) > 0) {
4074 			qdf_mem_zero(&params, sizeof(params));
4075 			params.std.addr_lo =
4076 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
4077 				tot_desc_size) & 0xffffffff;
4078 			params.std.addr_hi =
4079 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4080 
4081 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4082 							CMD_FLUSH_CACHE,
4083 							&params,
4084 							NULL,
4085 							NULL)) {
4086 				dp_info_rl("fail to send CMD_FLUSH_CACHE: "
4087 					   "tid %d desc %pK", rx_tid->tid,
4088 					   (void *)(rx_tid->hw_qdesc_paddr));
4089 				desc->pending_ext_desc_size = tot_desc_size +
4090 								      desc_size;
4091 				dp_reo_desc_clean_up(soc, desc, reo_status);
4092 				flush_failure = true;
4093 				break;
4094 			}
4095 		}
4096 
4097 		if (flush_failure)
4098 			break;
4099 		else
4100 			desc->pending_ext_desc_size = desc_size;
4101 
4102 		/* Flush base descriptor */
4103 		qdf_mem_zero(&params, sizeof(params));
4104 		params.std.need_status = 1;
4105 		params.std.addr_lo =
4106 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
4107 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4108 
4109 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4110 							  CMD_FLUSH_CACHE,
4111 							  &params,
4112 							  dp_reo_desc_free,
4113 							  (void *)desc)) {
4114 			union hal_reo_status reo_status;
4115 			/*
4116 			 * If dp_reo_send_cmd return failure, related TID queue desc
4117 			 * If dp_reo_send_cmd returns failure, the related TID
4118 			 * queue desc should be unmapped, and the local reo_desc
4119 			 * together with the TID queue desc freed accordingly.
4120 			 *
4121 			 * Here, invoke the desc_free function directly to clean up.
4122 			 *
4123 			 * In the MCL path, add the desc back to the free desc
4124 			 * list instead and defer the deletion.
4125 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
4126 				   rx_tid->tid);
4127 			dp_reo_desc_clean_up(soc, desc, &reo_status);
4128 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4129 			break;
4130 		}
4131 	}
4132 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4133 
4134 	dp_reo_desc_defer_free(soc);
4135 }
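
/* Illustrative sketch (not compiled): the flush loop above walks the
 * queue descriptor from the tail extension descriptor down to the first
 * one, one base-descriptor-sized chunk at a time, and the base descriptor
 * at offset 0 is flushed last (with need_status set). With a hypothetical
 * base size of 128 bytes and a total size of 512 bytes, the flush offsets
 * come out as 384, 256, 128 and then 0:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t desc_size = 128;	/* hypothetical base desc size */
	uint32_t tot_desc_size = 512;	/* hypothetical total qdesc size */
	uint64_t paddr = 0x50000000;	/* hypothetical qdesc base paddr */

	/* extension descriptors, flushed tail first */
	while ((tot_desc_size -= desc_size) > 0)
		printf("flush ext desc at 0x%llx\n",
		       (unsigned long long)(paddr + tot_desc_size));

	/* base descriptor flushed last, at offset 0 */
	printf("flush base desc at 0x%llx\n", (unsigned long long)paddr);
	return 0;
}
#endif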
4136 
4137 /*
4138  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
4139  * @peer: Datapath peer handle
4140  * @tid: TID
4141  *
4142  * Return: 0 on success, error code on failure
4143  */
4144 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
4145 {
4146 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
4147 	struct dp_soc *soc = peer->vdev->pdev->soc;
4148 	struct hal_reo_cmd_params params;
4149 	struct reo_desc_list_node *freedesc =
4150 		qdf_mem_malloc(sizeof(*freedesc));
4151 
4152 	if (!freedesc) {
4153 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
4154 			    soc, tid);
4155 		qdf_assert(0);
4156 		return -ENOMEM;
4157 	}
4158 
4159 	freedesc->rx_tid = *rx_tid;
4160 	freedesc->resend_update_reo_cmd = false;
4161 
4162 	qdf_mem_zero(&params, sizeof(params));
4163 
4164 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
4165 
4166 	params.std.need_status = 1;
4167 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
4168 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4169 	params.u.upd_queue_params.update_vld = 1;
4170 	params.u.upd_queue_params.vld = 0;
4171 
4172 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
4173 			    dp_rx_tid_delete_cb, (void *)freedesc)
4174 		!= QDF_STATUS_SUCCESS) {
4175 		/* Defer the clean up to the call back context */
4176 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4177 		freedesc->free_ts = qdf_get_system_timestamp();
4178 		freedesc->resend_update_reo_cmd = true;
4179 		qdf_list_insert_front(&soc->reo_desc_freelist,
4180 				      (qdf_list_node_t *)freedesc);
4181 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4182 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4183 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
4184 	}
4185 
4186 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
4187 	rx_tid->hw_qdesc_alloc_size = 0;
4188 	rx_tid->hw_qdesc_paddr = 0;
4189 
4190 	return 0;
4191 }
4192 
4193 #ifdef DP_LFR
4194 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
4195 {
4196 	int tid;
4197 
4198 	for (tid = 1; tid < DP_MAX_TIDS - 1; tid++) {
4199 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
4200 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
4201 			      tid, peer, peer->local_id);
4202 	}
4203 }
4204 #else
4205 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
4206 #endif
4207 
4208 #ifdef WLAN_FEATURE_11BE_MLO
4209 /**
4210  * dp_peer_rx_tids_init() - initialize each TID's state in the peer
4211  * @peer: peer pointer
4212  *
4213  * Return: None
4214  */
4215 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4216 {
4217 	int tid;
4218 	struct dp_rx_tid *rx_tid;
4219 	struct dp_rx_tid_defrag *rx_tid_defrag;
4220 
4221 	if (!IS_MLO_DP_LINK_PEER(peer)) {
4222 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4223 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4224 
4225 			rx_tid_defrag->array = &rx_tid_defrag->base;
4226 			rx_tid_defrag->defrag_timeout_ms = 0;
4227 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4228 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4229 			rx_tid_defrag->base.head = NULL;
4230 			rx_tid_defrag->base.tail = NULL;
4231 			rx_tid_defrag->tid = tid;
4232 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
4233 		}
4234 	}
4235 
4236 	/* if this is not the first assoc link peer,
4237 	 * do not initialize rx_tids again.
4238 	 */
4239 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
4240 		return;
4241 
4242 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4243 		rx_tid = &peer->rx_tid[tid];
4244 		rx_tid->tid = tid;
4245 		rx_tid->ba_win_size = 0;
4246 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4247 	}
4248 }
4249 #else
4250 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4251 {
4252 	int tid;
4253 	struct dp_rx_tid *rx_tid;
4254 	struct dp_rx_tid_defrag *rx_tid_defrag;
4255 
4256 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4257 		rx_tid = &peer->rx_tid[tid];
4258 
4259 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4260 		rx_tid->tid = tid;
4261 		rx_tid->ba_win_size = 0;
4262 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4263 
4264 		rx_tid_defrag->base.head = NULL;
4265 		rx_tid_defrag->base.tail = NULL;
4266 		rx_tid_defrag->tid = tid;
4267 		rx_tid_defrag->array = &rx_tid_defrag->base;
4268 		rx_tid_defrag->defrag_timeout_ms = 0;
4269 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4270 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4271 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
4272 	}
4273 }
4274 #endif
4275 
4276 /*
4277  * dp_peer_rx_init() - Initialize receive TID state
4278  * @pdev: Datapath pdev
4279  * @peer: Datapath peer
4280  *
4281  * Return: None
4282  */
4282 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
4283 {
4284 	dp_peer_rx_tids_init(peer);
4285 
4286 	peer->active_ba_session_cnt = 0;
4287 	peer->hw_buffer_size = 0;
4288 	peer->kill_256_sessions = 0;
4289 
4290 	/* Setup default (non-qos) rx tid queue */
4291 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
4292 
4293 	/* Setup rx tid queue for TID 0.
4294 	 * Other queues will be set up on receiving the first packet, which
4295 	 * will cause a NULL REO queue error
4296 	 */
4297 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
4298 
4299 	/*
4300 	 * Setup the rest of TID's to handle LFR
4301 	 */
4302 	dp_peer_setup_remaining_tids(peer);
4303 
4304 	/*
4305 	 * Set security defaults: no PN check, no security. The target may
4306 	 * send a HTT SEC_IND message to overwrite these defaults.
4307 	 */
4308 	if (peer->txrx_peer)
4309 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
4310 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
4311 				cdp_sec_type_none;
4312 }
4313 
4314 /*
4315  * dp_peer_rx_cleanup() - Cleanup receive TID state
4316  * @vdev: Datapath vdev
4317  * @peer: Datapath peer
4318  *
4319  * Return: None
4320  */
4320 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4321 {
4322 	int tid;
4323 	uint32_t tid_delete_mask = 0;
4324 
4325 	if (!peer->txrx_peer)
4326 		return;
4327 
4328 	dp_info("Remove tids for peer: %pK", peer);
4329 
4330 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4331 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
4332 		struct dp_rx_tid_defrag *defrag_rx_tid =
4333 				&peer->txrx_peer->rx_tid[tid];
4334 
4335 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4336 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
4337 			/* Cleanup defrag related resource */
4338 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
4339 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
4340 		}
4341 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4342 
4343 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4344 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
4345 			dp_rx_tid_delete_wifi3(peer, tid);
4346 
4347 			tid_delete_mask |= (1 << tid);
4348 		}
4349 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4350 	}
4351 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
4352 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
4353 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
4354 			peer->vdev->pdev->pdev_id,
4355 			peer->vdev->vdev_id, peer->mac_addr.raw,
4356 			tid_delete_mask);
4357 	}
4358 #endif
4359 }
4360 
4361 /*
4362  * dp_peer_cleanup() - Cleanup peer information
4363  * @vdev: Datapath vdev
4364  * @peer: Datapath peer
4365  *
4366  * Return: None
4367  */
4367 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4368 {
4369 	enum wlan_op_mode vdev_opmode;
4370 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
4371 	struct dp_pdev *pdev = vdev->pdev;
4372 	struct dp_soc *soc = pdev->soc;
4373 
4374 	/* save vdev related member in case vdev freed */
4375 	vdev_opmode = vdev->opmode;
4376 
4377 	if (!IS_MLO_DP_MLD_PEER(peer))
4378 		dp_monitor_peer_tx_cleanup(vdev, peer);
4379 
4380 	/* cleanup the Rx reorder queues for this peer */
4381 	if (vdev_opmode != wlan_op_mode_monitor)
4382 		dp_peer_rx_cleanup(vdev, peer);
4383 
4384 	dp_peer_rx_tids_destroy(peer);
4385 
4386 	if (IS_MLO_DP_LINK_PEER(peer))
4387 		dp_link_peer_del_mld_peer(peer);
4388 	if (IS_MLO_DP_MLD_PEER(peer))
4389 		dp_mld_peer_deinit_link_peers_info(peer);
4390 
4391 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
4392 		     QDF_MAC_ADDR_SIZE);
4393 
4394 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
4395 		soc->cdp_soc.ol_ops->peer_unref_delete(
4396 				soc->ctrl_psoc,
4397 				vdev->pdev->pdev_id,
4398 				peer->mac_addr.raw, vdev_mac_addr,
4399 				vdev_opmode);
4400 }
4401 
4402 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
4403  *                                window size when a request with
4404  *                                64 window size is received.
4405  *                                This is done as a WAR since HW can
4406  *                                have only one setting per peer (64 or 256).
4407  *                                For HKv2, we use the per tid buffersize
4408  *                                setting for tids 0 to per_tid_basize_max_tid.
4409  *                                For tids above per_tid_basize_max_tid we use
4410  *                                the HKv1 method.
4411  * @peer: Datapath peer
4412  *
4413  * Return: void
4414  */
4415 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
4416 {
4417 	uint8_t delba_rcode = 0;
4418 	int tid;
4419 	struct dp_rx_tid *rx_tid = NULL;
4420 
4421 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
4422 	for (; tid < DP_MAX_TIDS; tid++) {
4423 		rx_tid = &peer->rx_tid[tid];
4424 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4425 
4426 		if (rx_tid->ba_win_size <= 64) {
4427 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4428 			continue;
4429 		} else {
4430 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
4431 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4432 				/* send delba */
4433 				if (!rx_tid->delba_tx_status) {
4434 					rx_tid->delba_tx_retry++;
4435 					rx_tid->delba_tx_status = 1;
4436 					rx_tid->delba_rcode =
4437 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
4438 					delba_rcode = rx_tid->delba_rcode;
4439 
4440 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4441 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4442 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4443 							peer->vdev->pdev->soc->ctrl_psoc,
4444 							peer->vdev->vdev_id,
4445 							peer->mac_addr.raw,
4446 							tid, delba_rcode,
4447 							CDP_DELBA_REASON_NONE);
4448 				} else {
4449 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4450 				}
4451 			} else {
4452 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4453 			}
4454 		}
4455 	}
4456 }
4457 
4458 /*
4459 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
4460 * response tx completion
4461 * @cdp_soc: Datapath soc handle
4462 * @peer_mac: Datapath peer mac address
4463 * @vdev_id: id of datapath vdev
4464 * @tid: TID number
4465 * @status: tx completion status
4466 * Return: 0 on success, error code on failure
4467 */
4468 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
4469 				      uint8_t *peer_mac,
4470 				      uint16_t vdev_id,
4471 				      uint8_t tid, int status)
4472 {
4473 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4474 					(struct dp_soc *)cdp_soc,
4475 					peer_mac, 0, vdev_id,
4476 					DP_MOD_ID_CDP);
4477 	struct dp_rx_tid *rx_tid = NULL;
4478 
4479 	if (!peer) {
4480 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4481 		goto fail;
4482 	}
4483 	rx_tid = &peer->rx_tid[tid];
4484 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4485 	if (status) {
4486 		rx_tid->num_addba_rsp_failed++;
4487 		if (rx_tid->hw_qdesc_vaddr_unaligned)
4488 			dp_rx_tid_update_wifi3(peer, tid, 1,
4489 					       IEEE80211_SEQ_MAX, false);
4490 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4491 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4492 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
4493 
4494 		goto success;
4495 	}
4496 
4497 	rx_tid->num_addba_rsp_success++;
4498 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
4499 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4500 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
4501 			    cdp_soc, tid);
4502 		goto fail;
4503 	}
4504 
4505 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
4506 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4507 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
4508 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4509 		goto fail;
4510 	}
4511 
4512 	if (dp_rx_tid_update_wifi3(peer, tid,
4513 				   rx_tid->ba_win_size,
4514 				   rx_tid->startseqnum,
4515 				   false)) {
4516 		dp_err("Failed update REO SSN");
4517 	}
4518 
4519 	dp_info("tid %u window_size %u start_seq_num %u",
4520 		tid, rx_tid->ba_win_size,
4521 		rx_tid->startseqnum);
4522 
4523 	/* First Session */
4524 	if (peer->active_ba_session_cnt == 0) {
4525 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
4526 			peer->hw_buffer_size = 256;
4527 		else if (rx_tid->ba_win_size <= 1024 &&
4528 			 rx_tid->ba_win_size > 256)
4529 			peer->hw_buffer_size = 1024;
4530 		else
4531 			peer->hw_buffer_size = 64;
4532 	}
4533 
4534 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4535 
4536 	peer->active_ba_session_cnt++;
4537 
4538 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4539 
4540 	/* Kill any session having 256 buffer size
4541 	 * when 64 buffer size request is received.
4542 	 * Also, latch on to 64 as new buffer size.
4543 	 */
4544 	if (peer->kill_256_sessions) {
4545 		dp_teardown_256_ba_sessions(peer);
4546 		peer->kill_256_sessions = 0;
4547 	}
4548 
4549 success:
4550 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4551 	return QDF_STATUS_SUCCESS;
4552 
4553 fail:
4554 	if (peer)
4555 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4556 
4557 	return QDF_STATUS_E_FAILURE;
4558 }
4559 
4560 /*
4561 * dp_addba_responsesetup_wifi3() - Set up parameters for the ADDBA response
4562 *
4563 * @cdp_soc: Datapath soc handle
4564 * @peer_mac: Datapath peer mac address
4565 * @vdev_id: id of datapath vdev
4566 * @tid: TID number
4567 * @dialogtoken: output dialogtoken
4568 * @statuscode: output status code
4569 * @buffersize: Output BA window size
4570 * @batimeout: Output BA timeout
4571 */
4572 QDF_STATUS
4573 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4574 			     uint16_t vdev_id, uint8_t tid,
4575 			     uint8_t *dialogtoken, uint16_t *statuscode,
4576 			     uint16_t *buffersize, uint16_t *batimeout)
4577 {
4578 	struct dp_rx_tid *rx_tid = NULL;
4579 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4580 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4581 						       peer_mac, 0, vdev_id,
4582 						       DP_MOD_ID_CDP);
4583 
4584 	if (!peer) {
4585 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4586 		return QDF_STATUS_E_FAILURE;
4587 	}
4588 	rx_tid = &peer->rx_tid[tid];
4589 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4590 	rx_tid->num_of_addba_resp++;
4591 	/* setup ADDBA response parameters */
4592 	*dialogtoken = rx_tid->dialogtoken;
4593 	*statuscode = rx_tid->statuscode;
4594 	*buffersize = rx_tid->ba_win_size;
4595 	*batimeout  = 0;
4596 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4597 
4598 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4599 
4600 	return status;
4601 }
4602 
4603 /* dp_check_ba_buffersize() - Check buffer size in request
4604  *                            and latch onto this size based on
4605  *                            size used in first active session.
4606  * @peer: Datapath peer
4607  * @tid: Tid
4608  * @buffersize: Block ack window size
4609  *
4610  * Return: void
4611  */
4612 static void dp_check_ba_buffersize(struct dp_peer *peer,
4613 				   uint16_t tid,
4614 				   uint16_t buffersize)
4615 {
4616 	struct dp_rx_tid *rx_tid = NULL;
4617 	struct dp_soc *soc = peer->vdev->pdev->soc;
4618 	uint16_t max_ba_window;
4619 
4620 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
4621 	dp_info("Input buffersize %d, max dp allowed %d",
4622 		buffersize, max_ba_window);
4623 	/* Adjust BA window size, restrict it to max DP allowed */
4624 	buffersize = QDF_MIN(buffersize, max_ba_window);
4625 
4626 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
4627 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4628 		soc->per_tid_basize_max_tid, tid, buffersize,
4629 		peer->hw_buffer_size);
4630 
4631 	rx_tid = &peer->rx_tid[tid];
4632 	if (soc->per_tid_basize_max_tid &&
4633 	    tid < soc->per_tid_basize_max_tid) {
4634 		rx_tid->ba_win_size = buffersize;
4635 		goto out;
4636 	} else {
4637 		if (peer->active_ba_session_cnt == 0) {
4638 			rx_tid->ba_win_size = buffersize;
4639 		} else {
4640 			if (peer->hw_buffer_size == 64) {
4641 				if (buffersize <= 64)
4642 					rx_tid->ba_win_size = buffersize;
4643 				else
4644 					rx_tid->ba_win_size = peer->hw_buffer_size;
4645 			} else if (peer->hw_buffer_size == 256) {
4646 				if (buffersize > 64) {
4647 					rx_tid->ba_win_size = buffersize;
4648 				} else {
4649 					rx_tid->ba_win_size = buffersize;
4650 					peer->hw_buffer_size = 64;
4651 					peer->kill_256_sessions = 1;
4652 				}
4653 			} else if (buffersize <= 1024) {
4654 				/*
4655 				 * Above checks are only for HK V2;
4656 				 * set the incoming buffer size for others
4657 				 */
4658 				rx_tid->ba_win_size = buffersize;
4659 			} else {
4660 				dp_err("Invalid buffer size %d", buffersize);
4661 				qdf_assert_always(0);
4662 			}
4663 		}
4664 	}
4665 
4666 out:
4667 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
4668 		rx_tid->ba_win_size,
4669 		peer->hw_buffer_size,
4670 		peer->kill_256_sessions);
4671 }
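
/* Illustrative sketch (not compiled): for tids outside the per-tid
 * buffersize range, the latch logic above reduces to the pure decision
 * function below - the first active session latches hw_buffer_size, and
 * a request <= 64 against a latched 256 re-latches to 64 and flags the
 * existing 256 sessions for teardown. Hypothetical condensed form:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint16_t latch_ba_win(uint16_t req, uint16_t *hw_buf,
			     uint32_t active_sessions, bool *kill_256)
{
	if (active_sessions == 0)
		return req;		/* first session: accept as-is */
	if (*hw_buf == 64)
		return req <= 64 ? req : *hw_buf; /* clamp to latched 64 */
	if (*hw_buf == 256) {
		if (req > 64)
			return req;
		*hw_buf = 64;		/* re-latch to 64 ... */
		*kill_256 = true;	/* ... and tear down 256 sessions */
		return req;
	}
	return req;	/* other targets: accept up to 1024 (larger asserts) */
}
#endif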
4672 
4673 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
4674 					uint8_t *peer_mac, uint16_t vdev_id,
4675 					uint8_t tid, uint16_t buffersize)
4676 {
4677 	struct dp_rx_tid *rx_tid = NULL;
4678 	struct dp_peer *peer;
4679 
4680 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4681 					      peer_mac, 0, vdev_id,
4682 					      DP_MOD_ID_CDP);
4683 	if (!peer) {
4684 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4685 		return QDF_STATUS_E_FAILURE;
4686 	}
4687 
4688 	rx_tid = &peer->rx_tid[tid];
4689 
4690 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4691 	rx_tid->ba_win_size = buffersize;
4692 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4693 
4694 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
4695 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
4696 
4697 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4698 
4699 	return QDF_STATUS_SUCCESS;
4700 }
4701 
4702 #define DP_RX_BA_SESSION_DISABLE  1
4703 
4704 /*
4705  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
4706  *
4707  * @cdp_soc: Datapath soc handle
4708  * @peer_mac: Datapath peer mac address
4709  * @vdev_id: id of datapath vdev
4710  * @dialogtoken: dialogtoken from ADDBA frame
4711  * @tid: TID number
4712  * @batimeout: BA timeout
4713  * @buffersize: BA window size
4714  * @startseqnum: Start seq. number received in BA sequence control
4715  *
4716  * Return: 0 on success, error code on failure
4717  */
4718 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4719 				  uint8_t *peer_mac,
4720 				  uint16_t vdev_id,
4721 				  uint8_t dialogtoken,
4722 				  uint16_t tid, uint16_t batimeout,
4723 				  uint16_t buffersize,
4724 				  uint16_t startseqnum)
4725 {
4726 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4727 	struct dp_rx_tid *rx_tid = NULL;
4728 	struct dp_peer *peer;
4729 
4730 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4731 					      peer_mac,
4732 					      0, vdev_id,
4733 					      DP_MOD_ID_CDP);
4734 
4735 	if (!peer) {
4736 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4737 		return QDF_STATUS_E_FAILURE;
4738 	}
4739 	rx_tid = &peer->rx_tid[tid];
4740 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4741 	rx_tid->num_of_addba_req++;
4742 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4743 	    rx_tid->hw_qdesc_vaddr_unaligned) {
4744 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4745 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4746 		peer->active_ba_session_cnt--;
4747 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4748 			      cdp_soc, tid);
4749 	}
4750 
4751 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4752 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4753 		status = QDF_STATUS_E_FAILURE;
4754 		goto fail;
4755 	}
4756 
4757 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4758 		dp_peer_info("%pK: disable BA session",
4759 			     cdp_soc);
4760 
4761 		buffersize = 1;
4762 	} else if (rx_tid->rx_ba_win_size_override) {
4763 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4764 			     rx_tid->rx_ba_win_size_override);
4765 
4766 		buffersize = rx_tid->rx_ba_win_size_override;
4767 	} else {
4768 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4769 			     buffersize);
4770 	}
4771 
4772 	dp_check_ba_buffersize(peer, tid, buffersize);
4773 
4774 	if (dp_rx_tid_setup_wifi3(peer, tid,
4775 	    rx_tid->ba_win_size, startseqnum)) {
4776 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4777 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4778 		status = QDF_STATUS_E_FAILURE;
4779 		goto fail;
4780 	}
4781 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4782 
4783 	rx_tid->dialogtoken = dialogtoken;
4784 	rx_tid->startseqnum = startseqnum;
4785 
4786 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4787 		rx_tid->statuscode = rx_tid->userstatuscode;
4788 	else
4789 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4790 
4791 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4792 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4793 
4794 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4795 
4796 fail:
4797 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4798 
4799 	return status;
4800 }
4801 
4802 /*
4803 * dp_set_addba_response() - Set a user defined ADDBA response status code
4804 *
4805 * @cdp_soc: Datapath soc handle
4806 * @peer_mac: Datapath peer mac address
4807 * @vdev_id: id of datapath vdev
4808 * @tid: TID number
4809 * @statuscode: response status code to be set
4810 */
4811 QDF_STATUS
4812 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4813 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4814 {
4815 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4816 					(struct dp_soc *)cdp_soc,
4817 					peer_mac, 0, vdev_id,
4818 					DP_MOD_ID_CDP);
4819 	struct dp_rx_tid *rx_tid;
4820 
4821 	if (!peer) {
4822 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4823 		return QDF_STATUS_E_FAILURE;
4824 	}
4825 
4826 	rx_tid = &peer->rx_tid[tid];
4827 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4828 	rx_tid->userstatuscode = statuscode;
4829 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4830 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4831 
4832 	return QDF_STATUS_SUCCESS;
4833 }
4834 
4835 /*
4836 * dp_delba_process_wifi3() - Process DELBA from peer
4837 * @cdp_soc: Datapath soc handle
4838 * @peer_mac: Datapath peer mac address
4839 * @vdev_id: id of datapath vdev
4840 * @tid: TID number
4841 * @reasoncode: Reason code received in DELBA frame
4842 *
4843 * Return: 0 on success, error code on failure
4844 */
4845 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4846 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4847 {
4848 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4849 	struct dp_rx_tid *rx_tid;
4850 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4851 					(struct dp_soc *)cdp_soc,
4852 					peer_mac, 0, vdev_id,
4853 					DP_MOD_ID_CDP);
4854 
4855 	if (!peer) {
4856 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4857 		return QDF_STATUS_E_FAILURE;
4858 	}
4859 	rx_tid = &peer->rx_tid[tid];
4860 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4861 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4862 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4863 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4864 		status = QDF_STATUS_E_FAILURE;
4865 		goto fail;
4866 	}
4867 	/* TODO: See if we can delete the existing REO queue descriptor and
4868 	 * replace it with a new one without the queue extension descriptor
4869 	 * to save memory
4870 	 */
4871 	rx_tid->delba_rcode = reasoncode;
4872 	rx_tid->num_of_delba_req++;
4873 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4874 
4875 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4876 	peer->active_ba_session_cnt--;
4877 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4878 fail:
4879 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4880 
4881 	return status;
4882 }
4883 
4884 /*
4885  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion status
4886  *
4887  * @cdp_soc: Datapath soc handle
4888  * @peer_mac: Datapath peer mac address
4889  * @vdev_id: id of datapath vdev
4890  * @tid: TID number
4891  * @status: tx completion status
4892  * Return: 0 on success, error code on failure
4893  */
4894 
4895 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4896 				 uint16_t vdev_id,
4897 				 uint8_t tid, int status)
4898 {
4899 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4900 	struct dp_rx_tid *rx_tid = NULL;
4901 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4902 					(struct dp_soc *)cdp_soc,
4903 					peer_mac, 0, vdev_id,
4904 					DP_MOD_ID_CDP);
4905 
4906 	if (!peer) {
4907 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4908 		return QDF_STATUS_E_FAILURE;
4909 	}
4910 	rx_tid = &peer->rx_tid[tid];
4911 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4912 	if (status) {
4913 		rx_tid->delba_tx_fail_cnt++;
4914 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4915 			rx_tid->delba_tx_retry = 0;
4916 			rx_tid->delba_tx_status = 0;
4917 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4918 		} else {
4919 			rx_tid->delba_tx_retry++;
4920 			rx_tid->delba_tx_status = 1;
4921 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4922 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4923 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4924 					peer->vdev->pdev->soc->ctrl_psoc,
4925 					peer->vdev->vdev_id,
4926 					peer->mac_addr.raw, tid,
4927 					rx_tid->delba_rcode,
4928 					CDP_DELBA_REASON_NONE);
4929 		}
4930 		goto end;
4931 	} else {
4932 		rx_tid->delba_tx_success_cnt++;
4933 		rx_tid->delba_tx_retry = 0;
4934 		rx_tid->delba_tx_status = 0;
4935 	}
4936 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4937 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4938 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4939 		peer->active_ba_session_cnt--;
4940 	}
4941 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4942 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4943 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4944 	}
4945 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4946 
4947 end:
4948 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4949 
4950 	return ret;
4951 }
4952 
4953 /**
4954  * dp_set_pn_check_wifi3() - enable PN check in REO for security
4955  * @soc: Datapath soc handle
4956  * @vdev_id: id of datapath vdev
4957  * @peer_mac: Datapath peer mac address
4958  * @sec_type: security type
4959  * @rx_pn: Receive pn starting number
4960  *
4961  * Return: QDF_STATUS
4963  */
4964 
4965 QDF_STATUS
4966 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4967 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4968 		      uint32_t *rx_pn)
4969 {
4970 	struct dp_pdev *pdev;
4971 	int i;
4972 	uint8_t pn_size;
4973 	struct hal_reo_cmd_params params;
4974 	struct dp_peer *peer = NULL;
4975 	struct dp_vdev *vdev = NULL;
4976 
4977 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4978 				      peer_mac, 0, vdev_id,
4979 				      DP_MOD_ID_CDP);
4980 
4981 	if (!peer) {
4982 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4983 		return QDF_STATUS_E_FAILURE;
4984 	}
4985 
4986 	vdev = peer->vdev;
4987 
4988 	if (!vdev) {
4989 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
4990 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4991 		return QDF_STATUS_E_FAILURE;
4992 	}
4993 
4994 	pdev = vdev->pdev;
4995 	qdf_mem_zero(&params, sizeof(params));
4996 
4997 	params.std.need_status = 1;
4998 	params.u.upd_queue_params.update_pn_valid = 1;
4999 	params.u.upd_queue_params.update_pn_size = 1;
5000 	params.u.upd_queue_params.update_pn = 1;
5001 	params.u.upd_queue_params.update_pn_check_needed = 1;
5002 	params.u.upd_queue_params.update_svld = 1;
5003 	params.u.upd_queue_params.svld = 0;
5004 
5005 	switch (sec_type) {
5006 	case cdp_sec_type_tkip_nomic:
5007 	case cdp_sec_type_aes_ccmp:
5008 	case cdp_sec_type_aes_ccmp_256:
5009 	case cdp_sec_type_aes_gcmp:
5010 	case cdp_sec_type_aes_gcmp_256:
5011 		params.u.upd_queue_params.pn_check_needed = 1;
5012 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
5013 		pn_size = 48;
5014 		break;
5015 	case cdp_sec_type_wapi:
5016 		params.u.upd_queue_params.pn_check_needed = 1;
5017 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
5018 		pn_size = 128;
5019 		if (vdev->opmode == wlan_op_mode_ap) {
5020 			params.u.upd_queue_params.pn_even = 1;
5021 			params.u.upd_queue_params.update_pn_even = 1;
5022 		} else {
5023 			params.u.upd_queue_params.pn_uneven = 1;
5024 			params.u.upd_queue_params.update_pn_uneven = 1;
5025 		}
5026 		break;
5027 	default:
5028 		params.u.upd_queue_params.pn_check_needed = 0;
5029 		pn_size = 0;
5030 		break;
5031 	}
5032 
5034 	for (i = 0; i < DP_MAX_TIDS; i++) {
5035 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5036 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5037 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5038 			params.std.addr_lo =
5039 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5040 			params.std.addr_hi =
5041 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5042 
5043 			if (pn_size) {
5044 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
5045 					     soc, i, rx_pn[3], rx_pn[2],
5046 					     rx_pn[1], rx_pn[0]);
5047 				params.u.upd_queue_params.update_pn_valid = 1;
5048 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
5049 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
5050 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
5051 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
5052 			}
5053 			rx_tid->pn_size = pn_size;
5054 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
5055 					    CMD_UPDATE_RX_REO_QUEUE,
5056 					    &params, dp_rx_tid_update_cb,
5057 					    rx_tid)) {
5058 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE "
5059 					   "tid %d desc %pK", rx_tid->tid,
5060 					   (void *)(rx_tid->hw_qdesc_paddr));
5061 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
5062 					     rx.err.reo_cmd_send_fail, 1);
5063 			}
5064 		} else {
5065 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
5066 		}
5067 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5068 	}
5069 
5070 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5071 
5072 	return QDF_STATUS_SUCCESS;
5073 }
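
/* Illustrative sketch (not compiled): the PN-check setup above maps the
 * cipher to a PN width (48 bits for TKIP/CCMP/GCMP, 128 bits for WAPI)
 * and programs the PN as four 32-bit words, pn_31_0 .. pn_127_96. Packing
 * a 48-bit PN counter into that layout looks like this (hypothetical
 * helper, illustration only):
 */
#if 0
#include <stdint.h>

struct pn_words {
	uint32_t pn_31_0;
	uint32_t pn_63_32;
	uint32_t pn_95_64;
	uint32_t pn_127_96;
};

static struct pn_words pn48_to_words(uint64_t pn48)
{
	struct pn_words w = { 0, 0, 0, 0 };

	w.pn_31_0  = (uint32_t)(pn48 & 0xffffffff);
	w.pn_63_32 = (uint32_t)((pn48 >> 32) & 0xffff); /* top 16 of 48 */
	/* pn_95_64 and pn_127_96 stay 0 for a 48-bit PN */
	return w;
}
#endif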
5074 
5076 /**
5077  * dp_set_key_sec_type_wifi3() - set security mode of key
5078  * @soc: Datapath soc handle
5079  * @vdev_id: id of datapath vdev
5080  * @peer_mac: Datapath peer mac address
5081  * @sec_type: security type
5082  * @is_unicast: key type (unicast or group)
5083  *
5084  * Return: QDF_STATUS
5086  */
5087 
5088 QDF_STATUS
5089 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
5090 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
5091 			  bool is_unicast)
5092 {
5093 	struct dp_peer *peer =
5094 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5095 						       peer_mac, 0, vdev_id,
5096 						       DP_MOD_ID_CDP);
5097 	int sec_index;
5098 
5099 	if (!peer) {
5100 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
5101 		return QDF_STATUS_E_FAILURE;
5102 	}
5103 
5104 	if (!peer->txrx_peer) {
5105 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5106 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
5107 		return QDF_STATUS_E_FAILURE;
5108 	}
5109 
5110 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5111 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5112 		     is_unicast ? "ucast" : "mcast", sec_type);
5113 
5114 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5115 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
5116 
5117 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5118 
5119 	return QDF_STATUS_SUCCESS;
5120 }
5121 
5122 void
5123 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
5124 		      enum cdp_sec_type sec_type, int is_unicast,
5125 		      u_int32_t *michael_key,
5126 		      u_int32_t *rx_pn)
5127 {
5128 	struct dp_peer *peer;
5129 	struct dp_txrx_peer *txrx_peer;
5130 	int sec_index;
5131 
5132 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5133 	if (!peer) {
5134 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
5135 			    peer_id);
5136 		return;
5137 	}
5138 	txrx_peer = dp_get_txrx_peer(peer);
5139 	if (!txrx_peer) {
5140 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
5141 			    peer_id);
5142 		/* release the peer reference taken by dp_peer_get_ref_by_id */
5143 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5144 		return;
5143 	}
5144 
5145 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5146 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5147 		     is_unicast ? "ucast" : "mcast", sec_type);
5148 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5149 
5150 	txrx_peer->security[sec_index].sec_type = sec_type;
5151 #ifdef notyet /* TODO: See if this is required for defrag support */
5152 	/* michael key only valid for TKIP, but for simplicity,
5153 	 * copy it anyway
5154 	 */
5155 	qdf_mem_copy(
5156 		&peer->txrx_peer->security[sec_index].michael_key[0],
5157 		michael_key,
5158 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
5159 #ifdef BIG_ENDIAN_HOST
5160 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
5161 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
5162 #endif /* BIG_ENDIAN_HOST */
5163 #endif
5164 
5165 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
5166 	if (sec_type != cdp_sec_type_wapi) {
5167 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
5168 	} else {
5169 		for (i = 0; i < DP_MAX_TIDS; i++) {
5170 			/*
5171 			 * Setting PN valid bit for WAPI sec_type,
5172 			 * since WAPI PN has to be started with predefined value
5173 			 */
5174 			peer->tids_last_pn_valid[i] = 1;
5175 			qdf_mem_copy(
5176 				(u_int8_t *) &peer->tids_last_pn[i],
5177 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
5178 			peer->tids_last_pn[i].pn128[1] =
5179 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
5180 			peer->tids_last_pn[i].pn128[0] =
5181 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
5182 		}
5183 	}
5184 #endif
5185 	/* TODO: Update HW TID queue with PN check parameters (pn type for
5186 	 * all security types and last pn for WAPI) once REO command API
5187 	 * is available
5188 	 */
5189 
5190 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5191 }
5192 
5193 #ifdef QCA_PEER_EXT_STATS
5194 /*
5195  * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
5196  *                                   stats context
5197  * @soc: DP SoC context
5198  * @txrx_peer: DP txrx peer context
5199  *
5200  * Allocate the peer delay stats context
5201  *
5202  * Return: QDF_STATUS_SUCCESS if allocation is
5203  *	   successful
5204  */
5205 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
5206 					 struct dp_txrx_peer *txrx_peer)
5207 {
5208 	uint8_t tid, ctx_id;
5209 
5210 	if (!soc || !txrx_peer) {
5211 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
5212 		dp_warn("Null soc %pK or peer %pK", soc, txrx_peer);
5213 	}
5214 
5215 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5216 		return QDF_STATUS_SUCCESS;
5217 
5218 	/*
5219 	 * Allocate memory for peer extended stats.
5220 	 */
5221 	txrx_peer->delay_stats =
5222 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
5223 	if (!txrx_peer->delay_stats) {
5224 		dp_err("Peer extended stats obj alloc failed!!");
5225 		return QDF_STATUS_E_NOMEM;
5226 	}
5227 
5228 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
5229 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
5230 			struct cdp_delay_tx_stats *tx_delay =
5231 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
5232 			struct cdp_delay_rx_stats *rx_delay =
5233 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
5234 
5235 			dp_hist_init(&tx_delay->tx_swq_delay,
5236 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
5237 			dp_hist_init(&tx_delay->hwtx_delay,
5238 				     CDP_HIST_TYPE_HW_COMP_DELAY);
5239 			dp_hist_init(&rx_delay->to_stack_delay,
5240 				     CDP_HIST_TYPE_REAP_STACK);
5241 		}
5242 	}
5243 
5244 	return QDF_STATUS_SUCCESS;
5245 }
5246 
5247 /*
5248  * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
5249  * @txrx_peer: txrx DP peer context
5250  *
5251  * Free the peer delay stats context
5252  *
5253  * Return: Void
5254  */
5255 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
5256 				     struct dp_txrx_peer *txrx_peer)
5257 {
5258 	if (!txrx_peer) {
5259 		dp_warn("peer_ext dealloc failed due to NULL peer object");
5260 		return;
5261 	}
5262 
5263 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5264 		return;
5265 
5266 	if (!txrx_peer->delay_stats)
5267 		return;
5268 
5269 	qdf_mem_free(txrx_peer->delay_stats);
5270 	txrx_peer->delay_stats = NULL;
5271 }
5272 
5273 /**
5274  * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
5275  *
5276  * @txrx_peer: dp_txrx_peer handle
5277  *
5278  * Return: void
5279  */
5280 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5281 {
5282 	if (txrx_peer->delay_stats)
5283 		qdf_mem_zero(txrx_peer->delay_stats,
5284 			     sizeof(struct dp_peer_delay_stats));
5285 }
5286 #endif
5287 
5288 #ifdef WLAN_PEER_JITTER
5289 /**
5290  * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
5291  *
5292  * @soc: Datapath pdev handle
5293  * @txrx_peer: dp_txrx_peer handle
5294  *
5295  * Return: QDF_STATUS
5296  */
5297 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
5298 					  struct dp_txrx_peer *txrx_peer)
5299 {
5300 	if (!pdev || !txrx_peer) {
5301 		dp_warn("Null pdev or peer");
5302 		return QDF_STATUS_E_INVAL;
5303 	}
5304 
5305 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5306 		return QDF_STATUS_SUCCESS;
5307 
5308 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5309 		/*
5310 		 * Allocate memory on per tid basis when nss is enabled
5311 		 */
5312 		txrx_peer->jitter_stats =
5313 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5314 					* DP_MAX_TIDS);
5315 	} else {
5316 		/*
5317 		 * Allocate memory on per tid per ring basis
5318 		 */
5319 		txrx_peer->jitter_stats =
5320 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5321 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
5322 	}
5323 
5324 	if (!txrx_peer->jitter_stats) {
5325 		dp_warn("Jitter stats obj alloc failed!!");
5326 		return QDF_STATUS_E_NOMEM;
5327 	}
5328 
5329 	return QDF_STATUS_SUCCESS;
5330 }
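
/*
 * Indexing sketch implied by the two allocations above (illustrative only):
 * with nss offload the array is flat per TID, otherwise it is laid out per
 * TID per tx/rx context:
 *
 *	stats = &txrx_peer->jitter_stats[tid];                          nss
 *	stats = &txrx_peer->jitter_stats[tid * CDP_MAX_TXRX_CTX + ctx]; host
 */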
5331 
5332 /**
5333  * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
5334  *
5335  * @pdev: Datapath pdev handle
5336  * @txrx_peer: dp_txrx_peer handle
5337  *
5338  * Return: void
5339  */
5340 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
5341 				      struct dp_txrx_peer *txrx_peer)
5342 {
5343 	if (!pdev || !txrx_peer) {
5344 		dp_warn("Null pdev or peer");
5345 		return;
5346 	}
5347 
5348 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5349 		return;
5350 
5351 	if (txrx_peer->jitter_stats) {
5352 		qdf_mem_free(txrx_peer->jitter_stats);
5353 		txrx_peer->jitter_stats = NULL;
5354 	}
5355 }
5356 
5357 /**
5358  * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
5359  *
5360  * @txrx_peer: dp_txrx_peer handle
5361  *
5362  * Return: void
5363  */
5364 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5365 {
5366 	struct cdp_peer_tid_stats *jitter_stats = NULL;
5367 
5368 	if (!txrx_peer) {
5369 		dp_warn("Null peer");
5370 		return;
5371 	}
5372 
	if (!wlan_cfg_is_peer_jitter_stats_enabled(
			txrx_peer->vdev->pdev->soc->wlan_cfg_ctx))
		return;
5377 
5378 	jitter_stats = txrx_peer->jitter_stats;
5379 	if (!jitter_stats)
5380 		return;
5381 
	if (wlan_cfg_get_dp_pdev_nss_enabled(
			txrx_peer->vdev->pdev->wlan_cfg_ctx))
		qdf_mem_zero(jitter_stats,
			     sizeof(struct cdp_peer_tid_stats) *
			     DP_MAX_TIDS);
	else
		qdf_mem_zero(jitter_stats,
			     sizeof(struct cdp_peer_tid_stats) *
			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
}
5394 #endif
5395 
5396 QDF_STATUS
5397 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
5398 			uint8_t tid, uint16_t win_sz)
5399 {
5400 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
5401 	struct dp_peer *peer;
5402 	struct dp_rx_tid *rx_tid;
5403 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5404 
5405 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5406 
5407 	if (!peer) {
5408 		dp_peer_err("%pK: Couldn't find peer from ID %d",
5409 			    soc, peer_id);
5410 		return QDF_STATUS_E_FAILURE;
5411 	}
5412 
5413 	qdf_assert_always(tid < DP_MAX_TIDS);
5414 
5415 	rx_tid = &peer->rx_tid[tid];
5416 
5417 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
5418 		if (!rx_tid->delba_tx_status) {
5419 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
5420 				     soc, peer_id, tid, win_sz);
5421 
5422 			qdf_spin_lock_bh(&rx_tid->tid_lock);
5423 
5424 			rx_tid->delba_tx_status = 1;
5425 
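			/* Cap the renegotiated rx BA window at 63 MPDUs */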
5426 			rx_tid->rx_ba_win_size_override =
5427 			    qdf_min((uint16_t)63, win_sz);
5428 
5429 			rx_tid->delba_rcode =
5430 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
5431 
5432 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
5433 
5434 			if (soc->cdp_soc.ol_ops->send_delba)
5435 				soc->cdp_soc.ol_ops->send_delba(
5436 					peer->vdev->pdev->soc->ctrl_psoc,
5437 					peer->vdev->vdev_id,
5438 					peer->mac_addr.raw,
5439 					tid,
5440 					rx_tid->delba_rcode,
5441 					CDP_DELBA_REASON_NONE);
5442 		}
5443 	} else {
5444 		dp_peer_err("%pK: BA session is not set up for TID:%d", soc, tid);
5445 		status = QDF_STATUS_E_FAILURE;
5446 	}
5447 
5448 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5449 
5450 	return status;
5451 }
5452 
5453 #ifdef DP_PEER_EXTENDED_API
5454 /**
5455  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
5456  * @soc: DP soc handle
5457  * @txrx_peer: Core txrx_peer handle
5458  * @set_bw: enum of bandwidth to be set for this peer connection
5459  *
5460  * Return: None
5461  */
5462 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
5463 			   enum cdp_peer_bw set_bw)
5464 {
5465 	if (!txrx_peer)
5466 		return;
5467 
5468 	txrx_peer->bw = set_bw;
5469 
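	/*
	 * Select the mpdu retry threshold for the peer's bandwidth:
	 * mpdu_retry_threshold_2 for 160/320 MHz peers,
	 * mpdu_retry_threshold_1 otherwise.
	 */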
5470 	switch (set_bw) {
5471 	case CDP_160_MHZ:
5472 	case CDP_320_MHZ:
5473 		txrx_peer->mpdu_retry_threshold =
5474 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
5475 		break;
5476 	case CDP_20_MHZ:
5477 	case CDP_40_MHZ:
5478 	case CDP_80_MHZ:
5479 	default:
5480 		txrx_peer->mpdu_retry_threshold =
5481 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
5482 		break;
5483 	}
5484 
5485 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
5486 		txrx_peer->peer_id, txrx_peer->bw,
5487 		txrx_peer->mpdu_retry_threshold);
5488 }
5489 
5490 #ifdef WLAN_FEATURE_11BE_MLO
5491 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5492 			    struct ol_txrx_desc_type *sta_desc)
5493 {
5494 	struct dp_peer *peer;
5495 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5496 
5497 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5498 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5499 
5500 	if (!peer)
5501 		return QDF_STATUS_E_FAULT;
5502 
5503 	qdf_spin_lock_bh(&peer->peer_info_lock);
5504 	peer->state = OL_TXRX_PEER_STATE_CONN;
5505 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5506 
5507 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5508 
5509 	dp_rx_flush_rx_cached(peer, false);
5510 
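	/*
	 * For the first link peer of an MLO connection, mirror the state
	 * onto the MLD peer and flush its cached rx frames as well.
	 */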
5511 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5512 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
5513 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
5514 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
5515 		peer->mld_peer->state = peer->state;
5516 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
5517 		dp_rx_flush_rx_cached(peer->mld_peer, false);
5518 	}
5519 
5520 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5521 
5522 	return QDF_STATUS_SUCCESS;
5523 }
5524 
5525 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5526 				enum ol_txrx_peer_state state)
5527 {
5528 	struct dp_peer *peer;
5529 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5530 
5531 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5532 				       DP_MOD_ID_CDP);
5533 	if (!peer) {
5534 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
5535 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5536 		return QDF_STATUS_E_FAILURE;
5537 	}
5538 	peer->state = state;
5539 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5540 
5541 	if (peer->txrx_peer)
5542 		peer->txrx_peer->authorize = peer->authorize;
5543 
5544 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
5545 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5546 		     peer->state);
5547 
5548 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5549 		peer->mld_peer->state = peer->state;
		if (peer->mld_peer->txrx_peer)
			peer->mld_peer->txrx_peer->authorize = peer->authorize;
		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
5552 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
5553 			     peer->mld_peer->state);
5554 	}
5555 
5556 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5557 	 * Decrement it here.
5558 	 */
5559 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5560 
5561 	return QDF_STATUS_SUCCESS;
5562 }
5563 #else
5564 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5565 			    struct ol_txrx_desc_type *sta_desc)
5566 {
5567 	struct dp_peer *peer;
5568 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5569 
5570 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5571 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5572 
5573 	if (!peer)
5574 		return QDF_STATUS_E_FAULT;
5575 
5576 	qdf_spin_lock_bh(&peer->peer_info_lock);
5577 	peer->state = OL_TXRX_PEER_STATE_CONN;
5578 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5579 
5580 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5581 
5582 	dp_rx_flush_rx_cached(peer, false);
5583 
5584 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5585 
5586 	return QDF_STATUS_SUCCESS;
5587 }
5588 
5589 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5590 				enum ol_txrx_peer_state state)
5591 {
5592 	struct dp_peer *peer;
5593 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5594 
5595 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5596 				       DP_MOD_ID_CDP);
5597 	if (!peer) {
5598 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
5599 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5600 		return QDF_STATUS_E_FAILURE;
5601 	}
5602 	peer->state = state;
5603 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5604 
5605 	if (peer->txrx_peer)
5606 		peer->txrx_peer->authorize = peer->authorize;
5607 
5608 	dp_info("peer %pK state %d", peer, peer->state);
5609 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5610 	 * Decrement it here.
5611 	 */
5612 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5613 
5614 	return QDF_STATUS_SUCCESS;
5615 }
5616 #endif
5617 
5618 QDF_STATUS
5619 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5620 	      struct qdf_mac_addr peer_addr)
5621 {
5622 	struct dp_peer *peer;
5623 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5624 
5625 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
5626 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	if (!peer->valid) {
		/* Drop the reference taken by dp_peer_find_hash_find() */
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAULT;
	}
5629 
5630 	dp_clear_peer_internal(soc, peer);
5631 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5632 	return QDF_STATUS_SUCCESS;
5633 }
5634 
5635 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5636 			 uint8_t *vdev_id)
5637 {
5638 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5639 	struct dp_peer *peer =
5640 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5641 				       DP_MOD_ID_CDP);
5642 
5643 	if (!peer)
5644 		return QDF_STATUS_E_FAILURE;
5645 
5646 	dp_info("peer %pK vdev %pK vdev id %d",
5647 		peer, peer->vdev, peer->vdev->vdev_id);
5648 	*vdev_id = peer->vdev->vdev_id;
5649 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5650 	 * Decrement it here.
5651 	 */
5652 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5653 
5654 	return QDF_STATUS_SUCCESS;
5655 }
5656 
5657 struct cdp_vdev *
5658 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
5659 			 struct qdf_mac_addr peer_addr)
5660 {
5661 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5662 	struct dp_peer *peer = NULL;
5663 	struct cdp_vdev *vdev = NULL;
5664 
5665 	if (!pdev) {
5666 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
5667 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
5668 		return NULL;
5669 	}
5670 
5671 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
5672 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
5673 	if (!peer) {
5674 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5675 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
5676 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
5677 		return NULL;
5678 	}
5679 
5680 	vdev = (struct cdp_vdev *)peer->vdev;
5681 
5682 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5683 	return vdev;
5684 }
5685 
5686 /**
5687  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
5688  * @peer_handle: peer instance
5689  *
5690  * Get the virtual interface instance to which the peer belongs
5691  *
5692  * Return: virtual interface instance pointer
5693  *         NULL if it cannot be found
5694  */
5695 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
5696 {
5697 	struct dp_peer *peer = peer_handle;
5698 
5699 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
5700 	return (struct cdp_vdev *)peer->vdev;
5701 }
5702 
5703 /**
5704  * dp_peer_get_peer_mac_addr() - Get peer mac address
5705  * @peer_handle: peer instance
5706  *
5707  * Get peer mac address
5708  *
5709  * Return: peer mac address pointer
5710  *         NULL if it cannot be found
5711  */
5712 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
5713 {
5714 	struct dp_peer *peer = peer_handle;
5715 	uint8_t *mac;
5716 
5717 	mac = peer->mac_addr.raw;
5718 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5719 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
5720 	return peer->mac_addr.raw;
5721 }
5722 
5723 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5724 		      uint8_t *peer_mac)
5725 {
5726 	enum ol_txrx_peer_state peer_state;
5727 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5728 	struct cdp_peer_info peer_info = { 0 };
5729 	struct dp_peer *peer;
5730 	struct dp_peer *tgt_peer;
5731 
5732 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
5733 				 false, CDP_WILD_PEER_TYPE);
5734 
5735 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
5736 
5737 	if (!peer)
5738 		return OL_TXRX_PEER_STATE_INVALID;
5739 
5740 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
5741 
5742 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
5743 	peer_state = tgt_peer->state;
5744 
5745 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5746 
5747 	return peer_state;
5748 }
5749 
5750 /**
5751  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
5752  * @pdev: data path device instance
5753  *
5754  * local peer id pool alloc for physical device
5755  *
5756  * Return: none
5757  */
5758 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
5759 {
5760 	int i;
5761 
5762 	/* point the freelist to the first ID */
5763 	pdev->local_peer_ids.freelist = 0;
5764 
5765 	/* link each ID to the next one */
5766 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
5767 		pdev->local_peer_ids.pool[i] = i + 1;
5768 		pdev->local_peer_ids.map[i] = NULL;
5769 	}
5770 
5771 	/* link the last ID to itself, to mark the end of the list */
5772 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
5773 	pdev->local_peer_ids.pool[i] = i;
5774 
5775 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
5776 	DP_TRACE(INFO, "Peer pool init");
5777 }
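
/*
 * Freelist sketch (illustrative): with N = OL_TXRX_NUM_LOCAL_PEER_IDS, the
 * init above leaves pool[i] == i + 1 for i < N and pool[N] == N as the
 * end-of-list marker, which assumes pool[] is dimensioned N + 1 (see
 * dp_types.h). dp_local_peer_id_alloc() pops IDs off the head and
 * dp_local_peer_id_free() pushes them back:
 *
 *	freelist -> 0 -> 1 -> ... -> N-1 -> N (terminator)
 */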
5778 
5779 /**
5780  * dp_local_peer_id_alloc() - allocate local peer id
5781  * @pdev: data path device instance
5782  * @peer: new peer instance
5783  *
5784  * allocate local peer id
5785  *
5786  * Return: none
5787  */
5788 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5789 {
5790 	int i;
5791 
5792 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5793 	i = pdev->local_peer_ids.freelist;
5794 	if (pdev->local_peer_ids.pool[i] == i) {
5795 		/* the list is empty, except for the list-end marker */
5796 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5797 	} else {
5798 		/* take the head ID and advance the freelist */
5799 		peer->local_id = i;
5800 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5801 		pdev->local_peer_ids.map[i] = peer;
5802 	}
5803 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5804 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5805 }
5806 
5807 /**
5808  * dp_local_peer_id_free() - remove local peer id
5809  * @pdev: data path device instance
5810  * @peer: peer instance to be removed
5811  *
5812  * remove local peer id
5813  *
5814  * Return: none
5815  */
5816 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5817 {
5818 	int i = peer->local_id;
5819 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5820 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5821 		return;
5822 	}
5823 
5824 	/* put this ID on the head of the freelist */
5825 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5826 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5827 	pdev->local_peer_ids.freelist = i;
5828 	pdev->local_peer_ids.map[i] = NULL;
5829 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5830 }
5831 
5832 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5833 				uint8_t vdev_id, uint8_t *peer_addr)
5834 {
5835 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5836 	struct dp_peer *peer = NULL;
5837 
5838 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5839 				      DP_MOD_ID_CDP);
5840 	if (!peer)
5841 		return false;
5842 
5843 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5844 
5845 	return true;
5846 }
5847 
5848 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5849 				      uint8_t vdev_id, uint8_t *peer_addr,
5850 				      uint16_t max_bssid)
5851 {
5852 	int i;
5853 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5854 	struct dp_peer *peer = NULL;
5855 
5856 	for (i = 0; i < max_bssid; i++) {
5857 		/* Need to check vdevs other than the vdev_id */
5858 		if (vdev_id == i)
5859 			continue;
5860 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5861 					      DP_MOD_ID_CDP);
5862 		if (peer) {
5863 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
5864 			       QDF_MAC_ADDR_REF(peer_addr), i);
5865 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5866 			return true;
5867 		}
5868 	}
5869 
5870 	return false;
5871 }
5872 
5873 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5874 			      uint8_t *peer_mac, bool val)
5875 {
5876 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5877 	struct dp_peer *peer = NULL;
5878 
5879 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5880 				      DP_MOD_ID_CDP);
5881 	if (!peer) {
5882 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
5883 		       QDF_MAC_ADDR_REF(peer_mac));
5884 		return;
5885 	}
5886 
5887 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
5888 		val, QDF_MAC_ADDR_REF(peer_mac));
5889 	peer->is_tdls_peer = val;
5890 
5891 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5892 }
5893 #endif
5894 
5895 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5896 			uint8_t *peer_addr)
5897 {
5898 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5899 	struct dp_peer *peer = NULL;
5900 
5901 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5902 				      DP_MOD_ID_CDP);
5903 	if (peer) {
5904 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5905 		return true;
5906 	}
5907 
5908 	return false;
5909 }
5910 
5911 #ifdef IPA_OFFLOAD
5912 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
5913 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
5914 {
5915 	struct dp_soc *soc = peer->vdev->pdev->soc;
5916 	struct hal_reo_cmd_params params;
5917 	int i;
5918 	int stats_cmd_sent_cnt = 0;
5919 	QDF_STATUS status;
5920 	uint16_t peer_id = peer->peer_id;
5921 	unsigned long comb_peer_id_tid;
5922 	struct dp_rx_tid *rx_tid;
5923 
5924 	if (!dp_stats_cmd_cb)
5925 		return stats_cmd_sent_cnt;
5926 
5927 	qdf_mem_zero(&params, sizeof(params));
5928 	for (i = 0; i < DP_MAX_TIDS; i++) {
5929 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5930 			continue;
5931 
5932 		rx_tid = &peer->rx_tid[i];
5933 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5934 			params.std.need_status = 1;
5935 			params.std.addr_lo =
5936 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5937 			params.std.addr_hi =
5938 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5939 			params.u.stats_params.clear = 1;
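			/* Pack TID and peer id into a single cookie for the
			 * stats callback to decode
			 */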
5940 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
5941 					    | peer_id);
5942 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
5943 						 &params, dp_stats_cmd_cb,
5944 						 (void *)comb_peer_id_tid);
5945 			if (QDF_IS_STATUS_SUCCESS(status))
5946 				stats_cmd_sent_cnt++;
5947 
5948 			/* Flush REO descriptor from HW cache to update stats
5949 			 * in descriptor memory. This is to help debugging
5950 			 */
5951 			qdf_mem_zero(&params, sizeof(params));
5952 			params.std.need_status = 0;
5953 			params.std.addr_lo =
5954 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5955 			params.std.addr_hi =
5956 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5957 			params.u.fl_cache_params.flush_no_inval = 1;
5958 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5959 					NULL);
5960 		}
5961 	}
5962 
5963 	return stats_cmd_sent_cnt;
5964 }
5965 
5966 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
5967 
5968 #endif
5969 /**
5970  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
5971  * @peer: DP peer handle
5972  * @dp_stats_cmd_cb: REO command callback function
5973  * @cb_ctxt: Callback context
5974  *
5975  * Return: count of tid stats cmd send succeeded
5976  * Return: count of tid stats commands sent successfully
5977 int dp_peer_rxtid_stats(struct dp_peer *peer,
5978 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5979 			void *cb_ctxt)
5980 {
5981 	struct dp_soc *soc = peer->vdev->pdev->soc;
5982 	struct hal_reo_cmd_params params;
5983 	int i;
5984 	int stats_cmd_sent_cnt = 0;
5985 	QDF_STATUS status;
5986 	struct dp_rx_tid *rx_tid;
5987 
5988 	if (!dp_stats_cmd_cb)
5989 		return stats_cmd_sent_cnt;
5990 
5991 	qdf_mem_zero(&params, sizeof(params));
5992 	for (i = 0; i < DP_MAX_TIDS; i++) {
5993 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5994 			continue;
5995 
5996 		rx_tid = &peer->rx_tid[i];
5997 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5998 			params.std.need_status = 1;
5999 			params.std.addr_lo =
6000 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6001 			params.std.addr_hi =
6002 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6003 
6004 			if (cb_ctxt) {
6005 				status = dp_reo_send_cmd(
6006 						soc, CMD_GET_QUEUE_STATS,
6007 						&params, dp_stats_cmd_cb,
6008 						cb_ctxt);
6009 			} else {
6010 				status = dp_reo_send_cmd(
6011 						soc, CMD_GET_QUEUE_STATS,
6012 						&params, dp_stats_cmd_cb,
6013 						rx_tid);
6014 			}
6015 
6016 			if (QDF_IS_STATUS_SUCCESS(status))
6017 				stats_cmd_sent_cnt++;
6018 
6020 			/* Flush REO descriptor from HW cache to update stats
6021 			 * in descriptor memory. This is to help debugging
6022 			 */
6023 			qdf_mem_zero(&params, sizeof(params));
6024 			params.std.need_status = 0;
6025 			params.std.addr_lo =
6026 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6027 			params.std.addr_hi =
6028 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6029 			params.u.fl_cache_params.flush_no_inval = 1;
6030 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
6031 					NULL);
6032 		}
6033 	}
6034 
6035 	return stats_cmd_sent_cnt;
6036 }
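
/*
 * Usage sketch (the callback name is hypothetical): one CMD_GET_QUEUE_STATS
 * is queued per active TID and the count of successfully queued commands is
 * returned; with a NULL cb_ctxt the per-TID struct dp_rx_tid is handed to
 * the callback instead:
 *
 *	static void rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				   union hal_reo_status *reo_status);
 *	...
 *	int sent = dp_peer_rxtid_stats(peer, rxtid_stats_cb, NULL);
 */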
6037 
6038 QDF_STATUS
6039 dp_set_michael_key(struct cdp_soc_t *soc,
6040 		   uint8_t vdev_id,
6041 		   uint8_t *peer_mac,
6042 		   bool is_unicast, uint32_t *key)
6043 {
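	/*
	 * security[] index 1 holds the unicast keys and index 0 the mcast
	 * keys, matching enum dp_sec_type in dp_types.h.
	 */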
6044 	uint8_t sec_index = is_unicast ? 1 : 0;
6045 	struct dp_peer *peer =
6046 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
6047 						       peer_mac, 0, vdev_id,
6048 						       DP_MOD_ID_CDP);
6049 
6050 	if (!peer) {
6051 		dp_peer_err("%pK: peer not found ", soc);
6052 		return QDF_STATUS_E_FAILURE;
6053 	}
6054 
6055 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
6056 		     key, IEEE80211_WEP_MICLEN);
6057 
6058 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6059 
6060 	return QDF_STATUS_SUCCESS;
6061 }
6062 
6064 /**
6065  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
6066  * @soc: DP soc
6067  * @vdev: vdev
6068  * @mod_id: id of module requesting reference
6069  *
6070  * Return: VDEV BSS peer
6071  */
6072 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
6073 					   struct dp_vdev *vdev,
6074 					   enum dp_mod_id mod_id)
6075 {
6076 	struct dp_peer *peer = NULL;
6077 
6078 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6079 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6080 		if (peer->bss_peer)
6081 			break;
6082 	}
6083 
6084 	if (!peer) {
6085 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6086 		return NULL;
6087 	}
6088 
6089 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6090 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6091 		return peer;
6092 	}
6093 
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	/* Reference could not be taken; don't return an unreferenced peer */
	return NULL;
6096 }
6097 
6098 /**
6099  * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
6100  * @soc: DP soc
6101  * @vdev: vdev
6102  * @mod_id: id of module requesting reference
6103  *
6104  * Return: VDEV self peer
6105  */
6106 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
6107 						struct dp_vdev *vdev,
6108 						enum dp_mod_id mod_id)
6109 {
6110 	struct dp_peer *peer;
6111 
6112 	if (vdev->opmode != wlan_op_mode_sta)
6113 		return NULL;
6114 
6115 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6116 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6117 		if (peer->sta_self_peer)
6118 			break;
6119 	}
6120 
6121 	if (!peer) {
6122 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6123 		return NULL;
6124 	}
6125 
6126 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6127 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6128 		return peer;
6129 	}
6130 
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	/* Reference could not be taken; don't return an unreferenced peer */
	return NULL;
6133 }
6134 
6135 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
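/*
 * Debug flow sketch: dp_get_rx_reo_queue_info() looks up the vdev's bss
 * peer and calls dp_send_cache_flush_for_rx_tid(), which issues a
 * CMD_FLUSH_CACHE per active TID with dp_dump_rx_reo_queue_info() as the
 * completion callback; the callback then dumps the flushed REO queue
 * descriptor from DDR via hal_dump_rx_reo_queue_desc().
 */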
6136 void dp_dump_rx_reo_queue_info(
6137 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
6138 {
6139 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
6140 
6141 	if (!rx_tid)
6142 		return;
6143 
6144 	if (reo_status->fl_cache_status.header.status !=
6145 		HAL_REO_CMD_SUCCESS) {
6146 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
6147 			  reo_status->fl_cache_status.header.status);
6148 		return;
6149 	}
6150 	qdf_spin_lock_bh(&rx_tid->tid_lock);
6151 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
6152 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
6153 }
6154 
6155 void dp_send_cache_flush_for_rx_tid(
6156 	struct dp_soc *soc, struct dp_peer *peer)
6157 {
6158 	int i;
6159 	struct dp_rx_tid *rx_tid;
6160 	struct hal_reo_cmd_params params;
6161 
6162 	if (!peer) {
6163 		dp_err_rl("Peer is NULL");
6164 		return;
6165 	}
6166 
6167 	for (i = 0; i < DP_MAX_TIDS; i++) {
6168 		rx_tid = &peer->rx_tid[i];
6169 		if (!rx_tid)
6170 			continue;
6171 		qdf_spin_lock_bh(&rx_tid->tid_lock);
6172 		if (rx_tid->hw_qdesc_vaddr_aligned) {
6173 			qdf_mem_zero(&params, sizeof(params));
6174 			params.std.need_status = 1;
6175 			params.std.addr_lo =
6176 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6177 			params.std.addr_hi =
6178 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6179 			params.u.fl_cache_params.flush_no_inval = 0;
6180 			if (QDF_STATUS_SUCCESS !=
6181 				dp_reo_send_cmd(
6182 					soc, CMD_FLUSH_CACHE,
6183 					&params, dp_dump_rx_reo_queue_info,
6184 					(void *)rx_tid)) {
6185 				dp_err_rl("cache flush send failed tid %d",
6186 					  rx_tid->tid);
6187 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
6188 				break;
6189 			}
6190 		}
6191 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
6192 	}
6193 }
6194 
6195 void dp_get_rx_reo_queue_info(
6196 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6197 {
6198 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6199 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6200 						     DP_MOD_ID_GENERIC_STATS);
6201 	struct dp_peer *peer = NULL;
6202 
6203 	if (!vdev) {
6204 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
6205 		goto failed;
6206 	}
6207 
6208 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6209 
6210 	if (!peer) {
6211 		dp_err_rl("Peer is NULL");
6212 		goto failed;
6213 	}
6214 	dp_send_cache_flush_for_rx_tid(soc, peer);
6215 failed:
6216 	if (peer)
6217 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
6218 	if (vdev)
6219 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6220 }
6221 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
6222 
6223 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6224 			 uint8_t *peer_mac)
6225 {
6226 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6227 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
6228 							      vdev_id,
6229 							      DP_MOD_ID_CDP);
6230 	struct dp_txrx_peer *txrx_peer;
6231 	uint8_t tid;
6232 	struct dp_rx_tid_defrag *defrag_rx_tid;
6233 
6234 	if (!peer)
6235 		return;
6236 
6237 	if (!peer->txrx_peer)
6238 		goto fail;
6239 
6240 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
6241 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6242 
6243 	txrx_peer = peer->txrx_peer;
6244 
6245 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
6246 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
6247 
6248 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
6249 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
6250 		dp_rx_reorder_flush_frag(txrx_peer, tid);
6251 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
6252 	}
6253 fail:
6254 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6255 }
6256 
6257 /**
6258  * dp_peer_find_by_id_valid() - check if peer exists for given id
6259  * @soc: core DP soc context
6260  * @peer_id: peer id by which the peer object is looked up
6261  *
6262  * Return: true if peer exists, false otherwise
6263  */
6264 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
6265 {
6266 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
6267 						     DP_MOD_ID_HTT);
6268 
6269 	if (peer) {
6270 		/*
6271 		 * Decrement the peer ref which is taken as part of
6272 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
6273 		 */
6274 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
6275 
6276 		return true;
6277 	}
6278 
6279 	return false;
6280 }
6281 
6282 qdf_export_symbol(dp_peer_find_by_id_valid);
6283