xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef FEATURE_AST
54 #ifdef BYPASS_OL_OPS
55 /*
56  * dp_add_wds_entry_wrapper() - Add new AST entry for the wds station
57  * @soc: DP soc structure pointer
58  * @peer: dp peer structure
59  * @dest_macaddr: MAC address of ast node
60  * @flags: wds or hmwds
61  * @type: type from enum cdp_txrx_ast_entry_type
62  *
63  * This API is used by the WDS source port learning function to
64  * add a new AST entry in the fw.
65  *
66  * Return: 0 on success, error code otherwise.
67  */
68 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
69 				    struct dp_peer *peer,
70 				    const uint8_t *dest_macaddr,
71 				    uint32_t flags,
72 				    uint8_t type)
73 {
74 	QDF_STATUS status;
75 
76 	status = target_if_add_wds_entry(soc->ctrl_psoc,
77 					 peer->vdev->vdev_id,
78 					 peer->mac_addr.raw,
79 					 dest_macaddr,
80 					 WMI_HOST_WDS_FLAG_STATIC,
81 					 type);
82 
83 	return qdf_status_to_os_return(status);
84 }
85 
86 /*
87  * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
88  * @soc: DP soc structure pointer
89  * @peer: dp peer structure
90  * @dest_macaddr: MAC address of ast node
91  * @flags: wds or hmwds
92  *
93  * This API is used to update the peer mac address for the ast
94  * entry in the fw.
95  *
96  * Return: 0 on success, error code otherwise.
97  */
98 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
99 				       struct dp_peer *peer,
100 				       uint8_t *dest_macaddr,
101 				       uint32_t flags)
102 {
103 	QDF_STATUS status;
104 
105 	status = target_if_update_wds_entry(soc->ctrl_psoc,
106 					    peer->vdev->vdev_id,
107 					    dest_macaddr,
108 					    peer->mac_addr.raw,
109 					    WMI_HOST_WDS_FLAG_STATIC);
110 
111 	return qdf_status_to_os_return(status);
112 }
113 
114 /*
115  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
116  * @soc: DP soc structure pointer
117  * @vdev_id: vdev_id
118  * @wds_macaddr: MAC address of ast node
119  * @type: type from enum cdp_txrx_ast_entry_type
120  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
121  *
122  * This API is used to delete an AST entry from fw
123  *
124  * Return: None
125  */
126 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
127 				     uint8_t vdev_id,
128 				     uint8_t *wds_macaddr,
129 				     uint8_t type,
130 				     uint8_t delete_in_fw)
131 {
132 	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
133 				wds_macaddr, type, delete_in_fw);
134 }
135 #else
136 static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
137 				    struct dp_peer *peer,
138 				    const uint8_t *dest_macaddr,
139 				    uint32_t flags,
140 				    uint8_t type)
141 {
142 	int status;
143 
144 	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
145 					soc->ctrl_psoc,
146 					peer->vdev->vdev_id,
147 					peer->mac_addr.raw,
148 					peer->peer_id,
149 					dest_macaddr,
150 					peer->mac_addr.raw,
151 					flags,
152 					type);
153 
154 	return status;
155 }
156 
157 static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
158 				       struct dp_peer *peer,
159 				       uint8_t *dest_macaddr,
160 				       uint32_t flags)
161 {
162 	int status;
163 
164 	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
165 				soc->ctrl_psoc,
166 				peer->vdev->vdev_id,
167 				dest_macaddr,
168 				peer->mac_addr.raw,
169 				flags);
170 
171 	return status;
172 }
173 
174 static void dp_del_wds_entry_wrapper(struct dp_soc *soc,
175 				     uint8_t vdev_id,
176 				     uint8_t *wds_macaddr,
177 				     uint8_t type,
178 				     uint8_t delete_in_fw)
179 {
180 	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
181 						vdev_id,
182 						wds_macaddr,
183 						type,
184 						delete_in_fw);
185 }
186 #endif
187 #endif
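/*
 * Illustrative usage sketch (not part of the driver): the three wrappers
 * above give the WDS learning path a single calling convention whether the
 * BYPASS_OL_OPS build routes through target_if or through the cdp_soc
 * ol_ops callbacks. A hypothetical caller would look roughly like:
 *
 *	int ret = dp_add_wds_entry_wrapper(soc, peer, wds_src_mac,
 *					   flags, CDP_TXRX_AST_TYPE_WDS);
 *	if (ret)
 *		dp_peer_err("%pK: WDS entry add failed: %d", soc, ret);
 */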
188 
189 #ifdef FEATURE_WDS
190 static inline bool
191 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
192 				    struct dp_ast_entry *ast_entry)
193 {
194 	/* If peer map v2 is enabled, the ast entry is not freed here;
195 	 * it is supposed to be freed in the unmap event (after we
196 	 * receive delete confirmation from the target).
197 	 *
198 	 * If peer_id is invalid, we did not get the peer map event
199 	 * for the peer; free the ast entry from here only in this case.
200 	 */
201 
202 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
203 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
204 		return true;
205 
206 	return false;
207 }
208 #else
209 static inline bool
210 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
211 				    struct dp_ast_entry *ast_entry)
212 {
213 	return false;
214 }
215 
216 void dp_soc_wds_attach(struct dp_soc *soc)
217 {
218 }
219 
220 void dp_soc_wds_detach(struct dp_soc *soc)
221 {
222 }
223 #endif
224 
225 #ifdef QCA_SUPPORT_WDS_EXTENDED
226 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
227 {
228 	struct dp_vdev *vdev = peer->vdev;
229 	struct dp_txrx_peer *txrx_peer;
230 
231 	if (!vdev->wds_ext_enabled)
232 		return false;
233 
234 	txrx_peer = dp_get_txrx_peer(peer);
235 	if (!txrx_peer)
236 		return false;
237 
238 	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
239 				&txrx_peer->wds_ext.init))
240 		return true;
241 
242 	return false;
243 }
244 #else
245 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
246 {
247 	return false;
248 }
249 #endif
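/*
 * Illustrative usage sketch (hypothetical caller): the check above is a
 * plain bit test, so a path that needs to gate delivery onto a standalone
 * WDS-ext netdevice could simply do:
 *
 *	if (dp_peer_check_wds_ext_peer(peer))
 *		deliver_on_wds_ext_dev(peer);	// hypothetical helper
 */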
250 
251 #ifdef REO_QDESC_HISTORY
252 static inline void
253 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
254 			    enum reo_qdesc_event_type type)
255 {
256 	struct reo_qdesc_event *evt;
257 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
258 	uint32_t idx;
259 
260 	reo_qdesc_history_idx++;
261 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
262 
263 	evt = &reo_qdesc_history[idx];
264 
265 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
266 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
267 	evt->ts = qdf_get_log_timestamp();
268 	evt->type = type;
269 }
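/*
 * Illustrative note: REO_QDESC_HISTORY_SIZE is a power of two, so masking
 * with (size - 1) is equivalent to a modulo and turns the ever-increasing
 * 64-bit counter into a circular-buffer slot. A minimal standalone sketch
 * of the same indexing pattern:
 *
 *	uint64_t widx = 0;
 *	uint32_t slot;
 *
 *	widx++;
 *	slot = widx & (REO_QDESC_HISTORY_SIZE - 1);	// wraps at 512
 */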
270 
271 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
272 static inline void
273 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
274 				 enum reo_qdesc_event_type type)
275 {
276 	struct reo_qdesc_event *evt;
277 	uint32_t idx;
278 
279 	reo_qdesc_history_idx++;
280 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
281 
282 	evt = &reo_qdesc_history[idx];
283 
284 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
285 	evt->qdesc_addr = desc->hw_qdesc_paddr;
286 	evt->ts = qdf_get_log_timestamp();
287 	evt->type = type;
288 }
289 
290 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
291 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
292 
293 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
294 	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
295 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
296 
297 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
298 	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)
299 
300 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
301 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
302 
303 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
304 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
305 
306 #else
307 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
308 
309 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
310 
311 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
312 
313 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
314 
315 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
316 #endif
317 
318 static inline void
319 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
320 					uint8_t valid)
321 {
322 	params->u.upd_queue_params.update_svld = 1;
323 	params->u.upd_queue_params.svld = valid;
324 	dp_peer_debug("Setting SSN valid bit to %d",
325 		      valid);
326 }
327 
328 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
329 {
330 	uint32_t max_ast_index;
331 
332 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
333 	/* allocate ast_table for ast entry to ast_index map */
334 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
335 	soc->ast_table = qdf_mem_malloc(max_ast_index *
336 					sizeof(struct dp_ast_entry *));
337 	if (!soc->ast_table) {
338 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
339 		return QDF_STATUS_E_NOMEM;
340 	}
341 	return QDF_STATUS_SUCCESS; /* success */
342 }
343 
344 /*
345  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
346  * @soc: soc handle
347  *
348  * return: QDF_STATUS
349  */
350 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
351 {
352 	uint32_t max_peers, peer_map_size;
353 
354 	max_peers = soc->max_peer_id;
355 	/* allocate the peer ID -> peer object map */
356 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
357 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
358 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
359 	if (!soc->peer_id_to_obj_map) {
360 		dp_peer_err("%pK: peer map memory allocation failed", soc);
361 		return QDF_STATUS_E_NOMEM;
362 	}
363 
364 	/*
365 	 * The peer_id_to_obj_map doesn't really need to be initialized,
366 	 * since elements are only used after they have been individually
367 	 * initialized.
368 	 * However, it is convenient for debugging to have all elements
369 	 * that are not in use set to 0.
370 	 */
371 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
372 
373 	qdf_spinlock_create(&soc->peer_map_lock);
374 	return QDF_STATUS_SUCCESS; /* success */
375 }
376 
377 #define DP_AST_HASH_LOAD_MULT  2
378 #define DP_AST_HASH_LOAD_SHIFT 0
379 
380 static inline uint32_t
381 dp_peer_find_hash_index(struct dp_soc *soc,
382 			union dp_align_mac_addr *mac_addr)
383 {
384 	uint32_t index;
385 
386 	index =
387 		mac_addr->align2.bytes_ab ^
388 		mac_addr->align2.bytes_cd ^
389 		mac_addr->align2.bytes_ef;
390 
391 	index ^= index >> soc->peer_hash.idx_bits;
392 	index &= soc->peer_hash.mask;
393 	return index;
394 }
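/*
 * Illustrative example (hypothetical values): the index XOR-folds the
 * three aligned 16-bit halves of the MAC address, mixes the upper bits
 * down, and masks to the table size. On a little-endian host, for MAC
 * 00:11:22:33:44:55 with idx_bits = 8 and mask = 0xff:
 *
 *	index = 0x1100 ^ 0x3322 ^ 0x5544;	// = 0x7766
 *	index ^= index >> 8;			// = 0x7711
 *	index &= 0xff;				// = 0x11
 */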
395 
396 /*
397  * dp_peer_find_hash_find() - returns legacy or mlo link peer from
398  *			      peer_hash_table matching vdev_id and mac_address
399  * @soc: soc handle
400  * @peer_mac_addr: peer mac address
401  * @mac_addr_is_aligned: is mac addr aligned
402  * @vdev_id: vdev_id
403  * @mod_id: id of module requesting reference
404  *
405  * return: peer on success
406  *         NULL on failure
407  */
408 struct dp_peer *dp_peer_find_hash_find(
409 				struct dp_soc *soc, uint8_t *peer_mac_addr,
410 				int mac_addr_is_aligned, uint8_t vdev_id,
411 				enum dp_mod_id mod_id)
412 {
413 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
414 	uint32_t index;
415 	struct dp_peer *peer;
416 
417 	if (!soc->peer_hash.bins)
418 		return NULL;
419 
420 	if (mac_addr_is_aligned) {
421 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
422 	} else {
423 		qdf_mem_copy(
424 			&local_mac_addr_aligned.raw[0],
425 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
426 		mac_addr = &local_mac_addr_aligned;
427 	}
428 	index = dp_peer_find_hash_index(soc, mac_addr);
429 	qdf_spin_lock_bh(&soc->peer_hash_lock);
430 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
431 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
432 		    ((peer->vdev->vdev_id == vdev_id) ||
433 		     (vdev_id == DP_VDEV_ALL))) {
434 			/* take peer reference before returning */
435 			if (dp_peer_get_ref(soc, peer, mod_id) !=
436 						QDF_STATUS_SUCCESS)
437 				peer = NULL;
438 
439 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
440 			return peer;
441 		}
442 	}
443 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
444 	return NULL; /* failure */
445 }
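/*
 * Illustrative usage (hypothetical caller): a successful lookup returns a
 * referenced peer, so the reference must be dropped with the same mod_id
 * once the caller is done with it:
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id, DP_MOD_ID_CDP);
 *	if (peer) {
 *		// ... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */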
446 
447 qdf_export_symbol(dp_peer_find_hash_find);
448 
449 #ifdef WLAN_FEATURE_11BE_MLO
450 /*
451  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
452  * @soc: soc handle
453  *
454  * return: none
455  */
456 static void dp_peer_find_hash_detach(struct dp_soc *soc)
457 {
458 	if (soc->peer_hash.bins) {
459 		qdf_mem_free(soc->peer_hash.bins);
460 		soc->peer_hash.bins = NULL;
461 		qdf_spinlock_destroy(&soc->peer_hash_lock);
462 	}
463 
464 	if (soc->arch_ops.mlo_peer_find_hash_detach)
465 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
466 }
467 
468 /*
469  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
470  * @soc: soc handle
471  *
472  * return: QDF_STATUS
473  */
474 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
475 {
476 	int i, hash_elems, log2;
477 
478 	/* allocate the peer MAC address -> peer object hash table */
479 	hash_elems = soc->max_peers;
480 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
481 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
482 	log2 = dp_log2_ceil(hash_elems);
483 	hash_elems = 1 << log2;
484 
485 	soc->peer_hash.mask = hash_elems - 1;
486 	soc->peer_hash.idx_bits = log2;
487 	/* allocate an array of TAILQ peer object lists */
488 	soc->peer_hash.bins = qdf_mem_malloc(
489 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
490 	if (!soc->peer_hash.bins)
491 		return QDF_STATUS_E_NOMEM;
492 
493 	for (i = 0; i < hash_elems; i++)
494 		TAILQ_INIT(&soc->peer_hash.bins[i]);
495 
496 	qdf_spinlock_create(&soc->peer_hash_lock);
497 
498 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
499 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
500 			QDF_STATUS_SUCCESS)) {
501 		dp_peer_find_hash_detach(soc);
502 		return QDF_STATUS_E_NOMEM;
503 	}
504 	return QDF_STATUS_SUCCESS;
505 }
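/*
 * Illustrative sizing example (hypothetical numbers, assuming
 * DP_PEER_HASH_LOAD_MULT = 2 and DP_PEER_HASH_LOAD_SHIFT = 0 as defined
 * elsewhere): with max_peers = 512, hash_elems = 1024, which is already a
 * power of two, so dp_log2_ceil() yields log2 = 10, mask = 0x3ff and
 * idx_bits = 10, i.e. 1024 TAILQ bins are allocated.
 */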
506 
507 /*
508  * dp_peer_find_hash_add() - add peer (link or mld type) to
509  *			     peer_hash_table
510  * @soc: soc handle
511  * @peer: peer handle
512  *
513  * return: none
514  */
515 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
516 {
517 	unsigned index;
518 
519 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
520 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
521 		qdf_spin_lock_bh(&soc->peer_hash_lock);
522 
523 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
524 							DP_MOD_ID_CONFIG))) {
525 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
526 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
527 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
528 			return;
529 		}
530 
531 		/*
532 		 * It is important to add the new peer at the tail of
533 		 * peer list with the bin index. Together with having
534 		 * the hash_find function search from head to tail,
535 		 * this ensures that if two entries with the same MAC address
536 		 * are stored, the one added first will be found first.
537 		 */
538 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
539 				  hash_list_elem);
540 
541 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
542 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
543 		if (soc->arch_ops.mlo_peer_find_hash_add)
544 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
545 	} else {
546 		dp_err("unknown peer type %d", peer->peer_type);
547 	}
548 }
549 
550 /*
551  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
552  * @soc: soc handle
553  * @peer: peer handle
554  *
555  * return: none
556  */
557 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
558 {
559 	unsigned index;
560 	struct dp_peer *tmppeer = NULL;
561 	int found = 0;
562 
563 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
564 
565 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
566 		/* Check if tail is not empty before delete*/
567 		/* Check if tail is not empty before delete */
568 
569 		qdf_spin_lock_bh(&soc->peer_hash_lock);
570 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
571 			      hash_list_elem) {
572 			if (tmppeer == peer) {
573 				found = 1;
574 				break;
575 			}
576 		}
577 		QDF_ASSERT(found);
578 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
579 			     hash_list_elem);
580 
581 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
582 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
583 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
584 		if (soc->arch_ops.mlo_peer_find_hash_remove)
585 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
586 	} else {
587 		dp_err("unknown peer type %d", peer->peer_type);
588 	}
589 }
590 #else
591 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
592 {
593 	int i, hash_elems, log2;
594 
595 	/* allocate the peer MAC address -> peer object hash table */
596 	hash_elems = soc->max_peers;
597 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
598 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
599 	log2 = dp_log2_ceil(hash_elems);
600 	hash_elems = 1 << log2;
601 
602 	soc->peer_hash.mask = hash_elems - 1;
603 	soc->peer_hash.idx_bits = log2;
604 	/* allocate an array of TAILQ peer object lists */
605 	soc->peer_hash.bins = qdf_mem_malloc(
606 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
607 	if (!soc->peer_hash.bins)
608 		return QDF_STATUS_E_NOMEM;
609 
610 	for (i = 0; i < hash_elems; i++)
611 		TAILQ_INIT(&soc->peer_hash.bins[i]);
612 
613 	qdf_spinlock_create(&soc->peer_hash_lock);
614 	return QDF_STATUS_SUCCESS;
615 }
616 
617 static void dp_peer_find_hash_detach(struct dp_soc *soc)
618 {
619 	if (soc->peer_hash.bins) {
620 		qdf_mem_free(soc->peer_hash.bins);
621 		soc->peer_hash.bins = NULL;
622 		qdf_spinlock_destroy(&soc->peer_hash_lock);
623 	}
624 }
625 
626 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
627 {
628 	unsigned index;
629 
630 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
631 	qdf_spin_lock_bh(&soc->peer_hash_lock);
632 
633 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
634 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
635 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
636 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
637 		return;
638 	}
639 
640 	/*
641 	 * It is important to add the new peer at the tail of the peer list
642 	 * with the bin index.  Together with having the hash_find function
643 	 * search from head to tail, this ensures that if two entries with
644 	 * the same MAC address are stored, the one added first will be
645 	 * found first.
646 	 */
647 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
648 
649 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
650 }
651 
652 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
653 {
654 	unsigned index;
655 	struct dp_peer *tmppeer = NULL;
656 	int found = 0;
657 
658 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
659 	/* Check if tail is not empty before delete*/
660 	/* Check if tail is not empty before delete */
661 
662 	qdf_spin_lock_bh(&soc->peer_hash_lock);
663 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
664 		if (tmppeer == peer) {
665 			found = 1;
666 			break;
667 		}
668 	}
669 	QDF_ASSERT(found);
670 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
671 
672 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
673 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
674 }
675 
676 
677 #endif /* WLAN_FEATURE_11BE_MLO */
678 
679 /*
680  * dp_peer_vdev_list_add() - add peer into vdev's peer list
681  * @soc: soc handle
682  * @vdev: vdev handle
683  * @peer: peer handle
684  *
685  * return: none
686  */
687 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
688 			   struct dp_peer *peer)
689 {
690 	/* only link peer will be added to vdev peer list */
691 	if (IS_MLO_DP_MLD_PEER(peer))
692 		return;
693 
694 	qdf_spin_lock_bh(&vdev->peer_list_lock);
695 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
696 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
697 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
698 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
699 		return;
700 	}
701 
702 	/* add this peer into the vdev's list */
703 	if (wlan_op_mode_sta == vdev->opmode)
704 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
705 	else
706 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
707 
708 	vdev->num_peers++;
709 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
710 }
711 
712 /*
713  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
714  * @soc: SoC handle
715  * @vdev: VDEV handle
716  * @peer: peer handle
717  *
718  * Return: none
719  */
720 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
721 			      struct dp_peer *peer)
722 {
723 	uint8_t found = 0;
724 	struct dp_peer *tmppeer = NULL;
725 
726 	/* only link peer will be added to vdev peer list */
727 	if (IS_MLO_DP_MLD_PEER(peer))
728 		return;
729 
730 	qdf_spin_lock_bh(&vdev->peer_list_lock);
731 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
732 		if (tmppeer == peer) {
733 			found = 1;
734 			break;
735 		}
736 	}
737 
738 	if (found) {
739 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
740 			     peer_list_elem);
741 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
742 		vdev->num_peers--;
743 	} else {
744 		/* Ignoring the remove operation as peer not found */
745 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK",
746 			      soc, peer, vdev, &peer->vdev->peer_list);
747 	}
748 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
749 }
750 
751 /*
752  * dp_txrx_peer_attach_add() - Attach txrx_peer and add it to peer_id table
753  * @soc: SoC handle
754  * @peer: peer handle
755  * @txrx_peer: txrx peer handle
756  *
757  * Return: None
758  */
759 void dp_txrx_peer_attach_add(struct dp_soc *soc,
760 			     struct dp_peer *peer,
761 			     struct dp_txrx_peer *txrx_peer)
762 {
763 	qdf_spin_lock_bh(&soc->peer_map_lock);
764 
765 	peer->txrx_peer = txrx_peer;
766 	txrx_peer->bss_peer = peer->bss_peer;
767 
768 	if (peer->peer_id == HTT_INVALID_PEER) {
769 		qdf_spin_unlock_bh(&soc->peer_map_lock);
770 		return;
771 	}
772 
773 	txrx_peer->peer_id = peer->peer_id;
774 
775 	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);
776 
777 	qdf_spin_unlock_bh(&soc->peer_map_lock);
778 }
779 
780 /*
781  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
782  * @soc: SoC handle
783  * @peer: peer handle
784  * @peer_id: peer_id
785  *
786  * Return: None
787  */
788 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
789 				struct dp_peer *peer,
790 				uint16_t peer_id)
791 {
792 	QDF_ASSERT(peer_id <= soc->max_peer_id);
793 
794 	qdf_spin_lock_bh(&soc->peer_map_lock);
795 
796 	peer->peer_id = peer_id;
797 
798 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
799 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
800 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
801 		qdf_spin_unlock_bh(&soc->peer_map_lock);
802 		return;
803 	}
804 
805 	if (!soc->peer_id_to_obj_map[peer_id]) {
806 		soc->peer_id_to_obj_map[peer_id] = peer;
807 		if (peer->txrx_peer)
808 			peer->txrx_peer->peer_id = peer_id;
809 	} else {
810 		/* Peer map event came for peer_id which
811 		 * is already mapped, this is not expected
812 		 */
813 		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped to peer %pK",
814 		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
815 		       soc->peer_id_to_obj_map[peer_id]);
816 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
817 		qdf_assert_always(0);
818 	}
819 	qdf_spin_unlock_bh(&soc->peer_map_lock);
820 }
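/*
 * Illustrative pairing (hypothetical flow): the add/remove helpers above
 * and below bracket the lifetime of the firmware peer_id mapping:
 *
 *	dp_peer_find_id_to_obj_add(soc, peer, peer_id);	// on HTT peer map
 *	// ... datapath uses soc->peer_id_to_obj_map[peer_id] ...
 *	dp_peer_find_id_to_obj_remove(soc, peer_id);	// on HTT peer unmap
 */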
821 
822 /*
823  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
824  * @soc: SoC handle
825  * @peer_id: peer_id
826  *
827  * Return: None
828  */
829 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
830 				   uint16_t peer_id)
831 {
832 	struct dp_peer *peer = NULL;
833 	QDF_ASSERT(peer_id <= soc->max_peer_id);
834 
835 	qdf_spin_lock_bh(&soc->peer_map_lock);
836 	peer = soc->peer_id_to_obj_map[peer_id];
837 	peer->peer_id = HTT_INVALID_PEER;
838 	if (peer->txrx_peer)
839 		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
840 	soc->peer_id_to_obj_map[peer_id] = NULL;
841 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
842 	qdf_spin_unlock_bh(&soc->peer_map_lock);
843 }
844 
845 #ifdef FEATURE_MEC
846 /**
847  * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
848  * @soc: SoC handle
849  *
850  * Return: QDF_STATUS
851  */
852 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
853 {
854 	int log2, hash_elems, i;
855 
856 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
857 	hash_elems = 1 << log2;
858 
859 	soc->mec_hash.mask = hash_elems - 1;
860 	soc->mec_hash.idx_bits = log2;
861 
862 	dp_peer_info("%pK: max mec index: %d",
863 		     soc, DP_PEER_MAX_MEC_IDX);
864 
865 	/* allocate an array of TAILQ mec object lists */
866 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
867 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
868 							      dp_mec_entry)));
869 
870 	if (!soc->mec_hash.bins)
871 		return QDF_STATUS_E_NOMEM;
872 
873 	for (i = 0; i < hash_elems; i++)
874 		TAILQ_INIT(&soc->mec_hash.bins[i]);
875 
876 	return QDF_STATUS_SUCCESS;
877 }
878 
879 /**
880  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
881  * @soc: SoC handle
882  * @mac_addr: MAC address to hash
883  * Return: MEC hash
884  */
885 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
886 					      union dp_align_mac_addr *mac_addr)
887 {
888 	uint32_t index;
889 
890 	index =
891 		mac_addr->align2.bytes_ab ^
892 		mac_addr->align2.bytes_cd ^
893 		mac_addr->align2.bytes_ef;
894 	index ^= index >> soc->mec_hash.idx_bits;
895 	index &= soc->mec_hash.mask;
896 	return index;
897 }
898 
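/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by pdev id and
 *				       MAC address
 * @soc: SoC handle
 * @pdev_id: pdev id to match
 * @mec_mac_addr: MEC MAC address
 *
 * Return: MEC entry if found, else NULL
 */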
899 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
900 						     uint8_t pdev_id,
901 						     uint8_t *mec_mac_addr)
902 {
903 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
904 	uint32_t index;
905 	struct dp_mec_entry *mecentry;
906 
907 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
908 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
909 	mac_addr = &local_mac_addr_aligned;
910 
911 	index = dp_peer_mec_hash_index(soc, mac_addr);
912 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
913 		if ((pdev_id == mecentry->pdev_id) &&
914 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
915 			return mecentry;
916 	}
917 
918 	return NULL;
919 }
920 
921 /**
922  * dp_peer_mec_hash_add() - Add MEC entry into hash table
923  * @soc: SoC handle
924  * @mecentry: MEC entry to add
925  * This function adds the MEC entry into SoC MEC hash table
926  *
927  * Return: None
928  */
929 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
930 					struct dp_mec_entry *mecentry)
931 {
932 	uint32_t index;
933 
934 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
935 	qdf_spin_lock_bh(&soc->mec_lock);
936 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
937 	qdf_spin_unlock_bh(&soc->mec_lock);
938 }
939 
940 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
941 				 struct dp_vdev *vdev,
942 				 uint8_t *mac_addr)
943 {
944 	struct dp_mec_entry *mecentry = NULL;
945 	struct dp_pdev *pdev = NULL;
946 
947 	if (!vdev) {
948 		dp_peer_err("%pK: Peers vdev is NULL", soc);
949 		return QDF_STATUS_E_INVAL;
950 	}
951 
952 	pdev = vdev->pdev;
953 
954 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
955 					 DP_PEER_MAX_MEC_ENTRY)) {
956 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
957 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
958 		return QDF_STATUS_E_NOMEM;
959 	}
960 
961 	qdf_spin_lock_bh(&soc->mec_lock);
962 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
963 						   mac_addr);
964 	if (qdf_likely(mecentry)) {
965 		mecentry->is_active = TRUE;
966 		qdf_spin_unlock_bh(&soc->mec_lock);
967 		return QDF_STATUS_E_ALREADY;
968 	}
969 
970 	qdf_spin_unlock_bh(&soc->mec_lock);
971 
972 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
973 		      QDF_MAC_ADDR_FMT,
974 		      soc, pdev->pdev_id, vdev->vdev_id,
975 		      QDF_MAC_ADDR_REF(mac_addr));
976 
977 	mecentry = (struct dp_mec_entry *)
978 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
979 
980 	if (qdf_unlikely(!mecentry)) {
981 		dp_peer_err("%pK: fail to allocate mecentry", soc);
982 		return QDF_STATUS_E_NOMEM;
983 	}
984 
985 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
986 			 (struct qdf_mac_addr *)mac_addr);
987 	mecentry->pdev_id = pdev->pdev_id;
988 	mecentry->vdev_id = vdev->vdev_id;
989 	mecentry->is_active = TRUE;
990 	dp_peer_mec_hash_add(soc, mecentry);
991 
992 	qdf_atomic_inc(&soc->mec_cnt);
993 	DP_STATS_INC(soc, mec.added, 1);
994 
995 	return QDF_STATUS_SUCCESS;
996 }
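/*
 * Illustrative usage (hypothetical caller in a rx-side MEC check): the
 * return status distinguishes a refreshed entry from a newly added one:
 *
 *	status = dp_peer_mec_add_entry(soc, vdev, sa_mac);
 *	if (status == QDF_STATUS_E_ALREADY)
 *		;	// existing entry was just marked active again
 *	else if (QDF_IS_STATUS_ERROR(status))
 *		;	// table full or allocation failure
 */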
997 
998 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
999 			      void *ptr)
1000 {
1001 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
1002 
1003 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
1004 
1005 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
1006 		     hash_list_elem);
1007 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
1008 }
1009 
1010 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
1011 {
1012 	struct dp_mec_entry *mecentry, *mecentry_next;
1013 
1014 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
1015 
1016 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
1017 			   mecentry_next) {
1018 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
1019 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
1020 		qdf_mem_free(mecentry);
1021 		qdf_atomic_dec(&soc->mec_cnt);
1022 		DP_STATS_INC(soc, mec.deleted, 1);
1023 	}
1024 }
1025 
1026 /**
1027  * dp_peer_mec_hash_detach() - Free MEC Hash table
1028  * @soc: SoC handle
1029  *
1030  * Return: None
1031  */
1032 void dp_peer_mec_hash_detach(struct dp_soc *soc)
1033 {
1034 	dp_peer_mec_flush_entries(soc);
1035 	qdf_mem_free(soc->mec_hash.bins);
1036 	soc->mec_hash.bins = NULL;
1037 }
1038 
1039 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
1040 {
1041 	qdf_spinlock_destroy(&soc->mec_lock);
1042 }
1043 
1044 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
1045 {
1046 	qdf_spinlock_create(&soc->mec_lock);
1047 }
1048 #else
1049 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
1050 {
1051 	return QDF_STATUS_SUCCESS;
1052 }
1053 
1054 void dp_peer_mec_hash_detach(struct dp_soc *soc)
1055 {
1056 }
1057 #endif
1058 
1059 #ifdef FEATURE_AST
1060 #ifdef WLAN_FEATURE_11BE_MLO
1061 /*
1062  * dp_peer_exist_on_pdev() - check if peer with mac address exists on pdev
1063  *
1064  * @soc: Datapath SOC handle
1065  * @peer_mac_addr: peer mac address
1066  * @mac_addr_is_aligned: is mac address aligned
1067  * @pdev: Datapath PDEV handle
1068  *
1069  * Return: true if peer found else return false
1070  */
1071 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1072 				  uint8_t *peer_mac_addr,
1073 				  int mac_addr_is_aligned,
1074 				  struct dp_pdev *pdev)
1075 {
1076 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1077 	unsigned int index;
1078 	struct dp_peer *peer;
1079 	bool found = false;
1080 
1081 	if (mac_addr_is_aligned) {
1082 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1083 	} else {
1084 		qdf_mem_copy(
1085 			&local_mac_addr_aligned.raw[0],
1086 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1087 		mac_addr = &local_mac_addr_aligned;
1088 	}
1089 	index = dp_peer_find_hash_index(soc, mac_addr);
1090 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1091 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1092 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1093 		    (peer->vdev->pdev == pdev)) {
1094 			found = true;
1095 			break;
1096 		}
1097 	}
1098 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1099 
1100 	if (found)
1101 		return found;
1102 
1103 	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
1104 					  mac_addr_is_aligned, DP_VDEV_ALL,
1105 					  DP_MOD_ID_CDP);
1106 	if (peer) {
1107 		if (peer->vdev->pdev == pdev)
1108 			found = true;
1109 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1110 	}
1111 
1112 	return found;
1113 }
1114 #else
1115 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
1116 				  uint8_t *peer_mac_addr,
1117 				  int mac_addr_is_aligned,
1118 				  struct dp_pdev *pdev)
1119 {
1120 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1121 	unsigned int index;
1122 	struct dp_peer *peer;
1123 	bool found = false;
1124 
1125 	if (mac_addr_is_aligned) {
1126 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
1127 	} else {
1128 		qdf_mem_copy(
1129 			&local_mac_addr_aligned.raw[0],
1130 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1131 		mac_addr = &local_mac_addr_aligned;
1132 	}
1133 	index = dp_peer_find_hash_index(soc, mac_addr);
1134 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1135 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1136 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1137 		    (peer->vdev->pdev == pdev)) {
1138 			found = true;
1139 			break;
1140 		}
1141 	}
1142 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1143 	return found;
1144 }
1145 #endif /* WLAN_FEATURE_11BE_MLO */
1146 
1147 /*
1148  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
1149  * @soc: SoC handle
1150  *
1151  * Return: QDF_STATUS
1152  */
1153 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1154 {
1155 	int i, hash_elems, log2;
1156 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
1157 
1158 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
1159 		DP_AST_HASH_LOAD_SHIFT);
1160 
1161 	log2 = dp_log2_ceil(hash_elems);
1162 	hash_elems = 1 << log2;
1163 
1164 	soc->ast_hash.mask = hash_elems - 1;
1165 	soc->ast_hash.idx_bits = log2;
1166 
1167 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
1168 		     soc, hash_elems, max_ast_idx);
1169 
1170 	/* allocate an array of TAILQ peer object lists */
1171 	soc->ast_hash.bins = qdf_mem_malloc(
1172 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1173 				dp_ast_entry)));
1174 
1175 	if (!soc->ast_hash.bins)
1176 		return QDF_STATUS_E_NOMEM;
1177 
1178 	for (i = 0; i < hash_elems; i++)
1179 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1180 
1181 	return QDF_STATUS_SUCCESS;
1182 }
1183 
1184 /*
1185  * dp_peer_ast_cleanup() - cleanup the references
1186  * @soc: SoC handle
1187  * @ast: ast entry
1188  *
1189  * Return: None
1190  */
1191 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1192 				       struct dp_ast_entry *ast)
1193 {
1194 	txrx_ast_free_cb cb = ast->callback;
1195 	void *cookie = ast->cookie;
1196 
1197 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1198 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1199 
1200 	/* Call the callbacks to free up the cookie */
1201 	if (cb) {
1202 		ast->callback = NULL;
1203 		ast->cookie = NULL;
1204 		cb(soc->ctrl_psoc,
1205 		   dp_soc_to_cdp_soc(soc),
1206 		   cookie,
1207 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1208 	}
1209 }
1210 
1211 /*
1212  * dp_peer_ast_hash_detach() - Free AST Hash table
1213  * @soc: SoC handle
1214  *
1215  * Return: None
1216  */
1217 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1218 {
1219 	unsigned int index;
1220 	struct dp_ast_entry *ast, *ast_next;
1221 
1222 	if (!soc->ast_hash.mask)
1223 		return;
1224 
1225 	if (!soc->ast_hash.bins)
1226 		return;
1227 
1228 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1229 
1230 	qdf_spin_lock_bh(&soc->ast_lock);
1231 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1232 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1233 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1234 					   hash_list_elem, ast_next) {
1235 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1236 					     hash_list_elem);
1237 				dp_peer_ast_cleanup(soc, ast);
1238 				soc->num_ast_entries--;
1239 				qdf_mem_free(ast);
1240 			}
1241 		}
1242 	}
1243 	qdf_spin_unlock_bh(&soc->ast_lock);
1244 
1245 	qdf_mem_free(soc->ast_hash.bins);
1246 	soc->ast_hash.bins = NULL;
1247 }
1248 
1249 /*
1250  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1251  * @soc: SoC handle
1252  * @mac_addr: MAC address to hash
1253  * Return: AST hash
1254  */
1255 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1256 	union dp_align_mac_addr *mac_addr)
1257 {
1258 	uint32_t index;
1259 
1260 	index =
1261 		mac_addr->align2.bytes_ab ^
1262 		mac_addr->align2.bytes_cd ^
1263 		mac_addr->align2.bytes_ef;
1264 	index ^= index >> soc->ast_hash.idx_bits;
1265 	index &= soc->ast_hash.mask;
1266 	return index;
1267 }
1268 
1269 /*
1270  * dp_peer_ast_hash_add() - Add AST entry into hash table
1271  * @soc: SoC handle
1272  * @ase: AST entry to add
1273  * This function adds the AST entry into SoC AST hash table
1274  * It assumes caller has taken the ast lock to protect the access to this table
1275  *
1276  * Return: None
1277  */
1278 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1279 		struct dp_ast_entry *ase)
1280 {
1281 	uint32_t index;
1282 
1283 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1284 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1285 }
1286 
1287 /*
1288  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
1289  * @soc: SoC handle
1290  * @ase: AST entry to remove
1291  * This function removes the AST entry from soc AST hash table
1292  * It assumes caller has taken the ast lock to protect the access to this table
1293  *
1294  * Return: None
1295  */
1296 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1297 			     struct dp_ast_entry *ase)
1298 {
1299 	unsigned index;
1300 	struct dp_ast_entry *tmpase;
1301 	int found = 0;
1302 
1303 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
1304 		return;
1305 
1306 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1307 	/* Check if tail is not empty before delete */
1308 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1309 
1310 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1311 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1312 
1313 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1314 		if (tmpase == ase) {
1315 			found = 1;
1316 			break;
1317 		}
1318 	}
1319 
1320 	QDF_ASSERT(found);
1321 
1322 	if (found)
1323 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1324 }
1325 
1326 /*
1327  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
1328  * @soc: SoC handle
1329  * @ast_mac_addr: MAC address of the AST entry
1330  * @vdev_id: vdev id to match
1331  * It assumes the caller holds the ast lock protecting the AST hash table
1332  *
1333  * Return: AST entry
1334  */
1335 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1336 						     uint8_t *ast_mac_addr,
1337 						     uint8_t vdev_id)
1338 {
1339 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1340 	uint32_t index;
1341 	struct dp_ast_entry *ase;
1342 
1343 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1344 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1345 	mac_addr = &local_mac_addr_aligned;
1346 
1347 	index = dp_peer_ast_hash_index(soc, mac_addr);
1348 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1349 		if ((vdev_id == ase->vdev_id) &&
1350 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1351 			return ase;
1352 		}
1353 	}
1354 
1355 	return NULL;
1356 }
1357 
1358 /*
1359  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
1360  * @soc: SoC handle
1361  * @ast_mac_addr: MAC address of the AST entry
1362  * @pdev_id: pdev id to match
1363  * It assumes the caller holds the ast lock protecting the AST hash table
1364  *
1365  * Return: AST entry
1366  */
1367 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1368 						     uint8_t *ast_mac_addr,
1369 						     uint8_t pdev_id)
1370 {
1371 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1372 	uint32_t index;
1373 	struct dp_ast_entry *ase;
1374 
1375 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1376 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1377 	mac_addr = &local_mac_addr_aligned;
1378 
1379 	index = dp_peer_ast_hash_index(soc, mac_addr);
1380 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1381 		if ((pdev_id == ase->pdev_id) &&
1382 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1383 			return ase;
1384 		}
1385 	}
1386 
1387 	return NULL;
1388 }
1389 
1390 /*
1391  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
1392  * @soc: SoC handle
1393  * @ast_mac_addr: MAC address of the AST entry
1394  *
1395  * It assumes the caller holds the ast lock protecting the AST hash table
1396  *
1397  * Return: AST entry
1398  */
1399 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1400 					       uint8_t *ast_mac_addr)
1401 {
1402 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1403 	unsigned index;
1404 	struct dp_ast_entry *ase;
1405 
1406 	if (!soc->ast_hash.bins)
1407 		return NULL;
1408 
1409 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1410 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1411 	mac_addr = &local_mac_addr_aligned;
1412 
1413 	index = dp_peer_ast_hash_index(soc, mac_addr);
1414 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1415 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1416 			return ase;
1417 		}
1418 	}
1419 
1420 	return NULL;
1421 }
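/*
 * Illustrative usage (hypothetical caller): all of the find variants
 * above assume soc->ast_lock is held across the lookup and across any
 * use of the returned entry:
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ase = dp_peer_ast_hash_find_soc(soc, mac);
 *	if (ase)
 *		ase->is_active = TRUE;	// example field access
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */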
1422 
1423 /*
1424  * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
1425  * @soc: SoC handle
1426  * @peer_id: peer id from firmware
1427  * @mac_addr: MAC address of ast node
1428  * @hw_peer_id: HW AST Index returned by target in peer map event
1429  * @vdev_id: vdev id of the VAP to which the peer belongs
1430  * @ast_hash: ast hash value in HW
1431  * @is_wds: flag to indicate peer map event for WDS ast entry
1432  *
1433  * Return: QDF_STATUS code
1434  */
1435 static inline
1436 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
1437 				    uint8_t *mac_addr, uint16_t hw_peer_id,
1438 				    uint8_t vdev_id, uint16_t ast_hash,
1439 				    uint8_t is_wds)
1440 {
1441 	struct dp_vdev *vdev;
1442 	struct dp_ast_entry *ast_entry;
1443 	enum cdp_txrx_ast_entry_type type;
1444 	struct dp_peer *peer;
1445 	struct dp_peer *old_peer;
1446 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1447 
1448 	if (is_wds)
1449 		type = CDP_TXRX_AST_TYPE_WDS;
1450 	else
1451 		type = CDP_TXRX_AST_TYPE_STATIC;
1452 
1453 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1454 	if (!peer) {
1455 		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1456 			     soc, peer_id,
1457 			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1458 		return QDF_STATUS_E_INVAL;
1459 	}
1460 
1461 	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
1462 		type = CDP_TXRX_AST_TYPE_MLD;
1463 
1464 	vdev = peer->vdev;
1465 	if (!vdev) {
1466 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1467 		status = QDF_STATUS_E_INVAL;
1468 		goto fail;
1469 	}
1470 
1471 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1472 		if (type != CDP_TXRX_AST_TYPE_STATIC &&
1473 		    type != CDP_TXRX_AST_TYPE_MLD &&
1474 		    type != CDP_TXRX_AST_TYPE_SELF) {
1475 			status = QDF_STATUS_E_BUSY;
1476 			goto fail;
1477 		}
1478 	}
1479 
1480 	dp_peer_debug("%pK: vdev: %u  ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1481 		      soc, vdev->vdev_id, type,
1482 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1483 		      QDF_MAC_ADDR_REF(mac_addr));
1484 
1485 	/*
1486 	 * In an MLO scenario, the same mac address may be used as both
1487 	 * a link mac address and an MLD mac address, so a duplicate
1488 	 * AST map needs to be handled for the non-mld type.
1489 	 */
1490 	qdf_spin_lock_bh(&soc->ast_lock);
1491 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1492 	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
1493 		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1494 			      hw_peer_id, vdev_id,
1495 			      QDF_MAC_ADDR_REF(mac_addr));
1496 
1497 		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1498 						   DP_MOD_ID_AST);
1499 		if (!old_peer) {
1500 			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
1501 				     soc, ast_entry->peer_id,
1502 				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
1503 			qdf_spin_unlock_bh(&soc->ast_lock);
1504 			status = QDF_STATUS_E_INVAL;
1505 			goto fail;
1506 		}
1507 
1508 		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
1509 		dp_peer_free_ast_entry(soc, ast_entry);
1510 		if (old_peer)
1511 			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1512 	}
1513 
1514 	ast_entry = (struct dp_ast_entry *)
1515 		qdf_mem_malloc(sizeof(struct dp_ast_entry));
1516 	if (!ast_entry) {
1517 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1518 		qdf_spin_unlock_bh(&soc->ast_lock);
1519 		QDF_ASSERT(0);
1520 		status = QDF_STATUS_E_NOMEM;
1521 		goto fail;
1522 	}
1523 
1524 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1525 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1526 	ast_entry->is_mapped = false;
1527 	ast_entry->delete_in_progress = false;
1528 	ast_entry->next_hop = 0;
1529 	ast_entry->vdev_id = vdev->vdev_id;
1530 	ast_entry->type = type;
1531 
1532 	switch (type) {
1533 	case CDP_TXRX_AST_TYPE_STATIC:
1534 		if (peer->vdev->opmode == wlan_op_mode_sta)
1535 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1536 		break;
1537 	case CDP_TXRX_AST_TYPE_WDS:
1538 		ast_entry->next_hop = 1;
1539 		break;
1540 	case CDP_TXRX_AST_TYPE_MLD:
1541 		break;
1542 	default:
1543 		dp_peer_alert("%pK: Incorrect AST entry type", soc);
1544 	}
1545 
1546 	ast_entry->is_active = TRUE;
1547 	DP_STATS_INC(soc, ast.added, 1);
1548 	soc->num_ast_entries++;
1549 	dp_peer_ast_hash_add(soc, ast_entry);
1550 
1551 	ast_entry->ast_idx = hw_peer_id;
1552 	ast_entry->ast_hash_value = ast_hash;
1553 	ast_entry->peer_id = peer_id;
1554 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1555 			  ase_list_elem);
1556 
1557 	qdf_spin_unlock_bh(&soc->ast_lock);
1558 fail:
1559 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1560 
1561 	return status;
1562 }
1563 
1564 /*
1565  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1566  * @soc: SoC handle
1567  * @peer: peer to which ast node belongs
1568  * @mac_addr: MAC address of ast node
1569  * @hw_peer_id: HW AST Index returned by target in peer map event
1570  * @vdev_id: vdev id of the VAP to which the peer belongs
1571  * @ast_hash: ast hash value in HW
1572  * @is_wds: flag to indicate peer map event for WDS ast entry
1573  *
1574  * Return: QDF_STATUS code
1575  */
1576 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1577 					 struct dp_peer *peer,
1578 					 uint8_t *mac_addr,
1579 					 uint16_t hw_peer_id,
1580 					 uint8_t vdev_id,
1581 					 uint16_t ast_hash,
1582 					 uint8_t is_wds)
1583 {
1584 	struct dp_ast_entry *ast_entry = NULL;
1585 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1586 	void *cookie = NULL;
1587 	txrx_ast_free_cb cb = NULL;
1588 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1589 
1590 	if (soc->ast_offload_support)
1591 		return QDF_STATUS_SUCCESS;
1592 
1593 	if (!peer) {
1594 		return QDF_STATUS_E_INVAL;
1595 	}
1596 
1597 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1598 		    soc, peer, hw_peer_id, vdev_id,
1599 		    QDF_MAC_ADDR_REF(mac_addr));
1600 
1601 	qdf_spin_lock_bh(&soc->ast_lock);
1602 
1603 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1604 
1605 	if (is_wds) {
1606 		/*
1607 		 * In certain cases, such as an Auth attack on a
1608 		 * repeater, the number of ast_entries falling in
1609 		 * the same hash bucket can exceed the max_skid
1610 		 * length supported by HW in the root AP. In these
1611 		 * cases the FW will return the hw_peer_id (ast_index)
1612 		 * as 0xffff, indicating HW could not add the entry
1613 		 * to its table. The host has to delete the entry
1614 		 * from its own table in these cases.
1615 		 */
1616 		if (hw_peer_id == HTT_INVALID_PEER) {
1617 			DP_STATS_INC(soc, ast.map_err, 1);
1618 			if (ast_entry) {
1619 				if (ast_entry->is_mapped) {
1620 					soc->ast_table[ast_entry->ast_idx] =
1621 						NULL;
1622 				}
1623 
1624 				cb = ast_entry->callback;
1625 				cookie = ast_entry->cookie;
1626 				peer_type = ast_entry->type;
1627 
1628 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1629 				dp_peer_free_ast_entry(soc, ast_entry);
1630 
1631 				qdf_spin_unlock_bh(&soc->ast_lock);
1632 
1633 				if (cb) {
1634 					cb(soc->ctrl_psoc,
1635 					   dp_soc_to_cdp_soc(soc),
1636 					   cookie,
1637 					   CDP_TXRX_AST_DELETED);
1638 				}
1639 			} else {
1640 				qdf_spin_unlock_bh(&soc->ast_lock);
1641 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1642 					      peer, peer->peer_id,
1643 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1644 					      QDF_MAC_ADDR_REF(mac_addr),
1645 					      vdev_id, is_wds);
1646 			}
1647 			err = QDF_STATUS_E_INVAL;
1648 
1649 			dp_hmwds_ast_add_notify(peer, mac_addr,
1650 						peer_type, err, true);
1651 
1652 			return err;
1653 		}
1654 	}
1655 
1656 	if (ast_entry) {
1657 		ast_entry->ast_idx = hw_peer_id;
1658 		soc->ast_table[hw_peer_id] = ast_entry;
1659 		ast_entry->is_active = TRUE;
1660 		peer_type = ast_entry->type;
1661 		ast_entry->ast_hash_value = ast_hash;
1662 		ast_entry->is_mapped = TRUE;
1663 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1664 
1665 		ast_entry->peer_id = peer->peer_id;
1666 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1667 				  ase_list_elem);
1668 	}
1669 
1670 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1671 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1672 			soc->cdp_soc.ol_ops->peer_map_event(
1673 			soc->ctrl_psoc, peer->peer_id,
1674 			hw_peer_id, vdev_id,
1675 			mac_addr, peer_type, ast_hash);
1676 		}
1677 	} else {
1678 		dp_peer_err("%pK: AST entry not found", soc);
1679 		err = QDF_STATUS_E_NOENT;
1680 	}
1681 
1682 	qdf_spin_unlock_bh(&soc->ast_lock);
1683 
1684 	dp_hmwds_ast_add_notify(peer, mac_addr,
1685 				peer_type, err, true);
1686 
1687 	return err;
1688 }
1689 
1690 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1691 			   struct cdp_soc *dp_soc,
1692 			   void *cookie,
1693 			   enum cdp_ast_free_status status)
1694 {
1695 	struct dp_ast_free_cb_params *param =
1696 		(struct dp_ast_free_cb_params *)cookie;
1697 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1698 	struct dp_peer *peer = NULL;
1699 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1700 
1701 	if (status != CDP_TXRX_AST_DELETED) {
1702 		qdf_mem_free(cookie);
1703 		return;
1704 	}
1705 
1706 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1707 				      0, param->vdev_id, DP_MOD_ID_AST);
1708 	if (peer) {
1709 		err = dp_peer_add_ast(soc, peer,
1710 				      &param->mac_addr.raw[0],
1711 				      param->type,
1712 				      param->flags);
1713 
1714 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1715 					param->type, err, false);
1716 
1717 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1718 	}
1719 	qdf_mem_free(cookie);
1720 }
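/*
 * Illustrative flow summary: dp_peer_free_hmwds_cb() is the second half
 * of the HMWDS re-add path. When dp_peer_add_ast() below finds a
 * colliding entry for a WDS_HM request, it saves the request in a
 * dp_ast_free_cb_params cookie, points the old entry's callback here and
 * deletes it; once the target confirms (CDP_TXRX_AST_DELETED), this
 * callback re-issues dp_peer_add_ast() with the saved parameters.
 */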
1721 
1722 /*
1723  * dp_peer_add_ast() - Allocate and add AST entry into peer list
1724  * @soc: SoC handle
1725  * @peer: peer to which ast node belongs
1726  * @mac_addr: MAC address of ast node
1727  * @type: type from enum cdp_txrx_ast_entry_type
1728  * @flags: ast entry flags
1729  * This API is used by the WDS source port learning function to
1730  * add a new AST entry into the peer AST list
1731  *
1732  * Return: QDF_STATUS code
1733  */
1734 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1735 			   struct dp_peer *peer,
1736 			   uint8_t *mac_addr,
1737 			   enum cdp_txrx_ast_entry_type type,
1738 			   uint32_t flags)
1739 {
1740 	struct dp_ast_entry *ast_entry = NULL;
1741 	struct dp_vdev *vdev = NULL;
1742 	struct dp_pdev *pdev = NULL;
1743 	txrx_ast_free_cb cb = NULL;
1744 	void *cookie = NULL;
1745 	struct dp_peer *vap_bss_peer = NULL;
1746 	bool is_peer_found = false;
1747 	int status = 0;
1748 
1749 	if (soc->ast_offload_support)
1750 		return QDF_STATUS_E_INVAL;
1751 
1752 	vdev = peer->vdev;
1753 	if (!vdev) {
1754 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1755 		QDF_ASSERT(0);
1756 		return QDF_STATUS_E_INVAL;
1757 	}
1758 
1759 	pdev = vdev->pdev;
1760 
1761 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1762 
1763 	qdf_spin_lock_bh(&soc->ast_lock);
1764 
1765 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1766 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1767 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1768 			qdf_spin_unlock_bh(&soc->ast_lock);
1769 			return QDF_STATUS_E_BUSY;
1770 		}
1771 	}
1772 
1773 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1774 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1775 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1776 		      QDF_MAC_ADDR_REF(mac_addr));
1777 
1778 	/* fw supports only 2 times the max_peers ast entries */
1779 	if (soc->num_ast_entries >=
1780 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1781 		qdf_spin_unlock_bh(&soc->ast_lock);
1782 		dp_peer_err("%pK: Max ast entries reached", soc);
1783 		return QDF_STATUS_E_RESOURCES;
1784 	}
1785 
1786 	/* If the AST entry already exists, just return from here.
1787 	 * An ast entry with the same mac address can exist on different
1788 	 * radios; if ast_override support is enabled, use search by pdev
1789 	 * in this case.
1790 	 */
1791 	if (soc->ast_override_support) {
1792 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1793 							    pdev->pdev_id);
1794 		if (ast_entry) {
1795 			qdf_spin_unlock_bh(&soc->ast_lock);
1796 			return QDF_STATUS_E_ALREADY;
1797 		}
1798 
1799 		if (is_peer_found) {
1800 			/* During WDS to static roaming, the peer is added
1801 			 * to the list before the static AST entry is
1802 			 * created. So, allow an AST entry of STATIC type
1803 			 * even if the peer is present.
1804 			 */
1805 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1806 				qdf_spin_unlock_bh(&soc->ast_lock);
1807 				return QDF_STATUS_E_ALREADY;
1808 			}
1809 		}
1810 	} else {
1811 		/* For HMWDS_SEC, entries can be added for the same mac
1812 		 * address; do not check for an existing entry.
1813 		 */
1814 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1815 			goto add_ast_entry;
1816 
1817 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1818 
1819 		if (ast_entry) {
1820 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1821 			    !ast_entry->delete_in_progress) {
1822 				qdf_spin_unlock_bh(&soc->ast_lock);
1823 				return QDF_STATUS_E_ALREADY;
1824 			}
1825 
1826 			/* An add for an HMWDS entry cannot be ignored if
1827 			 * an AST entry with the same mac address exists.
1828 			 *
1829 			 * If an ast entry exists with the requested mac
1830 			 * address, send a delete command and register a
1831 			 * callback which can take care of adding the HMWDS
1832 			 * ast entry on delete confirmation from the target.
1833 			 */
1834 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1835 				struct dp_ast_free_cb_params *param = NULL;
1836 
1837 				if (ast_entry->type ==
1838 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1839 					goto add_ast_entry;
1840 
1841 				/* save existing callback */
1842 				if (ast_entry->callback) {
1843 					cb = ast_entry->callback;
1844 					cookie = ast_entry->cookie;
1845 				}
1846 
1847 				param = qdf_mem_malloc(sizeof(*param));
1848 				if (!param) {
1849 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1850 						  QDF_TRACE_LEVEL_ERROR,
1851 						  "Allocation failed");
1852 					qdf_spin_unlock_bh(&soc->ast_lock);
1853 					return QDF_STATUS_E_NOMEM;
1854 				}
1855 
1856 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1857 					     QDF_MAC_ADDR_SIZE);
1858 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1859 					     &peer->mac_addr.raw[0],
1860 					     QDF_MAC_ADDR_SIZE);
1861 				param->type = type;
1862 				param->flags = flags;
1863 				param->vdev_id = vdev->vdev_id;
1864 				ast_entry->callback = dp_peer_free_hmwds_cb;
1865 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1866 				ast_entry->type = type;
1867 				ast_entry->cookie = (void *)param;
1868 				if (!ast_entry->delete_in_progress)
1869 					dp_peer_del_ast(soc, ast_entry);
1870 
1871 				qdf_spin_unlock_bh(&soc->ast_lock);
1872 
1873 				/* Call the saved callback */
1874 				if (cb) {
1875 					cb(soc->ctrl_psoc,
1876 					   dp_soc_to_cdp_soc(soc),
1877 					   cookie,
1878 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1879 				}
1880 				return QDF_STATUS_E_AGAIN;
1881 			}
1882 
1883 			qdf_spin_unlock_bh(&soc->ast_lock);
1884 			return QDF_STATUS_E_ALREADY;
1885 		}
1886 	}
1887 
1888 add_ast_entry:
1889 	ast_entry = (struct dp_ast_entry *)
1890 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1891 
1892 	if (!ast_entry) {
1893 		qdf_spin_unlock_bh(&soc->ast_lock);
1894 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1895 		QDF_ASSERT(0);
1896 		return QDF_STATUS_E_NOMEM;
1897 	}
1898 
1899 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1900 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1901 	ast_entry->is_mapped = false;
1902 	ast_entry->delete_in_progress = false;
1903 	ast_entry->peer_id = HTT_INVALID_PEER;
1904 	ast_entry->next_hop = 0;
1905 	ast_entry->vdev_id = vdev->vdev_id;
1906 
1907 	switch (type) {
1908 	case CDP_TXRX_AST_TYPE_STATIC:
1909 		peer->self_ast_entry = ast_entry;
1910 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1911 		if (peer->vdev->opmode == wlan_op_mode_sta)
1912 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1913 		break;
1914 	case CDP_TXRX_AST_TYPE_SELF:
1915 		peer->self_ast_entry = ast_entry;
1916 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1917 		break;
1918 	case CDP_TXRX_AST_TYPE_WDS:
1919 		ast_entry->next_hop = 1;
1920 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1921 		break;
1922 	case CDP_TXRX_AST_TYPE_WDS_HM:
1923 		ast_entry->next_hop = 1;
1924 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1925 		break;
1926 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1927 		ast_entry->next_hop = 1;
1928 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1929 		ast_entry->peer_id = peer->peer_id;
1930 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1931 				  ase_list_elem);
1932 		break;
1933 	case CDP_TXRX_AST_TYPE_DA:
1934 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1935 							  DP_MOD_ID_AST);
1936 		if (!vap_bss_peer) {
1937 			qdf_spin_unlock_bh(&soc->ast_lock);
1938 			qdf_mem_free(ast_entry);
1939 			return QDF_STATUS_E_FAILURE;
1940 		}
1941 		peer = vap_bss_peer;
1942 		ast_entry->next_hop = 1;
1943 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1944 		break;
1945 	default:
1946 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1947 	}
1948 
1949 	ast_entry->is_active = TRUE;
1950 	DP_STATS_INC(soc, ast.added, 1);
1951 	soc->num_ast_entries++;
1952 	dp_peer_ast_hash_add(soc, ast_entry);
1953 
1954 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1955 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1956 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1957 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1958 		status = dp_add_wds_entry_wrapper(soc,
1959 						  peer,
1960 						  mac_addr,
1961 						  flags,
1962 						  ast_entry->type);
1963 
1964 	if (vap_bss_peer)
1965 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1966 
1967 	qdf_spin_unlock_bh(&soc->ast_lock);
1968 	return qdf_status_from_os_return(status);
1969 }
1970 
1971 qdf_export_symbol(dp_peer_add_ast);
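
/*
 * Illustrative sketch (not part of the driver): how a WDS source-port
 * learning caller would typically request an AST entry via
 * dp_peer_add_ast(). The helper name dp_example_wds_learn and the zero
 * flags value are assumptions for illustration only.
 */
static inline void dp_example_wds_learn(struct dp_soc *soc,
					struct dp_peer *ta_peer,
					uint8_t *wds_src_mac)
{
	QDF_STATUS status;

	/* request a next-hop (WDS) AST entry for the learned address */
	status = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
				 CDP_TXRX_AST_TYPE_WDS, 0);

	/* E_ALREADY/E_AGAIN are expected when an entry exists or a
	 * delete-then-add sequence was scheduled; log other failures
	 */
	if (QDF_IS_STATUS_ERROR(status) &&
	    status != QDF_STATUS_E_ALREADY &&
	    status != QDF_STATUS_E_AGAIN)
		dp_peer_debug("WDS learn failed for " QDF_MAC_ADDR_FMT,
			      QDF_MAC_ADDR_REF(wds_src_mac));
}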
1972 
1973 /*
1974  * dp_peer_free_ast_entry() - Free up the ast entry memory
1975  * @soc: SoC handle
1976  * @ast_entry: Address search entry
1977  *
1978  * This API is used to free up the memory associated with
1979  * AST entry.
1980  *
1981  * Return: None
1982  */
1983 void dp_peer_free_ast_entry(struct dp_soc *soc,
1984 			    struct dp_ast_entry *ast_entry)
1985 {
1986 	/*
1987 	 * NOTE: Ensure that call to this API is done
1988 	 * after soc->ast_lock is taken
1989 	 */
1990 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1991 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1992 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1993 
1994 	ast_entry->callback = NULL;
1995 	ast_entry->cookie = NULL;
1996 
1997 	DP_STATS_INC(soc, ast.deleted, 1);
1998 	dp_peer_ast_hash_remove(soc, ast_entry);
1999 	dp_peer_ast_cleanup(soc, ast_entry);
2000 	qdf_mem_free(ast_entry);
2001 	soc->num_ast_entries--;
2002 }
2003 
2004 /*
2005  * dp_peer_unlink_ast_entry() - Unlink the ast entry from the peer list
2006  * @soc: SoC handle
2007  * @ast_entry: Address search entry
2008  * @peer: peer
2009  *
2010  * This API is used to remove/unlink the AST entry from the peer list
2011  * and clear its mapping in the soc AST table.
2012  *
2013  * Return: None
2014  */
2015 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2016 			      struct dp_ast_entry *ast_entry,
2017 			      struct dp_peer *peer)
2018 {
2019 	if (!peer) {
2020 		dp_info_rl("NULL peer");
2021 		return;
2022 	}
2023 
2024 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
2025 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2026 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2027 			  ast_entry->type);
2028 		return;
2029 	}
2030 	/*
2031 	 * NOTE: Ensure that call to this API is done
2032 	 * after soc->ast_lock is taken
2033 	 */
2034 
2035 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
2036 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
2037 
2038 	if (ast_entry == peer->self_ast_entry)
2039 		peer->self_ast_entry = NULL;
2040 
2041 	/*
2042 	 * release the reference only if it is mapped
2043 	 * to ast_table
2044 	 */
2045 	if (ast_entry->is_mapped)
2046 		soc->ast_table[ast_entry->ast_idx] = NULL;
2047 
2048 	ast_entry->peer_id = HTT_INVALID_PEER;
2049 }
2050 
2051 /*
2052  * dp_peer_del_ast() - Delete and free AST entry
2053  * @soc: SoC handle
2054  * @ast_entry: AST entry of the node
2055  *
2056  * This function removes the AST entry from peer and soc tables
2057  * It assumes caller has taken the ast lock to protect the access to these
2058  * tables
2059  *
2060  * Return: None
2061  */
2062 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2063 {
2064 	struct dp_peer *peer = NULL;
2065 
2066 	if (soc->ast_offload_support)
2067 		return;
2068 
2069 	if (!ast_entry) {
2070 		dp_info_rl("NULL AST entry");
2071 		return;
2072 	}
2073 
2074 	if (ast_entry->delete_in_progress) {
2075 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2076 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2077 			  ast_entry->type);
2078 		return;
2079 	}
2080 
2081 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2082 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2083 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2084 
2085 	ast_entry->delete_in_progress = true;
2086 
2087 	/* In teardown, del ast is called after setting the logical delete
2088 	 * state; use __dp_peer_get_ref_by_id to get the reference
2089 	 * irrespective of state
2090 	 */
2091 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2092 				       DP_MOD_ID_AST);
2093 
2094 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2095 
2096 	/* Remove SELF and STATIC entries in teardown itself */
2097 	if (!ast_entry->next_hop)
2098 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2099 
2100 	if (ast_entry->is_mapped)
2101 		soc->ast_table[ast_entry->ast_idx] = NULL;
2102 
2103 	/* if peer map v2 is enabled we are not freeing ast entry
2104 	 * here and it is supposed to be freed in unmap event (after
2105 	 * we receive delete confirmation from target)
2106 	 *
2107 	 * if peer_id is invalid we did not get the peer map event
2108 	 * for the peer; free the ast entry from here only in this case
2109 	 */
2110 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2111 		goto end;
2112 
2113 	/* for WDS secondary entry ast_entry->next_hop would be set so
2114 	 * unlinking has to be done explicitly here.
2115 	 * As this entry is not a mapped entry, an unmap notification from
2116 	 * FW will not come. Hence unlinking is done right here.
2117 	 */
2118 
2119 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2120 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2121 
2122 	dp_peer_free_ast_entry(soc, ast_entry);
2123 
2124 end:
2125 	if (peer)
2126 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2127 }
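
/*
 * Illustrative sketch (not part of the driver): deleting an AST entry
 * by MAC address. dp_peer_del_ast() expects the caller to hold
 * soc->ast_lock, so the lookup and the delete are done under a single
 * lock hold. The helper name dp_example_del_ast_by_mac is an
 * assumption for illustration only.
 */
static inline void dp_example_del_ast_by_mac(struct dp_soc *soc,
					     uint8_t *mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* soc-wide lookup; by-pdev/by-vdev variants exist as well */
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}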
2128 
2129 /*
2130  * dp_peer_update_ast() - Update AST entry to the roamed peer
2131  * @soc: SoC handle
2132  * @peer: peer to which ast node belongs
2133  * @ast_entry: AST entry of the node
2134  * @flags: wds or hmwds
2135  *
2136  * This function updates the AST entry to the roamed peer in the soc tables.
2137  * It assumes caller has taken the ast lock to protect the access to these
2138  * tables
2139  *
2140  * Return: 0 if ast entry is updated successfully
2141  *         -1 failure
2142  */
2143 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2144 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2145 {
2146 	int ret = -1;
2147 	struct dp_peer *old_peer;
2148 
2149 	if (soc->ast_offload_support)
2150 		return QDF_STATUS_E_INVAL;
2151 
2152 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2153 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2154 		      peer->vdev->vdev_id, flags,
2155 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2156 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2157 
2158 	/* Do not send AST update in below cases
2159 	 *  1) Ast entry delete has already triggered
2160 	 *  2) Peer delete is already triggered
2161 	 *  3) We did not get the HTT map event for the create
2162 	 */
2163 	if (ast_entry->delete_in_progress ||
2164 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2165 	    !ast_entry->is_mapped)
2166 		return ret;
2167 
2168 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2169 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2170 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2171 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2172 		return 0;
2173 
2174 	/*
2175 	 * Avoids flood of WMI update messages sent to FW for same peer.
2176 	 */
2177 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2178 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2179 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2180 	    (ast_entry->is_active))
2181 		return 0;
2182 
2183 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2184 					 DP_MOD_ID_AST);
2185 	if (!old_peer)
2186 		return 0;
2187 
2188 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2189 
2190 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2191 
2192 	ast_entry->peer_id = peer->peer_id;
2193 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2194 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2195 	ast_entry->vdev_id = peer->vdev->vdev_id;
2196 	ast_entry->is_active = TRUE;
2197 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2198 
2199 	ret = dp_update_wds_entry_wrapper(soc,
2200 					  peer,
2201 					  ast_entry->mac_addr.raw,
2202 					  flags);
2203 
2204 	return ret;
2205 }
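
/*
 * Illustrative sketch (not part of the driver): pointing an existing
 * WDS AST entry at a roamed peer with dp_peer_update_ast(), which
 * assumes soc->ast_lock is held by the caller. The helper name and the
 * zero flags value are assumptions for illustration only.
 */
static inline int dp_example_roam_update_ast(struct dp_soc *soc,
					     struct dp_peer *new_peer,
					     uint8_t *wds_mac)
{
	struct dp_ast_entry *ast_entry;
	int ret = 0;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, wds_mac);
	if (ast_entry)
		ret = dp_peer_update_ast(soc, new_peer, ast_entry, 0);

	qdf_spin_unlock_bh(&soc->ast_lock);

	return ret;
}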
2206 
2207 /*
2208  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
2209  * @soc: SoC handle
2210  * @ast_entry: AST entry of the node
2211  *
2212  * This function gets the pdev_id from the ast entry.
2213  *
2214  * Return: (uint8_t) pdev_id
2215  */
2216 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2217 				struct dp_ast_entry *ast_entry)
2218 {
2219 	return ast_entry->pdev_id;
2220 }
2221 
2222 /*
2223  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
2224  * @soc: SoC handle
2225  * @ast_entry: AST entry of the node
2226  *
2227  * This function gets the next hop from the ast entry.
2228  *
2229  * Return: (uint8_t) next_hop
2230  */
2231 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2232 				struct dp_ast_entry *ast_entry)
2233 {
2234 	return ast_entry->next_hop;
2235 }
2236 
2237 /*
2238  * dp_peer_ast_set_type() - set type in the ast entry
2239  * @soc: SoC handle
2240  * @ast_entry: AST entry of the node
2241  *
2242  * This function sets the type in the ast entry.
2243  *
2244  * Return: None
2245  */
2246 void dp_peer_ast_set_type(struct dp_soc *soc,
2247 				struct dp_ast_entry *ast_entry,
2248 				enum cdp_txrx_ast_entry_type type)
2249 {
2250 	ast_entry->type = type;
2251 }
2252 
2253 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2254 			      struct dp_ast_entry *ast_entry,
2255 			      struct dp_peer *peer)
2256 {
2257 	bool delete_in_fw = false;
2258 
2259 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2260 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
2261 		  __func__, ast_entry->type, ast_entry->pdev_id,
2262 		  ast_entry->vdev_id,
2263 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2264 		  ast_entry->next_hop, ast_entry->peer_id);
2265 
2266 	/*
2267 	 * If peer state is logical delete, the peer is about to be
2268 	 * torn down with a peer delete command to firmware,
2269 	 * which will cleanup all the wds ast entries.
2270 	 * So, no need to send explicit wds ast delete to firmware.
2271 	 */
2272 	if (ast_entry->next_hop) {
2273 		if (peer && dp_peer_state_cmp(peer,
2274 					      DP_PEER_STATE_LOGICAL_DELETE))
2275 			delete_in_fw = false;
2276 		else
2277 			delete_in_fw = true;
2278 
2279 		dp_del_wds_entry_wrapper(soc,
2280 					 ast_entry->vdev_id,
2281 					 ast_entry->mac_addr.raw,
2282 					 ast_entry->type,
2283 					 delete_in_fw);
2284 	}
2285 }
2286 #else
2287 void dp_peer_free_ast_entry(struct dp_soc *soc,
2288 			    struct dp_ast_entry *ast_entry)
2289 {
2290 }
2291 
2292 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2293 			      struct dp_ast_entry *ast_entry,
2294 			      struct dp_peer *peer)
2295 {
2296 }
2297 
2298 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2299 			     struct dp_ast_entry *ase)
2300 {
2301 }
2302 
2303 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2304 						     uint8_t *ast_mac_addr,
2305 						     uint8_t vdev_id)
2306 {
2307 	return NULL;
2308 }
2309 
2310 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2311 			   struct dp_peer *peer,
2312 			   uint8_t *mac_addr,
2313 			   enum cdp_txrx_ast_entry_type type,
2314 			   uint32_t flags)
2315 {
2316 	return QDF_STATUS_E_FAILURE;
2317 }
2318 
2319 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2320 {
2321 }
2322 
2323 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2324 			struct dp_ast_entry *ast_entry, uint32_t flags)
2325 {
2326 	return 1;
2327 }
2328 
2329 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2330 					       uint8_t *ast_mac_addr)
2331 {
2332 	return NULL;
2333 }
2334 
2335 static inline
2336 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2337 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2338 				    uint8_t vdev_id, uint16_t ast_hash,
2339 				    uint8_t is_wds)
2340 {
2341 	return QDF_STATUS_SUCCESS;
2342 }
2343 
2344 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2345 						     uint8_t *ast_mac_addr,
2346 						     uint8_t pdev_id)
2347 {
2348 	return NULL;
2349 }
2350 
2351 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2352 {
2353 	return QDF_STATUS_SUCCESS;
2354 }
2355 
2356 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2357 					 struct dp_peer *peer,
2358 					 uint8_t *mac_addr,
2359 					 uint16_t hw_peer_id,
2360 					 uint8_t vdev_id,
2361 					 uint16_t ast_hash,
2362 					 uint8_t is_wds)
2363 {
2364 	return QDF_STATUS_SUCCESS;
2365 }
2366 
2367 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2368 {
2369 }
2370 
2371 void dp_peer_ast_set_type(struct dp_soc *soc,
2372 				struct dp_ast_entry *ast_entry,
2373 				enum cdp_txrx_ast_entry_type type)
2374 {
2375 }
2376 
2377 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2378 				struct dp_ast_entry *ast_entry)
2379 {
2380 	return 0xff;
2381 }
2382 
2383 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2384 				 struct dp_ast_entry *ast_entry)
2385 {
2386 	return 0xff;
2387 }
2388 
2389 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2390 			      struct dp_ast_entry *ast_entry,
2391 			      struct dp_peer *peer)
2392 {
2393 }
2394 #endif
2395 
2396 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2397 void dp_peer_ast_send_multi_wds_del(
2398 		struct dp_soc *soc, uint8_t vdev_id,
2399 		struct peer_del_multi_wds_entries *wds_list)
2400 {
2401 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2402 
2403 	if (cdp_soc && cdp_soc->ol_ops &&
2404 	    cdp_soc->ol_ops->peer_del_multi_wds_entry)
2405 		cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2406 							  vdev_id, wds_list);
2407 }
2408 #endif
2409 
2410 #ifdef FEATURE_WDS
2411 /**
2412  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2413  * @soc: soc handle
2414  * @peer: peer handle
2415  *
2416  * Free all the wds ast entries associated with peer
2417  *
2418  * Return: Number of wds ast entries freed
2419  */
2420 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2421 					     struct dp_peer *peer)
2422 {
2423 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2424 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2425 	uint32_t num_ast = 0;
2426 
2427 	TAILQ_INIT(&ast_local_list);
2428 	qdf_spin_lock_bh(&soc->ast_lock);
2429 
2430 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2431 		if (ast_entry->next_hop)
2432 			num_ast++;
2433 
2434 		if (ast_entry->is_mapped)
2435 			soc->ast_table[ast_entry->ast_idx] = NULL;
2436 
2437 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2438 		DP_STATS_INC(soc, ast.deleted, 1);
2439 		dp_peer_ast_hash_remove(soc, ast_entry);
2440 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2441 				  ase_list_elem);
2442 		soc->num_ast_entries--;
2443 	}
2444 
2445 	qdf_spin_unlock_bh(&soc->ast_lock);
2446 
2447 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2448 			   temp_ast_entry) {
2449 		if (ast_entry->callback)
2450 			ast_entry->callback(soc->ctrl_psoc,
2451 					    dp_soc_to_cdp_soc(soc),
2452 					    ast_entry->cookie,
2453 					    CDP_TXRX_AST_DELETED);
2454 
2455 		qdf_mem_free(ast_entry);
2456 	}
2457 
2458 	return num_ast;
2459 }
2460 /**
2461  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2462  * @soc: soc handle
2463  * @peer: peer handle
2464  * @free_wds_count: number of wds entries freed by FW with peer delete
2465  *
2466  * Free all the wds ast entries associated with peer and compare with
2467  * the value received from firmware
2468  *
2469  * Return: None
2470  */
2471 static void
2472 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2473 			  uint32_t free_wds_count)
2474 {
2475 	uint32_t wds_deleted = 0;
2476 
2477 	if (soc->ast_offload_support && !soc->host_ast_db_enable)
2478 		return;
2479 
2480 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2481 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2482 	    (free_wds_count != wds_deleted)) {
2483 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2484 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
2485 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2486 			 free_wds_count, wds_deleted);
2487 	}
2488 }
2489 
2490 #else
2491 static void
2492 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2493 			  uint32_t free_wds_count)
2494 {
2495 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2496 
2497 	qdf_spin_lock_bh(&soc->ast_lock);
2498 
2499 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2500 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2501 
2502 		if (ast_entry->is_mapped)
2503 			soc->ast_table[ast_entry->ast_idx] = NULL;
2504 
2505 		dp_peer_free_ast_entry(soc, ast_entry);
2506 	}
2507 
2508 	peer->self_ast_entry = NULL;
2509 	qdf_spin_unlock_bh(&soc->ast_lock);
2510 }
2511 #endif
2512 
2513 /**
2514  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2515  * @soc: soc handle
2516  * @peer: peer handle
2517  * @vdev_id: vdev_id
2518  * @mac_addr: mac address of the AST entry to search and delete
2519  *
2520  * Find the ast entry from the peer list using the mac address and free
2521  * the entry.
2522  *
2523  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2524  */
2525 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2526 					 struct dp_peer *peer,
2527 					 uint8_t vdev_id,
2528 					 uint8_t *mac_addr)
2529 {
2530 	struct dp_ast_entry *ast_entry;
2531 	void *cookie = NULL;
2532 	txrx_ast_free_cb cb = NULL;
2533 
2534 	/*
2535 	 * release the reference only if it is mapped
2536 	 * to ast_table
2537 	 */
2538 
2539 	qdf_spin_lock_bh(&soc->ast_lock);
2540 
2541 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2542 	if (!ast_entry) {
2543 		qdf_spin_unlock_bh(&soc->ast_lock);
2544 		return QDF_STATUS_E_NOENT;
2545 	} else if (ast_entry->is_mapped) {
2546 		soc->ast_table[ast_entry->ast_idx] = NULL;
2547 	}
2548 
2549 	cb = ast_entry->callback;
2550 	cookie = ast_entry->cookie;
2551 
2553 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2554 
2555 	dp_peer_free_ast_entry(soc, ast_entry);
2556 
2557 	qdf_spin_unlock_bh(&soc->ast_lock);
2558 
2559 	if (cb) {
2560 		cb(soc->ctrl_psoc,
2561 		   dp_soc_to_cdp_soc(soc),
2562 		   cookie,
2563 		   CDP_TXRX_AST_DELETED);
2564 	}
2565 
2566 	return QDF_STATUS_SUCCESS;
2567 }
2568 
2569 void dp_peer_find_hash_erase(struct dp_soc *soc)
2570 {
2571 	int i, j;
2572 
2573 	/*
2574 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2575 	 * it's known that the soc is no longer in use.
2576 	 */
2577 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2578 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2579 			struct dp_peer *peer, *peer_next;
2580 
2581 			/*
2582 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2583 			 * memory access violation after peer is freed
2584 			 */
2585 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2586 				hash_list_elem, peer_next) {
2587 				/*
2588 				 * Don't remove the peer from the hash table -
2589 				 * that would modify the list we are currently
2590 				 * traversing, and it's not necessary anyway.
2591 				 */
2592 				/*
2593 				 * Artificially adjust the peer's ref count to
2594 				 * 1, so it will get deleted by
2595 				 * dp_peer_unref_delete.
2596 				 */
2597 				/* set to zero */
2598 				qdf_atomic_init(&peer->ref_cnt);
2599 				for (j = 0; j < DP_MOD_ID_MAX; j++)
2600 					qdf_atomic_init(&peer->mod_refs[j]);
2601 				/* incr to one */
2602 				qdf_atomic_inc(&peer->ref_cnt);
2603 				qdf_atomic_inc(&peer->mod_refs
2604 						[DP_MOD_ID_CONFIG]);
2605 				dp_peer_unref_delete(peer,
2606 						     DP_MOD_ID_CONFIG);
2607 			}
2608 		}
2609 	}
2610 }
2611 
2612 void dp_peer_ast_table_detach(struct dp_soc *soc)
2613 {
2614 	if (soc->ast_table) {
2615 		qdf_mem_free(soc->ast_table);
2616 		soc->ast_table = NULL;
2617 	}
2618 }
2619 
2620 /*
2621  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2622  * @soc: soc handle
2623  *
2624  * Return: None
2625  */
2626 void dp_peer_find_map_detach(struct dp_soc *soc)
2627 {
2628 	if (soc->peer_id_to_obj_map) {
2629 		qdf_mem_free(soc->peer_id_to_obj_map);
2630 		soc->peer_id_to_obj_map = NULL;
2631 		qdf_spinlock_destroy(&soc->peer_map_lock);
2632 	}
2633 }
2634 
2635 #ifndef AST_OFFLOAD_ENABLE
2636 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2637 {
2638 	QDF_STATUS status;
2639 
2640 	status = dp_peer_find_map_attach(soc);
2641 	if (!QDF_IS_STATUS_SUCCESS(status))
2642 		return status;
2643 
2644 	status = dp_peer_find_hash_attach(soc);
2645 	if (!QDF_IS_STATUS_SUCCESS(status))
2646 		goto map_detach;
2647 
2648 	status = dp_peer_ast_table_attach(soc);
2649 	if (!QDF_IS_STATUS_SUCCESS(status))
2650 		goto hash_detach;
2651 
2652 	status = dp_peer_ast_hash_attach(soc);
2653 	if (!QDF_IS_STATUS_SUCCESS(status))
2654 		goto ast_table_detach;
2655 
2656 	status = dp_peer_mec_hash_attach(soc);
2657 	if (QDF_IS_STATUS_SUCCESS(status)) {
2658 		dp_soc_wds_attach(soc);
2659 		return status;
2660 	}
2661 
2662 	dp_peer_ast_hash_detach(soc);
2663 ast_table_detach:
2664 	dp_peer_ast_table_detach(soc);
2665 hash_detach:
2666 	dp_peer_find_hash_detach(soc);
2667 map_detach:
2668 	dp_peer_find_map_detach(soc);
2669 
2670 	return status;
2671 }
2672 #else
2673 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2674 {
2675 	QDF_STATUS status;
2676 
2677 	status = dp_peer_find_map_attach(soc);
2678 	if (!QDF_IS_STATUS_SUCCESS(status))
2679 		return status;
2680 
2681 	status = dp_peer_find_hash_attach(soc);
2682 	if (!QDF_IS_STATUS_SUCCESS(status))
2683 		goto map_detach;
2684 
2685 	return status;
2686 map_detach:
2687 	dp_peer_find_map_detach(soc);
2688 
2689 	return status;
2690 }
2691 #endif
2692 
2693 #ifdef IPA_OFFLOAD
2694 /*
2695  * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
2696  * @soc: soc handle
2697  * @cb_ctxt: combination of peer_id and tid
2698  * @reo_status: reo status
2699  *
2700  * Return: None
2701  */
2702 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
2703 				       union hal_reo_status *reo_status)
2704 {
2705 	struct dp_peer *peer = NULL;
2706 	struct dp_rx_tid *rx_tid = NULL;
2707 	unsigned long comb_peer_id_tid;
2708 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
2709 	uint16_t tid;
2710 	uint16_t peer_id;
2711 
2712 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2713 		dp_err("REO stats failure %d\n",
2714 		       queue_status->header.status);
2715 		return;
2716 	}
2717 	comb_peer_id_tid = (unsigned long)cb_ctxt;
2718 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
2719 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
2720 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
2721 	if (!peer)
2722 		return;
2723 	rx_tid  = &peer->rx_tid[tid];
2724 
2725 	if (!rx_tid) {
2726 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2727 		return;
2728 	}
2729 
2730 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
2731 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
2732 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2733 }
2734 
2735 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
2736 #endif
2737 
2738 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2739 	union hal_reo_status *reo_status)
2740 {
2741 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2742 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2743 
2744 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2745 		return;
2746 
2747 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2748 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2749 			       queue_status->header.status, rx_tid->tid);
2750 		return;
2751 	}
2752 
2753 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2754 		       "ssn: %d\n"
2755 		       "curr_idx  : %d\n"
2756 		       "pn_31_0   : %08x\n"
2757 		       "pn_63_32  : %08x\n"
2758 		       "pn_95_64  : %08x\n"
2759 		       "pn_127_96 : %08x\n"
2760 		       "last_rx_enq_tstamp : %08x\n"
2761 		       "last_rx_deq_tstamp : %08x\n"
2762 		       "rx_bitmap_31_0     : %08x\n"
2763 		       "rx_bitmap_63_32    : %08x\n"
2764 		       "rx_bitmap_95_64    : %08x\n"
2765 		       "rx_bitmap_127_96   : %08x\n"
2766 		       "rx_bitmap_159_128  : %08x\n"
2767 		       "rx_bitmap_191_160  : %08x\n"
2768 		       "rx_bitmap_223_192  : %08x\n"
2769 		       "rx_bitmap_255_224  : %08x\n",
2770 		       rx_tid->tid,
2771 		       queue_status->ssn, queue_status->curr_idx,
2772 		       queue_status->pn_31_0, queue_status->pn_63_32,
2773 		       queue_status->pn_95_64, queue_status->pn_127_96,
2774 		       queue_status->last_rx_enq_tstamp,
2775 		       queue_status->last_rx_deq_tstamp,
2776 		       queue_status->rx_bitmap_31_0,
2777 		       queue_status->rx_bitmap_63_32,
2778 		       queue_status->rx_bitmap_95_64,
2779 		       queue_status->rx_bitmap_127_96,
2780 		       queue_status->rx_bitmap_159_128,
2781 		       queue_status->rx_bitmap_191_160,
2782 		       queue_status->rx_bitmap_223_192,
2783 		       queue_status->rx_bitmap_255_224);
2784 
2785 	DP_PRINT_STATS(
2786 		       "curr_mpdu_cnt      : %d\n"
2787 		       "curr_msdu_cnt      : %d\n"
2788 		       "fwd_timeout_cnt    : %d\n"
2789 		       "fwd_bar_cnt        : %d\n"
2790 		       "dup_cnt            : %d\n"
2791 		       "frms_in_order_cnt  : %d\n"
2792 		       "bar_rcvd_cnt       : %d\n"
2793 		       "mpdu_frms_cnt      : %d\n"
2794 		       "msdu_frms_cnt      : %d\n"
2795 		       "total_byte_cnt     : %d\n"
2796 		       "late_recv_mpdu_cnt : %d\n"
2797 		       "win_jump_2k        : %d\n"
2798 		       "hole_cnt           : %d\n",
2799 		       queue_status->curr_mpdu_cnt,
2800 		       queue_status->curr_msdu_cnt,
2801 		       queue_status->fwd_timeout_cnt,
2802 		       queue_status->fwd_bar_cnt,
2803 		       queue_status->dup_cnt,
2804 		       queue_status->frms_in_order_cnt,
2805 		       queue_status->bar_rcvd_cnt,
2806 		       queue_status->mpdu_frms_cnt,
2807 		       queue_status->msdu_frms_cnt,
2808 		       queue_status->total_cnt,
2809 		       queue_status->late_recv_mpdu_cnt,
2810 		       queue_status->win_jump_2k,
2811 		       queue_status->hole_cnt);
2812 
2813 	DP_PRINT_STATS("Addba Req          : %d\n"
2814 			"Addba Resp         : %d\n"
2815 			"Addba Resp success : %d\n"
2816 			"Addba Resp failed  : %d\n"
2817 			"Delba Req received : %d\n"
2818 			"Delba Tx success   : %d\n"
2819 			"Delba Tx Fail      : %d\n"
2820 			"BA window size     : %d\n"
2821 			"Pn size            : %d\n",
2822 			rx_tid->num_of_addba_req,
2823 			rx_tid->num_of_addba_resp,
2824 			rx_tid->num_addba_rsp_success,
2825 			rx_tid->num_addba_rsp_failed,
2826 			rx_tid->num_of_delba_req,
2827 			rx_tid->delba_tx_success_cnt,
2828 			rx_tid->delba_tx_fail_cnt,
2829 			rx_tid->ba_win_size,
2830 			rx_tid->pn_size);
2831 }
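
/*
 * Illustrative sketch (not part of the driver): issuing the REO
 * GET_QUEUE_STATS command that feeds dp_rx_tid_stats_cb() above. The
 * helper name is an assumption; the std parameters mirror the
 * CMD_UPDATE_RX_REO_QUEUE usage in dp_rx_tid_update_wifi3() below.
 */
static inline void dp_example_query_tid_stats(struct dp_soc *soc,
					      struct dp_rx_tid *rx_tid)
{
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));
	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

	if (dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, &params,
			    dp_rx_tid_stats_cb, rx_tid))
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
}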
2832 
2833 #ifdef REO_SHARED_QREF_TABLE_EN
2834 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2835 					struct dp_peer *peer)
2836 {
2837 	uint8_t tid;
2838 
2839 	if (peer->peer_id > soc->max_peer_id)
2840 		return;
2841 	if (IS_MLO_DP_LINK_PEER(peer))
2842 		return;
2843 	if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2844 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2845 			hal_reo_shared_qaddr_write(soc->hal_soc,
2846 						   peer->peer_id, tid, 0);
2847 	}
2848 }
2849 #endif
2850 
2851 /*
2852  * dp_peer_find_add_id() - map peer_id with peer
2853  * @soc: soc handle
2854  * @peer_mac_addr: peer mac address
2855  * @peer_id: peer id to be mapped
2856  * @hw_peer_id: HW ast index
2857  * @vdev_id: vdev_id
2858  * @peer_type: peer type (link or MLD)
2859  *
2860  * Return: peer on success
2861  *         NULL on failure
2862  */
2863 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2864 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2865 	uint8_t vdev_id, enum cdp_peer_type peer_type)
2866 {
2867 	struct dp_peer *peer;
2868 	struct cdp_peer_info peer_info = { 0 };
2869 
2870 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2871 	/* check if there's already a peer object with this MAC address */
2872 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2873 				 false, peer_type);
2874 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2875 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2876 		    soc, peer, peer_id, vdev_id,
2877 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2878 
2879 	if (peer) {
2880 		/* peer's ref count was already incremented by
2881 		 * peer_find_hash_find
2882 		 */
2883 		dp_peer_info("%pK: ref_cnt: %d", soc,
2884 			     qdf_atomic_read(&peer->ref_cnt));
2885 
2886 		/*
2887 		 * if peer is in logical delete, CP triggered the delete
2888 		 * before the map was received; ignore this event
2889 		 */
2890 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2891 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2892 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2893 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2894 				 vdev_id);
2895 			return NULL;
2896 		}
2897 
2898 		if (peer->peer_id == HTT_INVALID_PEER) {
2899 			if (!IS_MLO_DP_MLD_PEER(peer))
2900 				dp_monitor_peer_tid_peer_id_update(soc, peer,
2901 								   peer_id);
2902 		} else {
2903 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2904 			QDF_ASSERT(0);
2905 			return NULL;
2906 		}
2907 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2908 		if (soc->arch_ops.dp_partner_chips_map)
2909 			soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2910 
2911 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2912 		return peer;
2913 	}
2914 
2915 	return NULL;
2916 }
2917 
2918 #ifdef WLAN_FEATURE_11BE_MLO
2919 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2920 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2921 					 uint16_t peer_id)
2922 {
2923 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2924 }
2925 #else
2926 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2927 					 uint16_t peer_id)
2928 {
2929 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2930 }
2931 #endif
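
/*
 * Illustrative sketch (not part of the driver): deriving the host-side
 * MLD peer id for an MLO peer map event. The helper name and the use
 * of dp_info here are assumptions for illustration only.
 */
static inline void dp_example_show_ml_peer_id(struct dp_soc *soc,
					      uint16_t fw_peer_id)
{
	/* the ML-valid bit is set on top of the firmware peer_id */
	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, fw_peer_id);

	dp_info("fw peer_id %u maps to ml_peer_id %u",
		fw_peer_id, ml_peer_id);
}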
2932 
2933 QDF_STATUS
2934 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2935 			   uint8_t *peer_mac_addr,
2936 			   struct dp_mlo_flow_override_info *mlo_flow_info,
2937 			   struct dp_mlo_link_info *mlo_link_info)
2938 {
2939 	struct dp_peer *peer = NULL;
2940 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2941 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2942 	uint8_t vdev_id = 0;
2943 	uint8_t is_wds = 0;
2944 	int i;
2945 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2946 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2947 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2948 	struct dp_soc *primary_soc;
2949 
2950 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2951 					       NULL, peer_mac_addr,
2952 					       1, peer_id, ml_peer_id, 0,
2953 					       vdev_id);
2954 
2955 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2956 		soc, peer_id, ml_peer_id,
2957 		QDF_MAC_ADDR_REF(peer_mac_addr));
2958 
2959 	/* Get corresponding vdev ID for the peer based
2960 	 * on chip ID obtained from mlo peer_map event
2961 	 */
2962 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2963 		if (mlo_link_info[i].peer_chip_id == dp_mlo_get_chip_id(soc)) {
2964 			vdev_id = mlo_link_info[i].vdev_id;
2965 			break;
2966 		}
2967 	}
2968 
2969 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2970 				   hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2971 	if (peer) {
2972 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2973 		    qdf_mem_cmp(peer->mac_addr.raw,
2974 				peer->vdev->mld_mac_addr.raw,
2975 				QDF_MAC_ADDR_SIZE) != 0) {
2976 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2977 			peer->bss_peer = 1;
2978 			if (peer->txrx_peer)
2979 				peer->txrx_peer->bss_peer = 1;
2980 		}
2981 
2982 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2983 			peer->vdev->bss_ast_hash = ast_hash;
2984 			peer->vdev->bss_ast_idx = hw_peer_id;
2985 		}
2986 
2987 		/* Add ast entry in case the self ast entry was
2988 		 * deleted due to a DP CP sync issue.
2989 		 *
2990 		 * self_ast_entry is modified in the peer create
2991 		 * and peer unmap paths, which cannot run in
2992 		 * parallel with peer map, so no lock is needed
2993 		 * before referring to it
2994 		 */
2995 		if (!peer->self_ast_entry) {
2996 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2997 				QDF_MAC_ADDR_REF(peer_mac_addr));
2998 			dp_peer_add_ast(soc, peer,
2999 					peer_mac_addr,
3000 					type, 0);
3001 		}
3002 		/* If peer setup and hence rx_tid setup got called
3003 		 * before htt peer map then Qref write to LUT did not
3004 		 * happen in rx_tid setup as peer_id was invalid.
3005 		 * So defer Qref write to peer map handler. Check if
3006 		 * rx_tid qdesc for tid 0 is already setup and perform
3007 		 * qref write to LUT for Tid 0 and 16.
3008 		 *
3009 		 * Peer map could be obtained on assoc link, hence
3010 		 * change to primary link's soc.
3011 		 */
3012 		primary_soc = peer->vdev->pdev->soc;
3013 		if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
3014 		    peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
3015 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3016 						   ml_peer_id,
3017 						   0,
3018 						   peer->rx_tid[0].hw_qdesc_paddr);
3019 			hal_reo_shared_qaddr_write(primary_soc->hal_soc,
3020 						   ml_peer_id,
3021 						   DP_NON_QOS_TID,
3022 						   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3023 		}
3024 	}
3025 
3026 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3027 			      vdev_id, ast_hash, is_wds);
3028 
3029 	/*
3030 	 * If AST offload and host AST DB is enabled, populate AST entries on
3031 	 * host based on mlo peer map event from FW
3032 	 */
3033 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3034 		dp_peer_host_add_map_ast(soc, ml_peer_id, peer_mac_addr,
3035 					 hw_peer_id, vdev_id,
3036 					 ast_hash, is_wds);
3037 	}
3038 
3039 	return err;
3040 }
3041 #endif
3042 
3043 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3044 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
3045 			      uint8_t *peer_mac_addr)
3046 {
3047 	struct dp_vdev *vdev = NULL;
3048 
3049 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
3050 	if (vdev) {
3051 		if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
3052 				QDF_MAC_ADDR_SIZE) == 0) {
3053 			vdev->roaming_peer_status =
3054 						WLAN_ROAM_PEER_AUTH_STATUS_NONE;
3055 			qdf_mem_zero(vdev->roaming_peer_mac.raw,
3056 				     QDF_MAC_ADDR_SIZE);
3057 		}
3058 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
3059 	}
3060 }
3061 #endif
3062 
3063 #ifdef WLAN_SUPPORT_PPEDS
3064 static void
3065 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3066 				     bool peer_map)
3067 {
3068 	if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
3069 		soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3070 								   peer_map);
3071 }
3072 #else
3073 static void
3074 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
3075 				     bool peer_map)
3076 {
3077 }
3078 #endif
3079 
3080 /**
3081  * dp_rx_peer_map_handler() - handle peer map event from firmware
3082  * @soc: generic soc handle
3083  * @peer_id: peer_id from firmware
3084  * @hw_peer_id: ast index for this peer
3085  * @vdev_id: vdev ID
3086  * @peer_mac_addr: mac address of the peer
3087  * @ast_hash: ast hash value
3088  * @is_wds: flag to indicate peer map event for WDS ast entry
3089  *
3090  * associate the peer_id that firmware provided with peer entry
3091  * and update the ast table in the host with the hw_peer_id.
3092  *
3093  * Return: QDF_STATUS code
3094  */
3095 
3096 QDF_STATUS
3097 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
3098 		       uint16_t hw_peer_id, uint8_t vdev_id,
3099 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
3100 		       uint8_t is_wds)
3101 {
3102 	struct dp_peer *peer = NULL;
3103 	struct dp_vdev *vdev = NULL;
3104 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
3105 	QDF_STATUS err = QDF_STATUS_SUCCESS;
3106 
3107 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
3108 					       NULL, peer_mac_addr, 1, peer_id,
3109 					       0, 0, vdev_id);
3110 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
3111 		soc, peer_id, hw_peer_id,
3112 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
3113 
3114 	/* For a peer map event for a WDS ast entry, get the peer
3115 	 * from the obj map
3116 	 */
3117 	if (is_wds) {
3118 		if (!soc->ast_offload_support) {
3119 			peer = dp_peer_get_ref_by_id(soc, peer_id,
3120 						     DP_MOD_ID_HTT);
3121 
3122 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
3123 					      hw_peer_id,
3124 					      vdev_id, ast_hash, is_wds);
3125 			if (peer)
3126 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3127 		}
3128 	} else {
3129 		/*
3130 		 * It's the responsibility of the CP and FW to ensure
3131 		 * that peer is created successfully. Ideally DP should
3132 		 * not hit the below condition for directly associated
3133 		 * peers.
3134 		 */
3135 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
3136 		    (hw_peer_id >=
3137 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
3138 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
3139 			qdf_assert_always(0);
3140 		}
3141 
3142 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
3143 					   hw_peer_id, vdev_id,
3144 					   CDP_LINK_PEER_TYPE);
3145 
3146 		if (peer) {
3147 			bool peer_map = true;
3148 
3149 			/* Updating ast_hash and ast_idx in peer level */
3150 			peer->ast_hash = ast_hash;
3151 			peer->ast_idx = hw_peer_id;
3152 			vdev = peer->vdev;
3153 			/* Only for STA vdev and when the peer is not a TDLS peer */
3154 			if (wlan_op_mode_sta == vdev->opmode &&
3155 			    !peer->is_tdls_peer) {
3156 				if (qdf_mem_cmp(peer->mac_addr.raw,
3157 						vdev->mac_addr.raw,
3158 						QDF_MAC_ADDR_SIZE) != 0) {
3159 					dp_info("%pK: STA vdev bss_peer", soc);
3160 					peer->bss_peer = 1;
3161 					if (peer->txrx_peer)
3162 						peer->txrx_peer->bss_peer = 1;
3163 				}
3164 
3165 				dp_info("bss ast_hash 0x%x, ast_index 0x%x",
3166 					ast_hash, hw_peer_id);
3167 				vdev->bss_ast_hash = ast_hash;
3168 				vdev->bss_ast_idx = hw_peer_id;
3169 
3170 				dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
3171 								     peer_map);
3172 			}
3173 
3174 			/* Add ast entry in case the self ast entry was
3175 			 * deleted due to a DP CP sync issue.
3176 			 *
3177 			 * self_ast_entry is modified in the peer create
3178 			 * and peer unmap paths, which cannot run in
3179 			 * parallel with peer map, so no lock is needed
3180 			 * before referring to it
3181 			 */
3182 			if (!soc->ast_offload_support &&
3183 				!peer->self_ast_entry) {
3184 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
3185 					QDF_MAC_ADDR_REF(peer_mac_addr));
3186 				dp_peer_add_ast(soc, peer,
3187 						peer_mac_addr,
3188 						type, 0);
3189 			}
3190 
3191 			/* If peer setup and hence rx_tid setup got called
3192 			 * before htt peer map then Qref write to LUT did
3193 			 * not happen in rx_tid setup as peer_id was invalid.
3194 			 * So defer Qref write to peer map handler. Check if
3195 			 * rx_tid qdesc for tid 0 is already setup perform qref
3196 			 * write to LUT for Tid 0 and 16.
3197 			 */
3198 			if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3199 			    peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
3200 			    !IS_MLO_DP_LINK_PEER(peer)) {
3201 				hal_reo_shared_qaddr_write(soc->hal_soc,
3202 							   peer_id,
3203 							   0,
3204 							   peer->rx_tid[0].hw_qdesc_paddr);
3205 				hal_reo_shared_qaddr_write(soc->hal_soc,
3206 							   peer_id,
3207 							   DP_NON_QOS_TID,
3208 							   peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
3209 			}
3210 		}
3211 
3212 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
3213 				      vdev_id, ast_hash, is_wds);
3214 	}
3215 
3216 	dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
3217 
3218 	/*
3219 	 * If AST offload and host AST DB is enabled, populate AST entries on
3220 	 * host based on peer map event from FW
3221 	 */
3222 	if (soc->ast_offload_support && soc->host_ast_db_enable) {
3223 		dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
3224 					 hw_peer_id, vdev_id,
3225 					 ast_hash, is_wds);
3226 	}
3227 
3228 	return err;
3229 }
3230 
3231 /**
3232  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
3233  * @soc: generic soc handle
3234  * @peer_id: peer_id from firmware
3235  * @vdev_id: vdev ID
3236  * @mac_addr: mac address of the peer or wds entry
3237  * @is_wds: flag to indicate peer unmap event for WDS ast entry
3238  * @free_wds_count: number of wds entries freed by FW with peer delete
3239  *
3240  * Return: none
3241  */
3242 void
3243 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3244 			 uint8_t vdev_id, uint8_t *mac_addr,
3245 			 uint8_t is_wds, uint32_t free_wds_count)
3246 {
3247 	struct dp_peer *peer;
3248 	struct dp_vdev *vdev = NULL;
3249 
3250 	/*
3251 	 * If FW AST offload is enabled and host AST DB is enabled,
3252 	 * the AST entries are created during peer map from FW.
3253 	 */
3254 	if (soc->ast_offload_support && is_wds) {
3255 		if (!soc->host_ast_db_enable)
3256 			return;
3257 	}
3258 
3259 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3260 
3261 	/*
3262 	 * Currently peer IDs are assigned for vdevs as well as peers.
3263 	 * If the peer ID is for a vdev, then the peer pointer stored
3264 	 * in peer_id_to_obj_map will be NULL.
3265 	 */
3266 	if (!peer) {
3267 		dp_err("Received unmap event for invalid peer_id %u",
3268 		       peer_id);
3269 		return;
3270 	}
3271 
3272 	vdev = peer->vdev;
3273 
3274 	if (peer->txrx_peer) {
3275 		struct cdp_txrx_peer_params_update params = {0};
3276 
3277 		params.osif_vdev = (void *)vdev->osif_vdev;
3278 		params.peer_mac = peer->mac_addr.raw;
3279 		params.chip_id = dp_mlo_get_chip_id(soc);
3280 		params.pdev_id = vdev->pdev->pdev_id;
3281 
3282 		dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
3283 				     (void *)&params, peer_id,
3284 				     WDI_NO_VAL, vdev->pdev->pdev_id);
3285 	}
3286 
3287 	/* If V2 peer map messages are enabled, the AST entry has to be
3288 	 * freed here
3289 	 */
3290 	if (is_wds) {
3291 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3292 						   mac_addr)) {
3293 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3294 			return;
3295 		}
3296 
3297 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3298 			  peer, peer->peer_id,
3299 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3300 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3301 			  is_wds);
3302 
3303 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3304 		return;
3305 	}
3306 
3307 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3308 
3309 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3310 					       peer, mac_addr, 0, peer_id,
3311 					       0, 0, vdev_id);
3312 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3313 		soc, peer_id, peer);
3314 
3315 	/* Clear entries in Qref LUT */
3316 	/* TODO: Check if this is to be called from
3317 	 * dp_peer_delete for MLO case if there is race between
3318 	 * new peer id assignment and still not having received
3319 	 * peer unmap for MLD peer with same peer id.
3320 	 */
3321 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3322 
3323 	vdev = peer->vdev;
3324 
3325 	/* only if peer is in STA mode and not tdls peer */
3326 	if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3327 		bool peer_map = false;
3328 
3329 		dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3330 	}
3331 
3332 	dp_peer_find_id_to_obj_remove(soc, peer_id);
3333 
3334 	if (soc->arch_ops.dp_partner_chips_unmap)
3335 		soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3336 
3337 	peer->peer_id = HTT_INVALID_PEER;
3338 
3339 	/*
3340 	 * Reset ast flow mapping table
3341 	 */
3342 	if (!soc->ast_offload_support)
3343 		dp_peer_reset_flowq_map(peer);
3344 
3345 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3346 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3347 				peer_id, vdev_id, mac_addr);
3348 	}
3349 
3350 	dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3351 
3352 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3353 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3354 	/*
3355 	 * Remove a reference to the peer.
3356 	 * If there are no more references, delete the peer object.
3357 	 */
3358 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3359 }
3360 
3361 #ifdef WLAN_FEATURE_11BE_MLO
3362 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3363 {
3364 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3365 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3366 	uint8_t vdev_id = DP_VDEV_ALL;
3367 	uint8_t is_wds = 0;
3368 
3369 	dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3370 					       NULL, mac_addr, 0, peer_id,
3371 					       0, 0, vdev_id);
3372 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3373 		soc, peer_id);
3374 
3375 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3376 				 mac_addr, is_wds,
3377 				 DP_PEER_WDS_COUNT_INVALID);
3378 }
3379 #endif
3380 
3381 #ifndef AST_OFFLOAD_ENABLE
3382 void
3383 dp_peer_find_detach(struct dp_soc *soc)
3384 {
3385 	dp_soc_wds_detach(soc);
3386 	dp_peer_find_map_detach(soc);
3387 	dp_peer_find_hash_detach(soc);
3388 	dp_peer_ast_hash_detach(soc);
3389 	dp_peer_ast_table_detach(soc);
3390 	dp_peer_mec_hash_detach(soc);
3391 }
3392 #else
3393 void
3394 dp_peer_find_detach(struct dp_soc *soc)
3395 {
3396 	dp_peer_find_map_detach(soc);
3397 	dp_peer_find_hash_detach(soc);
3398 }
3399 #endif
3400 
3401 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
3402 	union hal_reo_status *reo_status)
3403 {
3404 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
3405 
3406 	if ((reo_status->rx_queue_status.header.status !=
3407 		HAL_REO_CMD_SUCCESS) &&
3408 		(reo_status->rx_queue_status.header.status !=
3409 		HAL_REO_CMD_DRAIN)) {
3410 		/* Should not happen normally. Just print error for now */
3411 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
3412 			    soc, reo_status->rx_queue_status.header.status,
3413 			    rx_tid->tid);
3414 	}
3415 }
3416 
3417 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
3418 {
3419 	struct ol_if_ops *ol_ops = NULL;
3420 	bool is_roaming = false;
3421 	uint8_t vdev_id = -1;
3422 	struct cdp_soc_t *soc;
3423 
3424 	if (!peer) {
3425 		dp_peer_info("Peer is NULL. No roaming possible");
3426 		return false;
3427 	}
3428 
3429 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
3430 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
3431 
3432 	if (ol_ops && ol_ops->is_roam_inprogress) {
3433 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
3434 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
3435 	}
3436 
3437 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
3438 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
3439 
3440 	return is_roaming;
3441 }
3442 
3443 #ifdef WLAN_FEATURE_11BE_MLO
3444 /**
3445  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
3446  *			     setup is necessary
3447  * @peer: DP peer handle
3448  *
3449  * Return: true - allow, false - disallow
3450  */
3451 static inline
3452 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3453 {
3454 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
3455 		return false;
3456 
3457 	return true;
3458 }
3459 
3460 /**
3461  * dp_rx_tid_update_allow() - check if rx_tid update needed
3462  * @peer: DP peer handle
3463  *
3464  * Return: true - allow, false - disallow
3465  */
3466 static inline
3467 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3468 {
3469 	/* not as expected for MLO connection link peer */
3470 	if (IS_MLO_DP_LINK_PEER(peer)) {
3471 		QDF_BUG(0);
3472 		return false;
3473 	}
3474 
3475 	return true;
3476 }
3477 #else
3478 static inline
3479 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
3480 {
3481 	return true;
3482 }
3483 
3484 static inline
3485 bool dp_rx_tid_update_allow(struct dp_peer *peer)
3486 {
3487 	return true;
3488 }
3489 #endif
3490 
3491 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
3492 					 ba_window_size, uint32_t start_seq,
3493 					 bool bar_update)
3494 {
3495 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3496 	struct dp_soc *soc = peer->vdev->pdev->soc;
3497 	struct hal_reo_cmd_params params;
3498 
3499 	if (!dp_rx_tid_update_allow(peer)) {
3500 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3501 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3502 		return QDF_STATUS_E_FAILURE;
3503 	}
3504 
3505 	qdf_mem_zero(&params, sizeof(params));
3506 
3507 	params.std.need_status = 1;
3508 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3509 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3510 	params.u.upd_queue_params.update_ba_window_size = 1;
3511 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3512 
3513 	if (start_seq < IEEE80211_SEQ_MAX) {
3514 		params.u.upd_queue_params.update_ssn = 1;
3515 		params.u.upd_queue_params.ssn = start_seq;
3516 	} else {
3517 		dp_set_ssn_valid_flag(&params, 0);
3518 	}
3519 
3520 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3521 			    dp_rx_tid_update_cb, rx_tid)) {
3522 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3523 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3524 	}
3525 
3526 	rx_tid->ba_win_size = ba_window_size;
3527 
3528 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3529 		return QDF_STATUS_E_PERM;
3530 
3531 	if (!bar_update)
3532 		dp_peer_rx_reorder_queue_setup(soc, peer,
3533 					       tid, ba_window_size);
3534 
3535 	return QDF_STATUS_SUCCESS;
3536 }
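
/*
 * Illustrative sketch (not part of the driver): refreshing the BA
 * window when an ADDBA request is accepted. The window size of 64 and
 * the helper name are assumptions for illustration only.
 */
static inline QDF_STATUS dp_example_addba_win_update(struct dp_peer *peer,
						     int tid,
						     uint32_t start_seq)
{
	/* bar_update = false so the reorder queue setup is refreshed too */
	return dp_rx_tid_update_wifi3(peer, tid, 64, start_seq, false);
}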
3537 
3538 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3539 /*
3540  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3541  *                                    the deferred list
3542  * @soc: Datapath soc handle
3543  * @freedesc: REO DESC reference that needs to be freed
3544  *
3545  * Return: true if enqueued, else false
3546  */
3547 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3548 					   struct reo_desc_list_node *freedesc)
3549 {
3550 	struct reo_desc_deferred_freelist_node *desc;
3551 
3552 	if (!qdf_atomic_read(&soc->cmn_init_done))
3553 		return false;
3554 
3555 	desc = qdf_mem_malloc(sizeof(*desc));
3556 	if (!desc)
3557 		return false;
3558 
3559 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3560 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3561 	desc->hw_qdesc_vaddr_unaligned =
3562 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3563 	desc->free_ts = qdf_get_system_timestamp();
3564 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3565 
3566 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3567 	if (!soc->reo_desc_deferred_freelist_init) {
3568 		qdf_mem_free(desc);
3569 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3570 		return false;
3571 	}
3572 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3573 			     (qdf_list_node_t *)desc);
3574 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3575 
3576 	return true;
3577 }
3578 
3579 /*
3580  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
3581  *                            based on time threshold
3582  * @soc: Datapath soc handle
3583  *
3584  * Return: None
3586  */
3587 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3588 {
3589 	struct reo_desc_deferred_freelist_node *desc;
3590 	unsigned long curr_ts = qdf_get_system_timestamp();
3591 
3592 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3593 
3594 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3595 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3596 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3597 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3598 				      (qdf_list_node_t **)&desc);
3599 
3600 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3601 
3602 		qdf_mem_unmap_nbytes_single(soc->osdev,
3603 					    desc->hw_qdesc_paddr,
3604 					    QDF_DMA_BIDIRECTIONAL,
3605 					    desc->hw_qdesc_alloc_size);
3606 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3607 		qdf_mem_free(desc);
3608 
3609 		curr_ts = qdf_get_system_timestamp();
3610 	}
3611 
3612 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3613 }
3614 #else
3615 static inline bool
3616 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3617 			       struct reo_desc_list_node *freedesc)
3618 {
3619 	return false;
3620 }
3621 
3622 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3623 {
3624 }
3625 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
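
/* For reference, a sketch of the deferred-free lifecycle when
 * WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY is enabled: the flush callback
 * enqueues the qdesc instead of freeing it immediately, and a later sweep
 * releases only the entries that have aged past REO_DESC_DEFERRED_FREE_MS.
 *
 *	// in the REO flush-cache callback (see dp_reo_desc_free() below):
 *	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
 *		return;			// freed later by the sweep
 *	// else fall through to immediate unmap + free
 *
 *	// sweep, invoked at the end of dp_rx_tid_delete_cb():
 *	dp_reo_desc_defer_free(soc);	// frees only aged-out entries
 */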
3626 
3627 /*
3628  * dp_reo_desc_free() - Callback to free reo descriptor memory after
3629  * HW cache flush
3630  *
3631  * @soc: DP SOC handle
3632  * @cb_ctxt: Callback context
3633  * @reo_status: REO command status
3634  */
3635 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3636 	union hal_reo_status *reo_status)
3637 {
3638 	struct reo_desc_list_node *freedesc =
3639 		(struct reo_desc_list_node *)cb_ctxt;
3640 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3641 	unsigned long curr_ts = qdf_get_system_timestamp();
3642 
3643 	if ((reo_status->fl_cache_status.header.status !=
3644 		HAL_REO_CMD_SUCCESS) &&
3645 		(reo_status->fl_cache_status.header.status !=
3646 		HAL_REO_CMD_DRAIN)) {
3647 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
3648 			    soc, reo_status->rx_queue_status.header.status,
3649 			    freedesc->rx_tid.tid);
3650 	}
3651 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3652 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3653 		     rx_tid->tid);
3654 
3655 	/* REO desc is enqueued to be freed at a later point
3656 	 * in time, just free the freedesc alone and return
3657 	 */
3658 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3659 		goto out;
3660 
3661 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3662 
3663 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
3664 	qdf_mem_unmap_nbytes_single(soc->osdev,
3665 		rx_tid->hw_qdesc_paddr,
3666 		QDF_DMA_BIDIRECTIONAL,
3667 		rx_tid->hw_qdesc_alloc_size);
3668 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3669 out:
3670 	qdf_mem_free(freedesc);
3671 }
3672 
3673 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3674 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3675 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3676 {
3677 	if (dma_addr < 0x50000000)
3678 		return QDF_STATUS_E_FAILURE;
3679 	else
3680 		return QDF_STATUS_SUCCESS;
3681 }
3682 #else
3683 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3684 {
3685 	return QDF_STATUS_SUCCESS;
3686 }
3687 #endif
3688 
3689 /*
3690  * dp_rx_tid_setup_wifi3() - Setup receive TID state
3691  * @peer: Datapath peer handle
3692  * @tid: TID
3693  * @ba_window_size: BlockAck window size
3694  * @start_seq: Starting sequence number
3695  *
3696  * Return: QDF_STATUS code
3697  */
3698 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3699 				 uint32_t ba_window_size, uint32_t start_seq)
3700 {
3701 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3702 	struct dp_vdev *vdev = peer->vdev;
3703 	struct dp_soc *soc = vdev->pdev->soc;
3704 	uint32_t hw_qdesc_size;
3705 	uint32_t hw_qdesc_align;
3706 	int hal_pn_type;
3707 	void *hw_qdesc_vaddr;
3708 	uint32_t alloc_tries = 0;
3709 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3710 	struct dp_txrx_peer *txrx_peer;
3711 
3712 	if (!qdf_atomic_read(&peer->is_default_route_set))
3713 		return QDF_STATUS_E_FAILURE;
3714 
3715 	if (!dp_rx_tid_setup_allow(peer)) {
3716 		dp_peer_info("skip rx tid setup for peer:" QDF_MAC_ADDR_FMT,
3717 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3718 		goto send_wmi_reo_cmd;
3719 	}
3720 
3721 	rx_tid->ba_win_size = ba_window_size;
3722 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3723 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3724 			start_seq, false);
3725 	rx_tid->delba_tx_status = 0;
3726 	rx_tid->ppdu_id_2k = 0;
3727 	rx_tid->num_of_addba_req = 0;
3728 	rx_tid->num_of_delba_req = 0;
3729 	rx_tid->num_of_addba_resp = 0;
3730 	rx_tid->num_addba_rsp_failed = 0;
3731 	rx_tid->num_addba_rsp_success = 0;
3732 	rx_tid->delba_tx_success_cnt = 0;
3733 	rx_tid->delba_tx_fail_cnt = 0;
3734 	rx_tid->statuscode = 0;
3735 
3736 	/* TODO: HW queue descriptors are allocated based on the max BA window
3737 	 * size for all QOS TIDs so that the same descriptor can be reused when
3738 	 * an ADDBA request is received. This should be changed to allocate HW
3739 	 * queue descriptors based on the BA window size being negotiated (0 for
3740 	 * non-BA cases), reallocate when the BA window size changes, and also
3741 	 * send a WMI message to FW to change the REO queue descriptor in the Rx
3742 	 * peer entry as part of dp_rx_tid_update.
3743 	 */
3744 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3745 					       ba_window_size, tid);
3746 
3747 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3748 	/* To avoid unnecessary extra allocation for alignment, try allocating
3749 	 * exact size and see if we already have aligned address.
3750 	 */
3751 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3752 
3753 try_desc_alloc:
3754 	rx_tid->hw_qdesc_vaddr_unaligned =
3755 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3756 
3757 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3758 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3759 			    soc, tid);
3760 		return QDF_STATUS_E_NOMEM;
3761 	}
3762 
3763 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3764 		hw_qdesc_align) {
3765 		/* Address allocated above is not aligned. Allocate extra
3766 		 * memory for alignment
3767 		 */
3768 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3769 		rx_tid->hw_qdesc_vaddr_unaligned =
3770 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3771 					hw_qdesc_align - 1);
3772 
3773 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3774 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3775 				    soc, tid);
3776 			return QDF_STATUS_E_NOMEM;
3777 		}
3778 
3779 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3780 			rx_tid->hw_qdesc_vaddr_unaligned,
3781 			hw_qdesc_align);
3782 
3783 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3784 			      soc, rx_tid->hw_qdesc_alloc_size,
3785 			      hw_qdesc_vaddr);
3786 
3787 	} else {
3788 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3789 	}
3790 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3791 
3792 	txrx_peer = dp_get_txrx_peer(peer);
3793 
3794 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3795 	 * Currently this is set based on htt indication
3796 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3797 	 */
3798 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
3799 	case cdp_sec_type_tkip_nomic:
3800 	case cdp_sec_type_aes_ccmp:
3801 	case cdp_sec_type_aes_ccmp_256:
3802 	case cdp_sec_type_aes_gcmp:
3803 	case cdp_sec_type_aes_gcmp_256:
3804 		hal_pn_type = HAL_PN_WPA;
3805 		break;
3806 	case cdp_sec_type_wapi:
3807 		if (vdev->opmode == wlan_op_mode_ap)
3808 			hal_pn_type = HAL_PN_WAPI_EVEN;
3809 		else
3810 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3811 		break;
3812 	default:
3813 		hal_pn_type = HAL_PN_NONE;
3814 		break;
3815 	}
3816 
3817 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3818 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3819 		vdev->vdev_stats_id);
3820 
3821 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3822 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3823 		&(rx_tid->hw_qdesc_paddr));
3824 
3825 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3826 			QDF_STATUS_SUCCESS) {
3827 		if (alloc_tries++ < 10) {
3828 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3829 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3830 			goto try_desc_alloc;
3831 		} else {
3832 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3833 				    soc, tid);
3834 			status = QDF_STATUS_E_NOMEM;
3835 			goto error;
3836 		}
3837 	}
3838 
3839 send_wmi_reo_cmd:
3840 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3841 		status = QDF_STATUS_E_PERM;
3842 		goto error;
3843 	}
3844 
3845 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3846 						tid, ba_window_size);
3847 	if (QDF_IS_STATUS_SUCCESS(status))
3848 		return status;
3849 
3850 error:
3851 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3852 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3853 		    QDF_STATUS_SUCCESS)
3854 			qdf_mem_unmap_nbytes_single(
3855 				soc->osdev,
3856 				rx_tid->hw_qdesc_paddr,
3857 				QDF_DMA_BIDIRECTIONAL,
3858 				rx_tid->hw_qdesc_alloc_size);
3859 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3860 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3861 		rx_tid->hw_qdesc_paddr = 0;
3862 	}
3863 	return status;
3864 }
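
/* A minimal sketch of the alignment strategy used above, assuming the
 * hypothetical values size = 1024 and align = hal_get_reo_qdesc_align():
 * first try an exact-size allocation and keep it if it already lands on the
 * required boundary; only on a misaligned result re-allocate with
 * (align - 1) bytes of slack and round the pointer up with qdf_align().
 *
 *	void *unaligned = qdf_mem_malloc(size);
 *	void *aligned = unaligned;
 *
 *	if (unaligned && ((unsigned long)unaligned % align)) {
 *		qdf_mem_free(unaligned);
 *		unaligned = qdf_mem_malloc(size + align - 1);
 *		aligned = (void *)qdf_align((unsigned long)unaligned, align);
 *	}
 *	// HW is given "aligned"; qdf_mem_free() later takes "unaligned"
 */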
3865 
3866 #ifdef DP_UMAC_HW_RESET_SUPPORT
3867 static
3868 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
3869 {
3870 	int tid;
3871 
3872 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
3873 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3874 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
3875 
3876 		if (vaddr)
3877 			dp_reset_rx_reo_tid_queue(soc, vaddr,
3878 						  rx_tid->hw_qdesc_alloc_size);
3879 	}
3880 }
3881 
3882 void dp_reset_tid_q_setup(struct dp_soc *soc)
3883 {
3884 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
3885 }
3886 #endif
3887 #ifdef REO_DESC_DEFER_FREE
3888 /*
3889  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
3890  * the desc back to the freelist and defer the deletion
3891  *
3892  * @soc: DP SOC handle
3893  * @desc: Base descriptor to be freed
3894  * @reo_status: REO command status
3895  */
3896 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3897 				 struct reo_desc_list_node *desc,
3898 				 union hal_reo_status *reo_status)
3899 {
3900 	desc->free_ts = qdf_get_system_timestamp();
3901 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3902 	qdf_list_insert_back(&soc->reo_desc_freelist,
3903 			     (qdf_list_node_t *)desc);
3904 }
3905 
3906 /*
3907  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
3908  * the cmd ring to avoid a REO hang
3909  *
3910  * @list_size: REO desc list size to be cleaned
3911  */
3912 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3913 {
3914 	unsigned long curr_ts = qdf_get_system_timestamp();
3915 
3916 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3917 		dp_err_log("%lu:freedesc number %d in freelist",
3918 			   curr_ts, *list_size);
3919 		/* limit the batch queue size */
3920 		*list_size = REO_DESC_FREELIST_SIZE;
3921 	}
3922 }
3923 #else
3924 /*
3925  * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
3926  * cache fails, free the base REO desc anyway
3927  *
3928  * @soc: DP SOC handle
3929  * @desc: Base descriptor to be freed
3930  * @reo_status: REO command status
3931  */
3932 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3933 				 struct reo_desc_list_node *desc,
3934 				 union hal_reo_status *reo_status)
3935 {
3936 	if (reo_status) {
3937 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3938 		reo_status->fl_cache_status.header.status = 0;
3939 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3940 	}
3941 }
3942 
3943 /*
3944  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
3945  * the cmd ring to avoid a REO hang
3946  *
3947  * @list_size: REO desc list size to be cleaned
3948  */
3949 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3950 {
3951 }
3952 #endif
3953 
3954 /*
3955  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3956  * cmd and re-insert desc into free list if send fails.
3957  *
3958  * @soc: DP SOC handle
3959  * @desc: desc with resend update cmd flag set
3960  * @rx_tid: Desc RX tid associated with update cmd for resetting
3961  * valid field to 0 in h/w
3962  *
3963  * Return: QDF status
3964  */
3965 static QDF_STATUS
3966 dp_resend_update_reo_cmd(struct dp_soc *soc,
3967 			 struct reo_desc_list_node *desc,
3968 			 struct dp_rx_tid *rx_tid)
3969 {
3970 	struct hal_reo_cmd_params params;
3971 
3972 	qdf_mem_zero(&params, sizeof(params));
3973 	params.std.need_status = 1;
3974 	params.std.addr_lo =
3975 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3976 	params.std.addr_hi =
3977 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3978 	params.u.upd_queue_params.update_vld = 1;
3979 	params.u.upd_queue_params.vld = 0;
3980 	desc->resend_update_reo_cmd = false;
3981 	/*
3982 	 * If the cmd send fails then set resend_update_reo_cmd flag
3983 	 * and insert the desc at the end of the free list to retry.
3984 	 */
3985 	if (dp_reo_send_cmd(soc,
3986 			    CMD_UPDATE_RX_REO_QUEUE,
3987 			    &params,
3988 			    dp_rx_tid_delete_cb,
3989 			    (void *)desc)
3990 	    != QDF_STATUS_SUCCESS) {
3991 		desc->resend_update_reo_cmd = true;
3992 		desc->free_ts = qdf_get_system_timestamp();
3993 		qdf_list_insert_back(&soc->reo_desc_freelist,
3994 				     (qdf_list_node_t *)desc);
3995 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3996 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3997 		return QDF_STATUS_E_FAILURE;
3998 	}
3999 
4000 	return QDF_STATUS_SUCCESS;
4001 }
4002 
4003 /*
4004  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
4005  * after deleting the entries (i.e., setting valid=0)
4006  *
4007  * @soc: DP SOC handle
4008  * @cb_ctxt: Callback context
4009  * @reo_status: REO command status
4010  */
4011 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
4012 			 union hal_reo_status *reo_status)
4013 {
4014 	struct reo_desc_list_node *freedesc =
4015 		(struct reo_desc_list_node *)cb_ctxt;
4016 	uint32_t list_size;
4017 	struct reo_desc_list_node *desc;
4018 	unsigned long curr_ts = qdf_get_system_timestamp();
4019 	uint32_t desc_size, tot_desc_size;
4020 	struct hal_reo_cmd_params params;
4021 	bool flush_failure = false;
4022 
4023 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
4024 
4025 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
4026 		qdf_mem_zero(reo_status, sizeof(*reo_status));
4027 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
4028 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
4029 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
4030 		return;
4031 	} else if (reo_status->rx_queue_status.header.status !=
4032 		HAL_REO_CMD_SUCCESS) {
4033 		/* Should not happen normally. Just print error for now */
4034 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
4035 			   reo_status->rx_queue_status.header.status,
4036 			   freedesc->rx_tid.tid);
4037 	}
4038 
4039 	dp_peer_info("%pK: rx_tid: %d status: %d",
4040 		     soc, freedesc->rx_tid.tid,
4041 		     reo_status->rx_queue_status.header.status);
4042 
4043 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4044 	freedesc->free_ts = curr_ts;
4045 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
4046 		(qdf_list_node_t *)freedesc, &list_size);
4047 
4048 	/* The MCL path adds the desc back to reo_desc_freelist when a REO
4049 	 * flush fails. This can grow the number of pending REO queue descs
4050 	 * in the free list beyond the REO_CMD_RING max size, flooding the
4051 	 * REO CMD ring and leaving the REO HW in an unexpected condition.
4052 	 * So the number of REO cmds in a batch operation must be limited.
4053 	 */
4054 	dp_reo_limit_clean_batch_sz(&list_size);
4055 
4056 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
4057 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
4058 		((list_size >= REO_DESC_FREELIST_SIZE) ||
4059 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
4060 		(desc->resend_update_reo_cmd && list_size))) {
4061 		struct dp_rx_tid *rx_tid;
4062 
4063 		qdf_list_remove_front(&soc->reo_desc_freelist,
4064 				(qdf_list_node_t **)&desc);
4065 		list_size--;
4066 		rx_tid = &desc->rx_tid;
4067 
4068 		/* First process descs with resend_update_reo_cmd set */
4069 		if (desc->resend_update_reo_cmd) {
4070 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
4071 			    QDF_STATUS_SUCCESS)
4072 				break;
4073 			else
4074 				continue;
4075 		}
4076 
4077 		/* Flush and invalidate REO descriptor from HW cache: Base and
4078 		 * extension descriptors should be flushed separately */
4079 		if (desc->pending_ext_desc_size)
4080 			tot_desc_size = desc->pending_ext_desc_size;
4081 		else
4082 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
4083 		/* Get base descriptor size by passing non-qos TID */
4084 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
4085 						   DP_NON_QOS_TID);
4086 
4087 		/* Flush reo extension descriptors */
4088 		while ((tot_desc_size -= desc_size) > 0) {
4089 			qdf_mem_zero(&params, sizeof(params));
4090 			params.std.addr_lo =
4091 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
4092 				tot_desc_size) & 0xffffffff;
4093 			params.std.addr_hi =
4094 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4095 
4096 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4097 							CMD_FLUSH_CACHE,
4098 							&params,
4099 							NULL,
4100 							NULL)) {
4101 				dp_info_rl("fail to send CMD_FLUSH_CACHE: "
4102 					   "tid %d desc %pK", rx_tid->tid,
4103 					   (void *)(rx_tid->hw_qdesc_paddr));
4104 				desc->pending_ext_desc_size = tot_desc_size +
4105 								      desc_size;
4106 				dp_reo_desc_clean_up(soc, desc, reo_status);
4107 				flush_failure = true;
4108 				break;
4109 			}
4110 		}
4111 
4112 		if (flush_failure)
4113 			break;
4114 		else
4115 			desc->pending_ext_desc_size = desc_size;
4116 
4117 		/* Flush base descriptor */
4118 		qdf_mem_zero(&params, sizeof(params));
4119 		params.std.need_status = 1;
4120 		params.std.addr_lo =
4121 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
4122 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4123 
4124 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
4125 							  CMD_FLUSH_CACHE,
4126 							  &params,
4127 							  dp_reo_desc_free,
4128 							  (void *)desc)) {
4129 			union hal_reo_status reo_status;
4130 			/*
4131 			 * If dp_reo_send_cmd return failure, related TID queue desc
4132 			 * should be unmapped. Also locally reo_desc, together with
4133 			 * TID queue desc also need to be freed accordingly.
4134 			 *
4135 			 * Here invoke desc_free function directly to do clean up.
4136 			 *
4137 			 * In case of MCL path add the desc back to the free
4138 			 * desc list and defer deletion.
4139 			 */
4140 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
4141 				   rx_tid->tid);
4142 			dp_reo_desc_clean_up(soc, desc, &reo_status);
4143 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4144 			break;
4145 		}
4146 	}
4147 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4148 
4149 	dp_reo_desc_defer_free(soc);
4150 }
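
/* Worked example of the extension-descriptor flush above, with hypothetical
 * sizes: if tot_desc_size = 3 * desc_size, the while loop issues
 * CMD_FLUSH_CACHE at hw_qdesc_paddr + 2 * desc_size and then at
 * hw_qdesc_paddr + desc_size, i.e. the extension descriptors are flushed
 * back to front. The base descriptor at offset 0 is flushed by the separate
 * "Flush base descriptor" command, with dp_reo_desc_free registered as the
 * callback so the memory is released only once the whole queue descriptor
 * is out of the HW cache.
 */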
4151 
4152 /*
4153  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
4154  * @peer: Datapath peer handle
4155  * @tid: TID
4156  *
4157  * Return: 0 on success, error code on failure
4158  */
4159 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
4160 {
4161 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
4162 	struct dp_soc *soc = peer->vdev->pdev->soc;
4163 	struct hal_reo_cmd_params params;
4164 	struct reo_desc_list_node *freedesc =
4165 		qdf_mem_malloc(sizeof(*freedesc));
4166 
4167 	if (!freedesc) {
4168 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
4169 			    soc, tid);
4170 		qdf_assert(0);
4171 		return -ENOMEM;
4172 	}
4173 
4174 	freedesc->rx_tid = *rx_tid;
4175 	freedesc->resend_update_reo_cmd = false;
4176 
4177 	qdf_mem_zero(&params, sizeof(params));
4178 
4179 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
4180 
4181 	params.std.need_status = 1;
4182 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
4183 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4184 	params.u.upd_queue_params.update_vld = 1;
4185 	params.u.upd_queue_params.vld = 0;
4186 
4187 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
4188 			    dp_rx_tid_delete_cb, (void *)freedesc)
4189 		!= QDF_STATUS_SUCCESS) {
4190 		/* Defer the clean up to the call back context */
4191 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4192 		freedesc->free_ts = qdf_get_system_timestamp();
4193 		freedesc->resend_update_reo_cmd = true;
4194 		qdf_list_insert_front(&soc->reo_desc_freelist,
4195 				      (qdf_list_node_t *)freedesc);
4196 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
4197 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4198 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
4199 	}
4200 
4201 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
4202 	rx_tid->hw_qdesc_alloc_size = 0;
4203 	rx_tid->hw_qdesc_paddr = 0;
4204 
4205 	return 0;
4206 }
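
/* End-to-end TID delete flow, summarized (descriptive only):
 *
 *	dp_rx_tid_delete_wifi3()
 *	  -> CMD_UPDATE_RX_REO_QUEUE (vld = 0)	invalidate the queue
 *	  -> dp_rx_tid_delete_cb()		on command completion
 *	    -> CMD_FLUSH_CACHE (ext + base)	evict from HW cache
 *	      -> dp_reo_desc_free()		unmap and free memory
 *
 * Note that freedesc carries a copy of the rx_tid state, which is why the
 * peer's rx_tid fields can be reset immediately above while the REO command
 * chain completes asynchronously.
 */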
4207 
4208 #ifdef DP_LFR
4209 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
4210 {
4211 	int tid;
4212 
4213 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
4214 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
4215 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
4216 			      tid, peer, peer->local_id);
4217 	}
4218 }
4219 #else
4220 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
4221 #endif
4222 
4223 #ifdef WLAN_FEATURE_11BE_MLO
4224 /**
4225  * dp_peer_rx_tids_init() - initialize each TID in peer
4226  * @peer: peer pointer
4227  *
4228  * Return: None
4229  */
4230 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4231 {
4232 	int tid;
4233 	struct dp_rx_tid *rx_tid;
4234 	struct dp_rx_tid_defrag *rx_tid_defrag;
4235 
4236 	if (!IS_MLO_DP_LINK_PEER(peer)) {
4237 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4238 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4239 
4240 			rx_tid_defrag->array = &rx_tid_defrag->base;
4241 			rx_tid_defrag->defrag_timeout_ms = 0;
4242 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4243 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4244 			rx_tid_defrag->base.head = NULL;
4245 			rx_tid_defrag->base.tail = NULL;
4246 			rx_tid_defrag->tid = tid;
4247 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
4248 		}
4249 	}
4250 
4251 	/* if this is not the first assoc link peer,
4252 	 * do not initialize rx_tids again.
4253 	 */
4254 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
4255 		return;
4256 
4257 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4258 		rx_tid = &peer->rx_tid[tid];
4259 		rx_tid->tid = tid;
4260 		rx_tid->ba_win_size = 0;
4261 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4262 	}
4263 }
4264 #else
4265 static void dp_peer_rx_tids_init(struct dp_peer *peer)
4266 {
4267 	int tid;
4268 	struct dp_rx_tid *rx_tid;
4269 	struct dp_rx_tid_defrag *rx_tid_defrag;
4270 
4271 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4272 		rx_tid = &peer->rx_tid[tid];
4273 
4274 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
4275 		rx_tid->tid = tid;
4276 		rx_tid->ba_win_size = 0;
4277 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4278 
4279 		rx_tid_defrag->base.head = NULL;
4280 		rx_tid_defrag->base.tail = NULL;
4281 		rx_tid_defrag->tid = tid;
4282 		rx_tid_defrag->array = &rx_tid_defrag->base;
4283 		rx_tid_defrag->defrag_timeout_ms = 0;
4284 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
4285 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
4286 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
4287 	}
4288 }
4289 #endif
4290 
4291 /*
4292  * dp_peer_rx_init() - Initialize receive TID state
4293  * @pdev: Datapath pdev
4294  * @peer: Datapath peer
4295  *
4296  */
4297 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
4298 {
4299 	dp_peer_rx_tids_init(peer);
4300 
4301 	peer->active_ba_session_cnt = 0;
4302 	peer->hw_buffer_size = 0;
4303 	peer->kill_256_sessions = 0;
4304 
4305 	/* Setup default (non-qos) rx tid queue */
4306 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
4307 
4308 	/* Setup rx tid queue for TID 0.
4309 	 * Other queues will be set up on receiving the first packet, which will
4310 	 * cause a NULL REO queue error
4311 	 */
4312 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
4313 
4314 	/*
4315 	 * Setup the rest of TID's to handle LFR
4316 	 */
4317 	dp_peer_setup_remaining_tids(peer);
4318 
4319 	/*
4320 	 * Set security defaults: no PN check, no security. The target may
4321 	 * send a HTT SEC_IND message to overwrite these defaults.
4322 	 */
4323 	if (peer->txrx_peer)
4324 		peer->txrx_peer->security[dp_sec_ucast].sec_type =
4325 			peer->txrx_peer->security[dp_sec_mcast].sec_type =
4326 				cdp_sec_type_none;
4327 }
4328 
4329 /*
4330  * dp_peer_rx_cleanup() - Cleanup receive TID state
4331  * @vdev: Datapath vdev
4332  * @peer: Datapath peer
4333  *
4334  */
4335 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4336 {
4337 	int tid;
4338 	uint32_t tid_delete_mask = 0;
4339 
4340 	if (!peer->txrx_peer)
4341 		return;
4342 
4343 	dp_info("Remove tids for peer: %pK", peer);
4344 
4345 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4346 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
4347 		struct dp_rx_tid_defrag *defrag_rx_tid =
4348 				&peer->txrx_peer->rx_tid[tid];
4349 
4350 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
4351 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
4352 			/* Cleanup defrag related resource */
4353 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
4354 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
4355 		}
4356 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
4357 
4358 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4359 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
4360 			dp_rx_tid_delete_wifi3(peer, tid);
4361 
4362 			tid_delete_mask |= (1 << tid);
4363 		}
4364 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4365 	}
4366 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
4367 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
4368 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
4369 			peer->vdev->pdev->pdev_id,
4370 			peer->vdev->vdev_id, peer->mac_addr.raw,
4371 			tid_delete_mask);
4372 	}
4373 #endif
4374 }
4375 
4376 /*
4377  * dp_peer_cleanup() - Cleanup peer information
4378  * @vdev: Datapath vdev
4379  * @peer: Datapath peer
4380  *
4381  */
4382 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
4383 {
4384 	enum wlan_op_mode vdev_opmode;
4385 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
4386 	struct dp_pdev *pdev = vdev->pdev;
4387 	struct dp_soc *soc = pdev->soc;
4388 
4389 	/* save vdev related member in case vdev freed */
4390 	vdev_opmode = vdev->opmode;
4391 
4392 	if (!IS_MLO_DP_MLD_PEER(peer))
4393 		dp_monitor_peer_tx_cleanup(vdev, peer);
4394 
4395 	/* cleanup the Rx reorder queues for this peer */
4396 	if (vdev_opmode != wlan_op_mode_monitor)
4397 		dp_peer_rx_cleanup(vdev, peer);
4398 
4399 	dp_peer_rx_tids_destroy(peer);
4400 
4401 	if (IS_MLO_DP_LINK_PEER(peer))
4402 		dp_link_peer_del_mld_peer(peer);
4403 	if (IS_MLO_DP_MLD_PEER(peer))
4404 		dp_mld_peer_deinit_link_peers_info(peer);
4405 
4406 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
4407 		     QDF_MAC_ADDR_SIZE);
4408 
4409 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
4410 		soc->cdp_soc.ol_ops->peer_unref_delete(
4411 				soc->ctrl_psoc,
4412 				vdev->pdev->pdev_id,
4413 				peer->mac_addr.raw, vdev_mac_addr,
4414 				vdev_opmode);
4415 }
4416 
4417 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
4418  *                                window size when a request with
4419  *                                64 window size is received.
4420  *                                This is done as a WAR since HW can
4421  *                                have only one setting per peer (64 or 256).
4422  *                                For HKv2, we use the per-tid buffersize
4423  *                                setting for tids 0 to per_tid_basize_max_tid.
4424  *                                For tids greater than per_tid_basize_max_tid,
4425  *                                we use the HKv1 method.
4426  * @peer: Datapath peer
4427  *
4428  * Return: void
4429  */
4430 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
4431 {
4432 	uint8_t delba_rcode = 0;
4433 	int tid;
4434 	struct dp_rx_tid *rx_tid = NULL;
4435 
4436 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
4437 	for (; tid < DP_MAX_TIDS; tid++) {
4438 		rx_tid = &peer->rx_tid[tid];
4439 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4440 
4441 		if (rx_tid->ba_win_size <= 64) {
4442 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4443 			continue;
4444 		} else {
4445 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
4446 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4447 				/* send delba */
4448 				if (!rx_tid->delba_tx_status) {
4449 					rx_tid->delba_tx_retry++;
4450 					rx_tid->delba_tx_status = 1;
4451 					rx_tid->delba_rcode =
4452 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
4453 					delba_rcode = rx_tid->delba_rcode;
4454 
4455 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4456 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4457 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4458 							peer->vdev->pdev->soc->ctrl_psoc,
4459 							peer->vdev->vdev_id,
4460 							peer->mac_addr.raw,
4461 							tid, delba_rcode,
4462 							CDP_DELBA_REASON_NONE);
4463 				} else {
4464 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
4465 				}
4466 			} else {
4467 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4468 			}
4469 		}
4470 	}
4471 }
4472 
4473 /*
4474 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid state
4475 *
4476 * @cdp_soc: Datapath soc handle
4477 * @peer_mac: Datapath peer mac address
4478 * @vdev_id: id of datapath vdev
4479 * @tid: TID number
4480 * @status: tx completion status
4481 * Return: 0 on success, error code on failure
4482 */
4483 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
4484 				      uint8_t *peer_mac,
4485 				      uint16_t vdev_id,
4486 				      uint8_t tid, int status)
4487 {
4488 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4489 					(struct dp_soc *)cdp_soc,
4490 					peer_mac, 0, vdev_id,
4491 					DP_MOD_ID_CDP);
4492 	struct dp_rx_tid *rx_tid = NULL;
4493 
4494 	if (!peer) {
4495 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4496 		goto fail;
4497 	}
4498 	rx_tid = &peer->rx_tid[tid];
4499 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4500 	if (status) {
4501 		rx_tid->num_addba_rsp_failed++;
4502 		if (rx_tid->hw_qdesc_vaddr_unaligned)
4503 			dp_rx_tid_update_wifi3(peer, tid, 1,
4504 					       IEEE80211_SEQ_MAX, false);
4505 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4506 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4507 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
4508 
4509 		goto success;
4510 	}
4511 
4512 	rx_tid->num_addba_rsp_success++;
4513 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
4514 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4515 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
4516 			    cdp_soc, tid);
4517 		goto fail;
4518 	}
4519 
4520 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
4521 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4522 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
4523 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4524 		goto fail;
4525 	}
4526 
4527 	if (dp_rx_tid_update_wifi3(peer, tid,
4528 				   rx_tid->ba_win_size,
4529 				   rx_tid->startseqnum,
4530 				   false)) {
4531 		dp_err("Failed update REO SSN");
4532 	}
4533 
4534 	dp_info("tid %u window_size %u start_seq_num %u",
4535 		tid, rx_tid->ba_win_size,
4536 		rx_tid->startseqnum);
4537 
4538 	/* First Session */
4539 	if (peer->active_ba_session_cnt == 0) {
4540 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
4541 			peer->hw_buffer_size = 256;
4542 		else if (rx_tid->ba_win_size <= 1024 &&
4543 			 rx_tid->ba_win_size > 256)
4544 			peer->hw_buffer_size = 1024;
4545 		else
4546 			peer->hw_buffer_size = 64;
4547 	}
4548 
4549 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4550 
4551 	peer->active_ba_session_cnt++;
4552 
4553 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4554 
4555 	/* Kill any session having 256 buffer size
4556 	 * when 64 buffer size request is received.
4557 	 * Also, latch on to 64 as new buffer size.
4558 	 */
4559 	if (peer->kill_256_sessions) {
4560 		dp_teardown_256_ba_sessions(peer);
4561 		peer->kill_256_sessions = 0;
4562 	}
4563 
4564 success:
4565 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4566 	return QDF_STATUS_SUCCESS;
4567 
4568 fail:
4569 	if (peer)
4570 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4571 
4572 	return QDF_STATUS_E_FAILURE;
4573 }
4574 
4575 /*
4576 * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer
4577 *
4578 * @soc: Datapath soc handle
4579 * @peer_mac: Datapath peer mac address
4580 * @vdev_id: id of atapath vdev
4581 * @tid: TID number
4582 * @dialogtoken: output dialogtoken
4583 * @statuscode: output dialogtoken
4584 * @buffersize: Output BA window size
4585 * @batimeout: Output BA timeout
4586 */
4587 QDF_STATUS
4588 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4589 			     uint16_t vdev_id, uint8_t tid,
4590 			     uint8_t *dialogtoken, uint16_t *statuscode,
4591 			     uint16_t *buffersize, uint16_t *batimeout)
4592 {
4593 	struct dp_rx_tid *rx_tid = NULL;
4594 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4595 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4596 						       peer_mac, 0, vdev_id,
4597 						       DP_MOD_ID_CDP);
4598 
4599 	if (!peer) {
4600 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4601 		return QDF_STATUS_E_FAILURE;
4602 	}
4603 	rx_tid = &peer->rx_tid[tid];
4604 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4605 	rx_tid->num_of_addba_resp++;
4606 	/* setup ADDBA response parameters */
4607 	*dialogtoken = rx_tid->dialogtoken;
4608 	*statuscode = rx_tid->statuscode;
4609 	*buffersize = rx_tid->ba_win_size;
4610 	*batimeout  = 0;
4611 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4612 
4613 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4614 
4615 	return status;
4616 }
4617 
4618 /* dp_check_ba_buffersize() - Check buffer size in request
4619  *                            and latch onto this size based on
4620  *                            size used in first active session.
4621  * @peer: Datapath peer
4622  * @tid: Tid
4623  * @buffersize: Block ack window size
4624  *
4625  * Return: void
4626  */
4627 static void dp_check_ba_buffersize(struct dp_peer *peer,
4628 				   uint16_t tid,
4629 				   uint16_t buffersize)
4630 {
4631 	struct dp_rx_tid *rx_tid = NULL;
4632 	struct dp_soc *soc = peer->vdev->pdev->soc;
4633 	uint16_t max_ba_window;
4634 
4635 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
4636 	dp_info("Input buffersize %d, max dp allowed %d",
4637 		buffersize, max_ba_window);
4638 	/* Adjust BA window size, restrict it to max DP allowed */
4639 	buffersize = QDF_MIN(buffersize, max_ba_window);
4640 
4641 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
4642 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4643 		soc->per_tid_basize_max_tid, tid, buffersize,
4644 		peer->hw_buffer_size);
4645 
4646 	rx_tid = &peer->rx_tid[tid];
4647 	if (soc->per_tid_basize_max_tid &&
4648 	    tid < soc->per_tid_basize_max_tid) {
4649 		rx_tid->ba_win_size = buffersize;
4650 		goto out;
4651 	} else {
4652 		if (peer->active_ba_session_cnt == 0) {
4653 			rx_tid->ba_win_size = buffersize;
4654 		} else {
4655 			if (peer->hw_buffer_size == 64) {
4656 				if (buffersize <= 64)
4657 					rx_tid->ba_win_size = buffersize;
4658 				else
4659 					rx_tid->ba_win_size = peer->hw_buffer_size;
4660 			} else if (peer->hw_buffer_size == 256) {
4661 				if (buffersize > 64) {
4662 					rx_tid->ba_win_size = buffersize;
4663 				} else {
4664 					rx_tid->ba_win_size = buffersize;
4665 					peer->hw_buffer_size = 64;
4666 					peer->kill_256_sessions = 1;
4667 				}
4668 			} else if (buffersize <= 1024) {
4669 				/*
4670 				 * Above checks are only for HK V2;
4671 				 * set the incoming buffer size for others
4672 				 */
4673 				rx_tid->ba_win_size = buffersize;
4674 			} else {
4675 				dp_err("Invalid buffer size %d", buffersize);
4676 				qdf_assert_always(0);
4677 			}
4678 		}
4679 	}
4680 
4681 out:
4682 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
4683 		rx_tid->ba_win_size,
4684 		peer->hw_buffer_size,
4685 		peer->kill_256_sessions);
4686 }
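
/* Latch behavior of dp_check_ba_buffersize(), illustrated with assumed
 * inputs for a tid outside the per-tid buffersize range (tid >=
 * per_tid_basize_max_tid):
 *
 *	active sessions	hw_buffer_size	requested	latched ba_win_size
 *	0		-		256		256 (first session)
 *	>0		64		256		64 (clamped to hw size)
 *	>0		256		32		32 (hw size drops to 64
 *							    and kill_256_sessions
 *							    tears down 256 BA)
 */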
4687 
4688 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
4689 					uint8_t *peer_mac, uint16_t vdev_id,
4690 					uint8_t tid, uint16_t buffersize)
4691 {
4692 	struct dp_rx_tid *rx_tid = NULL;
4693 	struct dp_peer *peer;
4694 
4695 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4696 					      peer_mac, 0, vdev_id,
4697 					      DP_MOD_ID_CDP);
4698 	if (!peer) {
4699 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4700 		return QDF_STATUS_E_FAILURE;
4701 	}
4702 
4703 	rx_tid = &peer->rx_tid[tid];
4704 
4705 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4706 	rx_tid->ba_win_size = buffersize;
4707 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4708 
4709 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
4710 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
4711 
4712 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4713 
4714 	return QDF_STATUS_SUCCESS;
4715 }
4716 
4717 #define DP_RX_BA_SESSION_DISABLE  1
4718 
4719 /*
4720  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
4721  *
4722  * @cdp_soc: Datapath soc handle
4723  * @peer_mac: Datapath peer mac address
4724  * @vdev_id: id of datapath vdev
4725  * @dialogtoken: dialogtoken from ADDBA frame
4726  * @tid: TID number
4727  * @batimeout: BA timeout
4728  * @buffersize: BA window size
4729  * @startseqnum: Start seq. number received in BA sequence control
4730  *
4731  * Return: 0 on success, error code on failure
4732  */
4733 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4734 				  uint8_t *peer_mac,
4735 				  uint16_t vdev_id,
4736 				  uint8_t dialogtoken,
4737 				  uint16_t tid, uint16_t batimeout,
4738 				  uint16_t buffersize,
4739 				  uint16_t startseqnum)
4740 {
4741 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4742 	struct dp_rx_tid *rx_tid = NULL;
4743 	struct dp_peer *peer;
4744 
4745 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4746 					      peer_mac,
4747 					      0, vdev_id,
4748 					      DP_MOD_ID_CDP);
4749 
4750 	if (!peer) {
4751 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4752 		return QDF_STATUS_E_FAILURE;
4753 	}
4754 	rx_tid = &peer->rx_tid[tid];
4755 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4756 	rx_tid->num_of_addba_req++;
4757 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4758 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
4759 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4760 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4761 		peer->active_ba_session_cnt--;
4762 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4763 			      cdp_soc, tid);
4764 	}
4765 
4766 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4767 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4768 		status = QDF_STATUS_E_FAILURE;
4769 		goto fail;
4770 	}
4771 
4772 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4773 		dp_peer_info("%pK: disable BA session",
4774 			     cdp_soc);
4775 
4776 		buffersize = 1;
4777 	} else if (rx_tid->rx_ba_win_size_override) {
4778 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4779 			     rx_tid->rx_ba_win_size_override);
4780 
4781 		buffersize = rx_tid->rx_ba_win_size_override;
4782 	} else {
4783 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4784 			     buffersize);
4785 	}
4786 
4787 	dp_check_ba_buffersize(peer, tid, buffersize);
4788 
4789 	if (dp_rx_tid_setup_wifi3(peer, tid,
4790 	    rx_tid->ba_win_size, startseqnum)) {
4791 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4792 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4793 		status = QDF_STATUS_E_FAILURE;
4794 		goto fail;
4795 	}
4796 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4797 
4798 	rx_tid->dialogtoken = dialogtoken;
4799 	rx_tid->startseqnum = startseqnum;
4800 
4801 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4802 		rx_tid->statuscode = rx_tid->userstatuscode;
4803 	else
4804 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4805 
4806 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4807 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4808 
4809 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4810 
4811 fail:
4812 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4813 
4814 	return status;
4815 }
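
/* Precedence of the BA window size chosen above, for reference:
 *
 *	1. rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE:
 *	   buffersize is forced to 1 and the ADDBA response statuscode is
 *	   later set to IEEE80211_STATUS_REFUSED;
 *	2. any other non-zero rx_ba_win_size_override replaces the size
 *	   requested in the ADDBA frame;
 *	3. otherwise the requested size is kept.
 *
 * The result is then clamped/latched by dp_check_ba_buffersize() before
 * dp_rx_tid_setup_wifi3() (re)initializes the RX TID queue.
 */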
4816 
4817 /*
4818 * dp_set_addba_response() - Set a user defined ADDBA response status code
4819 *
4820 * @cdp_soc: Datapath soc handle
4821 * @peer_mac: Datapath peer mac address
4822 * @vdev_id: id of datapath vdev
4823 * @tid: TID number
4824 * @statuscode: response status code to be set
4825 */
4826 QDF_STATUS
4827 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4828 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4829 {
4830 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4831 					(struct dp_soc *)cdp_soc,
4832 					peer_mac, 0, vdev_id,
4833 					DP_MOD_ID_CDP);
4834 	struct dp_rx_tid *rx_tid;
4835 
4836 	if (!peer) {
4837 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4838 		return QDF_STATUS_E_FAILURE;
4839 	}
4840 
4841 	rx_tid = &peer->rx_tid[tid];
4842 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4843 	rx_tid->userstatuscode = statuscode;
4844 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4845 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4846 
4847 	return QDF_STATUS_SUCCESS;
4848 }
4849 
4850 /*
4851 * dp_delba_process_wifi3() - Process DELBA from peer
4852 * @cdp_soc: Datapath soc handle
4853 * @peer_mac: Datapath peer mac address
4854 * @vdev_id: id of datapath vdev
4855 * @tid: TID number
4856 * @reasoncode: Reason code received in DELBA frame
4857 *
4858 * Return: 0 on success, error code on failure
4859 */
4860 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4861 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4862 {
4863 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4864 	struct dp_rx_tid *rx_tid;
4865 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4866 					(struct dp_soc *)cdp_soc,
4867 					peer_mac, 0, vdev_id,
4868 					DP_MOD_ID_CDP);
4869 
4870 	if (!peer) {
4871 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4872 		return QDF_STATUS_E_FAILURE;
4873 	}
4874 	rx_tid = &peer->rx_tid[tid];
4875 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4876 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4877 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4878 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4879 		status = QDF_STATUS_E_FAILURE;
4880 		goto fail;
4881 	}
4882 	/* TODO: See if we can delete the existing REO queue descriptor and
4883 	 * replace with a new one without queue extension descript to save
4884 	 * memory
4885 	 */
4886 	rx_tid->delba_rcode = reasoncode;
4887 	rx_tid->num_of_delba_req++;
4888 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4889 
4890 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4891 	peer->active_ba_session_cnt--;
4892 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4893 fail:
4894 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4895 
4896 	return status;
4897 }
4898 
4899 /*
4900  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
4901  *
4902  * @cdp_soc: Datapath soc handle
4903  * @peer_mac: Datapath peer mac address
4904  * @vdev_id: id of datapath vdev
4905  * @tid: TID number
4906  * @status: tx completion status
4907  * Return: 0 on success, error code on failure
4908  */
4910 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4911 				 uint16_t vdev_id,
4912 				 uint8_t tid, int status)
4913 {
4914 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4915 	struct dp_rx_tid *rx_tid = NULL;
4916 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4917 					(struct dp_soc *)cdp_soc,
4918 					peer_mac, 0, vdev_id,
4919 					DP_MOD_ID_CDP);
4920 
4921 	if (!peer) {
4922 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4923 		return QDF_STATUS_E_FAILURE;
4924 	}
4925 	rx_tid = &peer->rx_tid[tid];
4926 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4927 	if (status) {
4928 		rx_tid->delba_tx_fail_cnt++;
4929 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4930 			rx_tid->delba_tx_retry = 0;
4931 			rx_tid->delba_tx_status = 0;
4932 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4933 		} else {
4934 			rx_tid->delba_tx_retry++;
4935 			rx_tid->delba_tx_status = 1;
4936 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4937 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4938 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4939 					peer->vdev->pdev->soc->ctrl_psoc,
4940 					peer->vdev->vdev_id,
4941 					peer->mac_addr.raw, tid,
4942 					rx_tid->delba_rcode,
4943 					CDP_DELBA_REASON_NONE);
4944 		}
4945 		goto end;
4946 	} else {
4947 		rx_tid->delba_tx_success_cnt++;
4948 		rx_tid->delba_tx_retry = 0;
4949 		rx_tid->delba_tx_status = 0;
4950 	}
4951 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4952 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4953 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4954 		peer->active_ba_session_cnt--;
4955 	}
4956 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4957 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4958 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4959 	}
4960 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4961 
4962 end:
4963 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4964 
4965 	return ret;
4966 }
4967 
4968 /**
4969  * dp_set_pn_check_wifi3() - enable PN check in REO for security
4970  * @soc_t: Datapath soc handle
4971  * @vdev_id: id of datapath vdev
4972  * @peer_mac: Datapath peer mac address
4973  * @sec_type: security type
4974  * @rx_pn: Receive pn starting number
4975  *
4976  * Return: QDF_STATUS
4978  */
4980 QDF_STATUS
4981 dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
4982 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4983 		      uint32_t *rx_pn)
4984 {
4985 	struct dp_pdev *pdev;
4986 	int i;
4987 	uint8_t pn_size;
4988 	struct hal_reo_cmd_params params;
4989 	struct dp_peer *peer = NULL;
4990 	struct dp_vdev *vdev = NULL;
4991 	struct dp_soc *soc = NULL;
4992 
4993 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
4994 					      peer_mac, 0, vdev_id,
4995 					      DP_MOD_ID_CDP);
4996 
4997 	if (!peer) {
4998 		dp_peer_debug("%pK: Peer is NULL!\n", soc_t);
4999 		return QDF_STATUS_E_FAILURE;
5000 	}
5001 
5002 	vdev = peer->vdev;
5003 
5004 	if (!vdev) {
5005 		dp_peer_debug("%pK: VDEV is NULL!\n", soc_t);
5006 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5007 		return QDF_STATUS_E_FAILURE;
5008 	}
5009 
5010 	pdev = vdev->pdev;
5011 	soc = pdev->soc;
5012 	qdf_mem_zero(&params, sizeof(params));
5013 
5014 	params.std.need_status = 1;
5015 	params.u.upd_queue_params.update_pn_valid = 1;
5016 	params.u.upd_queue_params.update_pn_size = 1;
5017 	params.u.upd_queue_params.update_pn = 1;
5018 	params.u.upd_queue_params.update_pn_check_needed = 1;
5019 	params.u.upd_queue_params.update_svld = 1;
5020 	params.u.upd_queue_params.svld = 0;
5021 
5022 	switch (sec_type) {
5023 	case cdp_sec_type_tkip_nomic:
5024 	case cdp_sec_type_aes_ccmp:
5025 	case cdp_sec_type_aes_ccmp_256:
5026 	case cdp_sec_type_aes_gcmp:
5027 	case cdp_sec_type_aes_gcmp_256:
5028 		params.u.upd_queue_params.pn_check_needed = 1;
5029 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
5030 		pn_size = 48;
5031 		break;
5032 	case cdp_sec_type_wapi:
5033 		params.u.upd_queue_params.pn_check_needed = 1;
5034 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
5035 		pn_size = 128;
5036 		if (vdev->opmode == wlan_op_mode_ap) {
5037 			params.u.upd_queue_params.pn_even = 1;
5038 			params.u.upd_queue_params.update_pn_even = 1;
5039 		} else {
5040 			params.u.upd_queue_params.pn_uneven = 1;
5041 			params.u.upd_queue_params.update_pn_uneven = 1;
5042 		}
5043 		break;
5044 	default:
5045 		params.u.upd_queue_params.pn_check_needed = 0;
5046 		pn_size = 0;
5047 		break;
5048 	}
5049 
5051 	for (i = 0; i < DP_MAX_TIDS; i++) {
5052 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5053 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5054 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5055 			params.std.addr_lo =
5056 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5057 			params.std.addr_hi =
5058 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5059 
5060 			if (pn_size) {
5061 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
5062 					     soc, i, rx_pn[3], rx_pn[2],
5063 					     rx_pn[1], rx_pn[0]);
5064 				params.u.upd_queue_params.update_pn_valid = 1;
5065 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
5066 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
5067 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
5068 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
5069 			}
5070 			rx_tid->pn_size = pn_size;
5071 			if (dp_reo_send_cmd(soc,
5072 					    CMD_UPDATE_RX_REO_QUEUE,
5073 					    &params, dp_rx_tid_update_cb,
5074 					    rx_tid)) {
5075 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE "
5076 					   "tid %d desc %pK", rx_tid->tid,
5077 					   (void *)(rx_tid->hw_qdesc_paddr));
5078 				DP_STATS_INC(soc,
5079 					     rx.err.reo_cmd_send_fail, 1);
5080 			}
5081 		} else {
5082 			dp_peer_info("%pK: PN Check not set up for TID:%d", soc, i);
5083 		}
5084 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5085 	}
5086 
5087 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5088 
5089 	return QDF_STATUS_SUCCESS;
5090 }
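
/* PN configuration applied above, by security type (as encoded in the
 * switch): the CCMP/GCMP variants and TKIP-no-MIC use a 48-bit PN
 * (PN_SIZE_48); WAPI uses a 128-bit PN (PN_SIZE_128) with an even initial
 * PN on the AP side and an uneven one on the non-AP side; all other types
 * leave the REO PN check disabled. A hypothetical CCMP caller would look
 * like this ("pn" holds the 48-bit starting PN in the low words):
 *
 *	uint32_t pn[4] = { 0x1, 0x0, 0x0, 0x0 };
 *
 *	dp_set_pn_check_wifi3(cdp_soc, vdev_id, peer_mac,
 *			      cdp_sec_type_aes_ccmp, pn);
 */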
5091 
5093 /**
5094  * dp_set_key_sec_type_wifi3() - set security mode of key
5095  * @soc: Datapath soc handle
5096  * @vdev_id: id of datapath vdev
5097  * @peer_mac: Datapath peer mac address
5098  * @sec_type: security type
5099  * @is_unicast: key type
5100  *
5101  * Return: QDF_STATUS
5103  */
5105 QDF_STATUS
5106 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
5107 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
5108 			  bool is_unicast)
5109 {
5110 	struct dp_peer *peer =
5111 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
5112 						       peer_mac, 0, vdev_id,
5113 						       DP_MOD_ID_CDP);
5114 	int sec_index;
5115 
5116 	if (!peer) {
5117 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
5118 		return QDF_STATUS_E_FAILURE;
5119 	}
5120 
5121 	if (!peer->txrx_peer) {
5122 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5123 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
5124 		return QDF_STATUS_E_FAILURE;
5125 	}
5126 
5127 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5128 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5129 		     is_unicast ? "ucast" : "mcast", sec_type);
5130 
5131 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5132 	peer->txrx_peer->security[sec_index].sec_type = sec_type;
5133 
5134 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5135 
5136 	return QDF_STATUS_SUCCESS;
5137 }
5138 
5139 void
5140 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
5141 		      enum cdp_sec_type sec_type, int is_unicast,
5142 		      u_int32_t *michael_key,
5143 		      u_int32_t *rx_pn)
5144 {
5145 	struct dp_peer *peer;
5146 	struct dp_txrx_peer *txrx_peer;
5147 	int sec_index;
5148 
5149 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5150 	if (!peer) {
5151 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
5152 			    peer_id);
5153 		return;
5154 	}
5155 	txrx_peer = dp_get_txrx_peer(peer);
5156 	if (!txrx_peer) {
5157 		dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
5158 			    peer_id);
5159 		/* drop the reference taken by dp_peer_get_ref_by_id() above */
		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
		return;
5160 	}
5161 
5162 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
5163 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5164 		     is_unicast ? "ucast" : "mcast", sec_type);
5165 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
5166 
5167 	txrx_peer->security[sec_index].sec_type = sec_type;
5168 #ifdef notyet /* TODO: See if this is required for defrag support */
5169 	/* michael key only valid for TKIP, but for simplicity,
5170 	 * copy it anyway
5171 	 */
5172 	qdf_mem_copy(
5173 		&peer->txrx_peer->security[sec_index].michael_key[0],
5174 		michael_key,
5175 		sizeof(peer->txrx_peer->security[sec_index].michael_key));
5176 #ifdef BIG_ENDIAN_HOST
5177 	OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
5178 		     sizeof(peer->txrx_peer->security[sec_index].michael_key));
5179 #endif /* BIG_ENDIAN_HOST */
5180 #endif
5181 
5182 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
5183 	if (sec_type != cdp_sec_type_wapi) {
5184 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
5185 	} else {
5186 		for (i = 0; i < DP_MAX_TIDS; i++) {
5187 			/*
5188 			 * Setting PN valid bit for WAPI sec_type,
5189 			 * since WAPI PN has to be started with predefined value
5190 			 */
5191 			peer->tids_last_pn_valid[i] = 1;
5192 			qdf_mem_copy(
5193 				(u_int8_t *) &peer->tids_last_pn[i],
5194 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
5195 			peer->tids_last_pn[i].pn128[1] =
5196 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
5197 			peer->tids_last_pn[i].pn128[0] =
5198 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
5199 		}
5200 	}
5201 #endif
5202 	/* TODO: Update HW TID queue with PN check parameters (pn type for
5203 	 * all security types and last pn for WAPI) once REO command API
5204 	 * is available
5205 	 */
5206 
5207 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5208 }
5209 
5210 #ifdef QCA_PEER_EXT_STATS
5211 /*
5212  * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
5213  *                                 stats context
5214  * @soc: DP SoC context
5215  * @txrx_peer: DP txrx peer context
5216  *
5217  * Allocate the peer delay stats context
5218  *
5219  * Return: QDF_STATUS_SUCCESS if allocation is
5220  *	   successful
5221  */
5222 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
5223 					 struct dp_txrx_peer *txrx_peer)
5224 {
5225 	uint8_t tid, ctx_id;
5226 
5227 	if (!soc || !txrx_peer) {
5228 		dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
5229 		return QDF_STATUS_E_INVAL;
5230 	}
5231 
5232 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5233 		return QDF_STATUS_SUCCESS;
5234 
5235 	/*
5236 	 * Allocate memory for peer extended stats.
5237 	 */
5238 	txrx_peer->delay_stats =
5239 			qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
5240 	if (!txrx_peer->delay_stats) {
5241 		dp_err("Peer extended stats obj alloc failed!!");
5242 		return QDF_STATUS_E_NOMEM;
5243 	}
5244 
5245 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
5246 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
5247 			struct cdp_delay_tx_stats *tx_delay =
5248 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
5249 			struct cdp_delay_rx_stats *rx_delay =
5250 			&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
5251 
5252 			dp_hist_init(&tx_delay->tx_swq_delay,
5253 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
5254 			dp_hist_init(&tx_delay->hwtx_delay,
5255 				     CDP_HIST_TYPE_HW_COMP_DELAY);
5256 			dp_hist_init(&rx_delay->to_stack_delay,
5257 				     CDP_HIST_TYPE_REAP_STACK);
5258 		}
5259 	}
5260 
5261 	return QDF_STATUS_SUCCESS;
5262 }
5263 
5264 /*
5265  * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
5266  * @soc: DP SoC context
 * @txrx_peer: txrx DP peer context
5267  *
5268  * Free the peer delay stats context
5269  *
5270  * Return: Void
5271  */
5272 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
5273 				     struct dp_txrx_peer *txrx_peer)
5274 {
5275 	if (!txrx_peer) {
5276 		dp_warn("peer_ext dealloc failed due to NULL peer object");
5277 		return;
5278 	}
5279 
5280 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
5281 		return;
5282 
5283 	if (!txrx_peer->delay_stats)
5284 		return;
5285 
5286 	qdf_mem_free(txrx_peer->delay_stats);
5287 	txrx_peer->delay_stats = NULL;
5288 }
5289 
5290 /**
5291  * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
5292  *
5293  * @txrx_peer: dp_txrx_peer handle
5294  *
5295  * Return: void
5296  */
5297 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5298 {
5299 	if (txrx_peer->delay_stats)
5300 		qdf_mem_zero(txrx_peer->delay_stats,
5301 			     sizeof(struct dp_peer_delay_stats));
5302 }
5303 #endif
5304 
5305 #ifdef WLAN_PEER_JITTER
5306 /**
5307  * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
5308  *
5309  * @pdev: Datapath pdev handle
5310  * @txrx_peer: dp_txrx_peer handle
5311  *
5312  * Return: QDF_STATUS
5313  */
5314 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
5315 					  struct dp_txrx_peer *txrx_peer)
5316 {
5317 	if (!pdev || !txrx_peer) {
5318 		dp_warn("Null pdev or peer");
5319 		return QDF_STATUS_E_INVAL;
5320 	}
5321 
5322 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5323 		return QDF_STATUS_SUCCESS;
5324 
5325 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5326 		/*
5327 		 * Allocate memory on per tid basis when nss is enabled
5328 		 */
5329 		txrx_peer->jitter_stats =
5330 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5331 					* DP_MAX_TIDS);
5332 	} else {
5333 		/*
5334 		 * Allocate memory on per tid per ring basis
5335 		 */
5336 		txrx_peer->jitter_stats =
5337 			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
5338 					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
5339 	}
5340 
5341 	if (!txrx_peer->jitter_stats) {
5342 		dp_warn("Jitter stats obj alloc failed!!");
5343 		return QDF_STATUS_E_NOMEM;
5344 	}
5345 
5346 	return QDF_STATUS_SUCCESS;
5347 }
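
/*
 * Indexing sketch (illustrative only): locating one TID's entry in the
 * two jitter stats layouts allocated above. The TID-major indexing for
 * the per-ring layout is an assumption for illustration;
 * dp_example_jitter_stats_entry() and DP_PEER_USAGE_EXAMPLES are
 * hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static struct cdp_peer_tid_stats *
dp_example_jitter_stats_entry(struct dp_pdev *pdev,
			      struct dp_txrx_peer *txrx_peer,
			      uint8_t tid, uint8_t ring_id)
{
	struct cdp_peer_tid_stats *base = txrx_peer->jitter_stats;

	if (!base || tid >= DP_MAX_TIDS || ring_id >= CDP_MAX_TXRX_CTX)
		return NULL;

	/* nss build: one entry per TID */
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		return &base[tid];

	/* otherwise: one entry per TID per ring, grouped by TID */
	return &base[tid * CDP_MAX_TXRX_CTX + ring_id];
}
#endif /* DP_PEER_USAGE_EXAMPLES */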
5348 
5349 /**
5350  * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
5351  *
5352  * @pdev: Datapath pdev handle
5353  * @txrx_peer: dp_txrx_peer handle
5354  *
5355  * Return: void
5356  */
5357 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
5358 				      struct dp_txrx_peer *txrx_peer)
5359 {
5360 	if (!pdev || !txrx_peer) {
5361 		dp_warn("Null pdev or peer");
5362 		return;
5363 	}
5364 
5365 	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
5366 		return;
5367 
5368 	if (txrx_peer->jitter_stats) {
5369 		qdf_mem_free(txrx_peer->jitter_stats);
5370 		txrx_peer->jitter_stats = NULL;
5371 	}
5372 }
5373 
5374 /**
5375  * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
5376  *
5377  * @txrx_peer: dp_txrx_peer handle
5378  *
5379  * Return: void
5380  */
5381 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
5382 {
5383 	struct cdp_peer_tid_stats *jitter_stats = NULL;
5384 
5385 	if (!txrx_peer) {
5386 		dp_warn("Null peer");
5387 		return;
5388 	}
5389 
5390 	if (!wlan_cfg_is_peer_jitter_stats_enabled(
5391 				txrx_peer->vdev->pdev->soc->wlan_cfg_ctx))
5392 		return;
5394 
5395 	jitter_stats = txrx_peer->jitter_stats;
5396 	if (!jitter_stats)
5397 		return;
5398 
5399 	if (wlan_cfg_get_dp_pdev_nss_enabled(
5400 				txrx_peer->vdev->pdev->wlan_cfg_ctx))
5401 		qdf_mem_zero(jitter_stats,
5402 			     sizeof(struct cdp_peer_tid_stats) *
5403 			     DP_MAX_TIDS);
5404 	else
5405 		qdf_mem_zero(jitter_stats,
5406 			     sizeof(struct cdp_peer_tid_stats) *
5407 			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
5408 }
5411 #endif
5412 
5413 QDF_STATUS
5414 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
5415 			uint8_t tid, uint16_t win_sz)
5416 {
5417 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
5418 	struct dp_peer *peer;
5419 	struct dp_rx_tid *rx_tid;
5420 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5421 
5422 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
5423 
5424 	if (!peer) {
5425 		dp_peer_err("%pK: Couldn't find peer from ID %d",
5426 			    soc, peer_id);
5427 		return QDF_STATUS_E_FAILURE;
5428 	}
5429 
5430 	qdf_assert_always(tid < DP_MAX_TIDS);
5431 
5432 	rx_tid = &peer->rx_tid[tid];
5433 
5434 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
5435 		if (!rx_tid->delba_tx_status) {
5436 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
5437 				     soc, peer_id, tid, win_sz);
5438 
5439 			qdf_spin_lock_bh(&rx_tid->tid_lock);
5440 
5441 			rx_tid->delba_tx_status = 1;
5442 
5443 			rx_tid->rx_ba_win_size_override =
5444 			    qdf_min((uint16_t)63, win_sz);
5445 
5446 			rx_tid->delba_rcode =
5447 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
5448 
5449 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
5450 
5451 			if (soc->cdp_soc.ol_ops->send_delba)
5452 				soc->cdp_soc.ol_ops->send_delba(
5453 					peer->vdev->pdev->soc->ctrl_psoc,
5454 					peer->vdev->vdev_id,
5455 					peer->mac_addr.raw,
5456 					tid,
5457 					rx_tid->delba_rcode,
5458 					CDP_DELBA_REASON_NONE);
5459 		}
5460 	} else {
5461 		dp_peer_err("%pK: BA session is not set up for TID:%d", soc, tid);
5462 		status = QDF_STATUS_E_FAILURE;
5463 	}
5464 
5465 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5466 
5467 	return status;
5468 }
5469 
5470 #ifdef DP_PEER_EXTENDED_API
5471 /**
5472  * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
5473  * @soc: DP soc handle
5474  * @txrx_peer: Core txrx_peer handle
5475  * @set_bw: enum of bandwidth to be set for this peer connection
5476  *
5477  * Return: None
5478  */
5479 static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
5480 			   enum cdp_peer_bw set_bw)
5481 {
5482 	if (!txrx_peer)
5483 		return;
5484 
5485 	txrx_peer->bw = set_bw;
5486 
5487 	switch (set_bw) {
5488 	case CDP_160_MHZ:
5489 	case CDP_320_MHZ:
5490 		txrx_peer->mpdu_retry_threshold =
5491 				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
5492 		break;
5493 	case CDP_20_MHZ:
5494 	case CDP_40_MHZ:
5495 	case CDP_80_MHZ:
5496 	default:
5497 		txrx_peer->mpdu_retry_threshold =
5498 				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
5499 		break;
5500 	}
5501 
5502 	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
5503 		txrx_peer->peer_id, txrx_peer->bw,
5504 		txrx_peer->mpdu_retry_threshold);
5505 }
5506 
5507 #ifdef WLAN_FEATURE_11BE_MLO
5508 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5509 			    struct ol_txrx_desc_type *sta_desc)
5510 {
5511 	struct dp_peer *peer;
5512 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5513 
5514 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5515 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5516 
5517 	if (!peer)
5518 		return QDF_STATUS_E_FAULT;
5519 
5520 	qdf_spin_lock_bh(&peer->peer_info_lock);
5521 	peer->state = OL_TXRX_PEER_STATE_CONN;
5522 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5523 
5524 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5525 
5526 	dp_rx_flush_rx_cached(peer, false);
5527 
5528 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5529 		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
5530 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
5531 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
5532 		peer->mld_peer->state = peer->state;
5533 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
5534 		dp_rx_flush_rx_cached(peer->mld_peer, false);
5535 	}
5536 
5537 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5538 
5539 	return QDF_STATUS_SUCCESS;
5540 }
5541 
5542 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5543 				enum ol_txrx_peer_state state)
5544 {
5545 	struct dp_peer *peer;
5546 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5547 
5548 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5549 				       DP_MOD_ID_CDP);
5550 	if (!peer) {
5551 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
5552 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5553 		return QDF_STATUS_E_FAILURE;
5554 	}
5555 	peer->state = state;
5556 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5557 
5558 	if (peer->txrx_peer)
5559 		peer->txrx_peer->authorize = peer->authorize;
5560 
5561 	dp_peer_info("peer " QDF_MAC_ADDR_FMT " state %d",
5562 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5563 		     peer->state);
5564 
5565 	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
5566 		peer->mld_peer->state = peer->state;
5567 		peer->mld_peer->txrx_peer->authorize = peer->authorize;
5568 		dp_peer_info("mld peer " QDF_MAC_ADDR_FMT " state %d",
5569 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
5570 			     peer->mld_peer->state);
5571 	}
5572 
5573 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5574 	 * Decrement it here.
5575 	 */
5576 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5577 
5578 	return QDF_STATUS_SUCCESS;
5579 }
5580 #else
5581 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5582 			    struct ol_txrx_desc_type *sta_desc)
5583 {
5584 	struct dp_peer *peer;
5585 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5586 
5587 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
5588 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5589 
5590 	if (!peer)
5591 		return QDF_STATUS_E_FAULT;
5592 
5593 	qdf_spin_lock_bh(&peer->peer_info_lock);
5594 	peer->state = OL_TXRX_PEER_STATE_CONN;
5595 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5596 
5597 	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);
5598 
5599 	dp_rx_flush_rx_cached(peer, false);
5600 
5601 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5602 
5603 	return QDF_STATUS_SUCCESS;
5604 }
5605 
5606 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5607 				enum ol_txrx_peer_state state)
5608 {
5609 	struct dp_peer *peer;
5610 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5611 
5612 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5613 				       DP_MOD_ID_CDP);
5614 	if (!peer) {
5615 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
5616 			    soc, QDF_MAC_ADDR_REF(peer_mac));
5617 		return QDF_STATUS_E_FAILURE;
5618 	}
5619 	peer->state = state;
5620 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
5621 
5622 	if (peer->txrx_peer)
5623 		peer->txrx_peer->authorize = peer->authorize;
5624 
5625 	dp_info("peer %pK state %d", peer, peer->state);
5626 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5627 	 * Decrement it here.
5628 	 */
5629 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5630 
5631 	return QDF_STATUS_SUCCESS;
5632 }
5633 #endif
5634 
5635 QDF_STATUS
5636 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5637 	      struct qdf_mac_addr peer_addr)
5638 {
5639 	struct dp_peer *peer;
5640 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5641 
5642 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
5643 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
5644 	if (!peer || !peer->valid)
5645 		return QDF_STATUS_E_FAULT;
5646 
5647 	dp_clear_peer_internal(soc, peer);
5648 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5649 	return QDF_STATUS_SUCCESS;
5650 }
5651 
5652 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
5653 			 uint8_t *vdev_id)
5654 {
5655 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5656 	struct dp_peer *peer =
5657 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
5658 				       DP_MOD_ID_CDP);
5659 
5660 	if (!peer)
5661 		return QDF_STATUS_E_FAILURE;
5662 
5663 	dp_info("peer %pK vdev %pK vdev id %d",
5664 		peer, peer->vdev, peer->vdev->vdev_id);
5665 	*vdev_id = peer->vdev->vdev_id;
5666 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
5667 	 * Decrement it here.
5668 	 */
5669 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5670 
5671 	return QDF_STATUS_SUCCESS;
5672 }
5673 
5674 struct cdp_vdev *
5675 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
5676 			 struct qdf_mac_addr peer_addr)
5677 {
5678 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5679 	struct dp_peer *peer = NULL;
5680 	struct cdp_vdev *vdev = NULL;
5681 
5682 	if (!pdev) {
5683 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
5684 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
5685 		return NULL;
5686 	}
5687 
5688 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
5689 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
5690 	if (!peer) {
5691 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5692 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
5693 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
5694 		return NULL;
5695 	}
5696 
5697 	vdev = (struct cdp_vdev *)peer->vdev;
5698 
5699 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5700 	return vdev;
5701 }
5702 
5703 /**
5704  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
5705  * @peer_handle: peer instance
5706  *
5707  * Get the virtual interface instance to which the peer belongs
5708  *
5709  * Return: virtual interface instance pointer
5710  *         NULL in case it cannot be found
5711  */
5712 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
5713 {
5714 	struct dp_peer *peer = peer_handle;
5715 
5716 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
5717 	return (struct cdp_vdev *)peer->vdev;
5718 }
5719 
5720 /**
5721  * dp_peer_get_peer_mac_addr() - Get peer mac address
5722  * @peer_handle: peer instance
5723  *
5724  * Get the peer's mac address
5725  *
5726  * Return: peer mac address pointer
5727  *         NULL in case it cannot be found
5728  */
5729 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
5730 {
5731 	struct dp_peer *peer = peer_handle;
5732 	uint8_t *mac;
5733 
5734 	mac = peer->mac_addr.raw;
5735 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5736 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
5737 	return peer->mac_addr.raw;
5738 }
5739 
5740 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5741 		      uint8_t *peer_mac)
5742 {
5743 	enum ol_txrx_peer_state peer_state;
5744 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5745 	struct cdp_peer_info peer_info = { 0 };
5746 	struct dp_peer *peer;
5747 	struct dp_peer *tgt_peer;
5748 
5749 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
5750 				 false, CDP_WILD_PEER_TYPE);
5751 
5752 	peer =  dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
5753 
5754 	if (!peer)
5755 		return OL_TXRX_PEER_STATE_INVALID;
5756 
5757 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
5758 
5759 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
5760 	peer_state = tgt_peer->state;
5761 
5762 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5763 
5764 	return peer_state;
5765 }
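
/*
 * Usage sketch (illustrative only): querying a peer's state through the
 * getter above. dp_example_is_peer_authorized() and
 * DP_PEER_USAGE_EXAMPLES are hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static bool dp_example_is_peer_authorized(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id, uint8_t *peer_mac)
{
	/* OL_TXRX_PEER_STATE_INVALID is returned when no peer matches */
	return dp_get_peer_state(soc_hdl, vdev_id, peer_mac) ==
	       OL_TXRX_PEER_STATE_AUTH;
}
#endif /* DP_PEER_USAGE_EXAMPLES */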
5766 
5767 /**
5768  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
5769  * @pdev: data path device instance
5770  *
5771  * Initialize the local peer id pool for the physical device
5772  *
5773  * Return: none
5774  */
5775 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
5776 {
5777 	int i;
5778 
5779 	/* point the freelist to the first ID */
5780 	pdev->local_peer_ids.freelist = 0;
5781 
5782 	/* link each ID to the next one */
5783 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
5784 		pdev->local_peer_ids.pool[i] = i + 1;
5785 		pdev->local_peer_ids.map[i] = NULL;
5786 	}
5787 
5788 	/* link the last ID to itself, to mark the end of the list */
5789 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
5790 	pdev->local_peer_ids.pool[i] = i;
5791 
5792 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
5793 	DP_TRACE(INFO, "Peer pool init");
5794 }
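
/*
 * Worked example of the freelist encoding above (illustrative): with
 * OL_TXRX_NUM_LOCAL_PEER_IDS == 4, init leaves
 *
 *	freelist = 0, pool[] = { 1, 2, 3, 4, 4 }
 *
 * where each slot holds the index of the next free ID and the extra
 * slot pool[4] == 4 is the self-linked end-of-list marker. Two
 * allocations hand out IDs 0 and 1 and leave freelist = 2; freeing
 * ID 0 pushes it back on the head: pool[0] = 2, freelist = 0.
 */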
5795 
5796 /**
5797  * dp_local_peer_id_alloc() - allocate local peer id
5798  * @pdev: data path device instance
5799  * @peer: new peer instance
5800  *
5801  * Allocate a local peer id for the new peer
5802  *
5803  * Return: none
5804  */
5805 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5806 {
5807 	int i;
5808 
5809 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5810 	i = pdev->local_peer_ids.freelist;
5811 	if (pdev->local_peer_ids.pool[i] == i) {
5812 		/* the list is empty, except for the list-end marker */
5813 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5814 	} else {
5815 		/* take the head ID and advance the freelist */
5816 		peer->local_id = i;
5817 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5818 		pdev->local_peer_ids.map[i] = peer;
5819 	}
5820 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5821 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5822 }
5823 
5824 /**
5825  * dp_local_peer_id_free() - remove local peer id
5826  * @pdev: data path device instance
5827  * @peer: peer instance whose id should be removed
5828  *
5829  * Return the peer's local id to the free pool
5830  *
5831  * Return: none
5832  */
5833 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5834 {
5835 	int i = peer->local_id;
5836 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5837 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5838 		return;
5839 	}
5840 
5841 	/* put this ID on the head of the freelist */
5842 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5843 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5844 	pdev->local_peer_ids.freelist = i;
5845 	pdev->local_peer_ids.map[i] = NULL;
5846 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5847 }
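
/*
 * Usage sketch (illustrative only): pairing the local peer id helpers
 * above in an attach/detach flow. dp_example_local_id_flow() and
 * DP_PEER_USAGE_EXAMPLES are hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static void dp_example_local_id_flow(struct dp_pdev *pdev,
				     struct dp_peer *peer)
{
	dp_local_peer_id_alloc(pdev, peer);

	/* Pool exhaustion is signalled in-band, not by a return code */
	if (peer->local_id == OL_TXRX_INVALID_LOCAL_PEER_ID)
		return;

	/* ... peer->local_id now indexes pdev->local_peer_ids.map[] ... */

	dp_local_peer_id_free(pdev, peer);
}
#endif /* DP_PEER_USAGE_EXAMPLES */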
5848 
5849 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5850 				uint8_t vdev_id, uint8_t *peer_addr)
5851 {
5852 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5853 	struct dp_peer *peer = NULL;
5854 
5855 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5856 				      DP_MOD_ID_CDP);
5857 	if (!peer)
5858 		return false;
5859 
5860 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5861 
5862 	return true;
5863 }
5864 
5865 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5866 				      uint8_t vdev_id, uint8_t *peer_addr,
5867 				      uint16_t max_bssid)
5868 {
5869 	int i;
5870 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5871 	struct dp_peer *peer = NULL;
5872 
5873 	for (i = 0; i < max_bssid; i++) {
5874 		/* Need to check vdevs other than the vdev_id */
5875 		if (vdev_id == i)
5876 			continue;
5877 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5878 					      DP_MOD_ID_CDP);
5879 		if (peer) {
5880 			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
5881 			       QDF_MAC_ADDR_REF(peer_addr), i);
5882 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5883 			return true;
5884 		}
5885 	}
5886 
5887 	return false;
5888 }
5889 
5890 void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5891 			      uint8_t *peer_mac, bool val)
5892 {
5893 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5894 	struct dp_peer *peer = NULL;
5895 
5896 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5897 				      DP_MOD_ID_CDP);
5898 	if (!peer) {
5899 		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
5900 		       QDF_MAC_ADDR_REF(peer_mac));
5901 		return;
5902 	}
5903 
5904 	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
5905 		val, QDF_MAC_ADDR_REF(peer_mac));
5906 	peer->is_tdls_peer = val;
5907 
5908 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5909 }
5910 #endif
5911 
5912 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5913 			uint8_t *peer_addr)
5914 {
5915 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5916 	struct dp_peer *peer = NULL;
5917 
5918 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5919 				      DP_MOD_ID_CDP);
5920 	if (peer) {
5921 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5922 		return true;
5923 	}
5924 
5925 	return false;
5926 }
5927 
5928 #ifdef IPA_OFFLOAD
5929 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
5930 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
5931 {
5932 	struct dp_soc *soc = peer->vdev->pdev->soc;
5933 	struct hal_reo_cmd_params params;
5934 	int i;
5935 	int stats_cmd_sent_cnt = 0;
5936 	QDF_STATUS status;
5937 	uint16_t peer_id = peer->peer_id;
5938 	unsigned long comb_peer_id_tid;
5939 	struct dp_rx_tid *rx_tid;
5940 
5941 	if (!dp_stats_cmd_cb)
5942 		return stats_cmd_sent_cnt;
5943 
5944 	qdf_mem_zero(&params, sizeof(params));
5945 	for (i = 0; i < DP_MAX_TIDS; i++) {
5946 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
5947 			continue;
5948 
5949 		rx_tid = &peer->rx_tid[i];
5950 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5951 			params.std.need_status = 1;
5952 			params.std.addr_lo =
5953 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5954 			params.std.addr_hi =
5955 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5956 			params.u.stats_params.clear = 1;
5957 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
5958 					    | peer_id);
5959 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
5960 						 &params, dp_stats_cmd_cb,
5961 						 (void *)comb_peer_id_tid);
5962 			if (QDF_IS_STATUS_SUCCESS(status))
5963 				stats_cmd_sent_cnt++;
5964 
5965 			/* Flush REO descriptor from HW cache to update stats
5966 			 * in descriptor memory. This is to help debugging
5967 			 */
5968 			qdf_mem_zero(&params, sizeof(params));
5969 			params.std.need_status = 0;
5970 			params.std.addr_lo =
5971 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5972 			params.std.addr_hi =
5973 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5974 			params.u.fl_cache_params.flush_no_inval = 1;
5975 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5976 					NULL);
5977 		}
5978 	}
5979 
5980 	return stats_cmd_sent_cnt;
5981 }
5982 
5983 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
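
/*
 * Decode sketch (illustrative only) for the comb_peer_id_tid cookie
 * built above: the TID is packed above DP_PEER_REO_STATS_TID_SHIFT and
 * the peer id in the bits below it (assumed to fit). The helper and
 * DP_PEER_USAGE_EXAMPLES guard are hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static void dp_example_decode_peer_id_tid(unsigned long comb_peer_id_tid,
					  uint16_t *peer_id, uint8_t *tid)
{
	*tid = comb_peer_id_tid >> DP_PEER_REO_STATS_TID_SHIFT;
	*peer_id = comb_peer_id_tid &
		   ((1UL << DP_PEER_REO_STATS_TID_SHIFT) - 1);
}
#endif /* DP_PEER_USAGE_EXAMPLES */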
5984 
5985 #endif
5986 /**
5987  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
5988  * @peer: DP peer handle
5989  * @dp_stats_cmd_cb: REO command callback function
5990  * @cb_ctxt: Callback context
5991  *
5992  * Return: count of TID stats commands sent successfully
5993  */
5994 int dp_peer_rxtid_stats(struct dp_peer *peer,
5995 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5996 			void *cb_ctxt)
5997 {
5998 	struct dp_soc *soc = peer->vdev->pdev->soc;
5999 	struct hal_reo_cmd_params params;
6000 	int i;
6001 	int stats_cmd_sent_cnt = 0;
6002 	QDF_STATUS status;
6003 	struct dp_rx_tid *rx_tid;
6004 
6005 	if (!dp_stats_cmd_cb)
6006 		return stats_cmd_sent_cnt;
6007 
6008 	qdf_mem_zero(&params, sizeof(params));
6009 	for (i = 0; i < DP_MAX_TIDS; i++) {
6010 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
6011 			continue;
6012 
6013 		rx_tid = &peer->rx_tid[i];
6014 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
6015 			params.std.need_status = 1;
6016 			params.std.addr_lo =
6017 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6018 			params.std.addr_hi =
6019 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6020 
6021 			if (cb_ctxt) {
6022 				status = dp_reo_send_cmd(
6023 						soc, CMD_GET_QUEUE_STATS,
6024 						&params, dp_stats_cmd_cb,
6025 						cb_ctxt);
6026 			} else {
6027 				status = dp_reo_send_cmd(
6028 						soc, CMD_GET_QUEUE_STATS,
6029 						&params, dp_stats_cmd_cb,
6030 						rx_tid);
6031 			}
6032 
6033 			if (QDF_IS_STATUS_SUCCESS(status))
6034 				stats_cmd_sent_cnt++;
6035 
6037 			/* Flush REO descriptor from HW cache to update stats
6038 			 * in descriptor memory. This is to help debugging
6039 			 */
6040 			qdf_mem_zero(&params, sizeof(params));
6041 			params.std.need_status = 0;
6042 			params.std.addr_lo =
6043 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6044 			params.std.addr_hi =
6045 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6046 			params.u.fl_cache_params.flush_no_inval = 1;
6047 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
6048 					NULL);
6049 		}
6050 	}
6051 
6052 	return stats_cmd_sent_cnt;
6053 }
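
/*
 * Callback sketch (illustrative only): a minimal dp_rxtid_stats_cmd_cb
 * that could be passed to dp_peer_rxtid_stats() above with a NULL
 * cb_ctxt, in which case the per-TID dp_rx_tid is handed back as the
 * context. dp_example_rxtid_stats_cb() and DP_PEER_USAGE_EXAMPLES are
 * hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static void dp_example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if (reo_status->queue_status.header.status != HAL_REO_CMD_SUCCESS) {
		dp_err_rl("REO queue stats failed for tid %d", rx_tid->tid);
		return;
	}

	/* ... consume the counters in reo_status->queue_status here ... */
}
#endif /* DP_PEER_USAGE_EXAMPLES */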
6054 
6055 QDF_STATUS
6056 dp_set_michael_key(struct cdp_soc_t *soc,
6057 		   uint8_t vdev_id,
6058 		   uint8_t *peer_mac,
6059 		   bool is_unicast, uint32_t *key)
6060 {
6061 	uint8_t sec_index = is_unicast ? 1 : 0;
6062 	struct dp_peer *peer =
6063 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
6064 						       peer_mac, 0, vdev_id,
6065 						       DP_MOD_ID_CDP);
6066 
6067 	if (!peer) {
6068 		dp_peer_err("%pK: peer not found ", soc);
6069 		return QDF_STATUS_E_FAILURE;
6070 	}
6071 
6072 	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
6073 		     key, IEEE80211_WEP_MICLEN);
6074 
6075 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6076 
6077 	return QDF_STATUS_SUCCESS;
6078 }
6079 
6081 /**
6082  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
6083  * @soc: DP soc
6084  * @vdev: vdev
6085  * @mod_id: id of module requesting reference
6086  *
6087  * Return: VDEV BSS peer with reference held, or NULL if not found
6088  */
6089 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
6090 					   struct dp_vdev *vdev,
6091 					   enum dp_mod_id mod_id)
6092 {
6093 	struct dp_peer *peer = NULL;
6094 
6095 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6096 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6097 		if (peer->bss_peer)
6098 			break;
6099 	}
6100 
6101 	if (!peer) {
6102 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6103 		return NULL;
6104 	}
6105 
6106 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6107 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6108 		return peer;
6109 	}
6110 
6111 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
6112 	return NULL;
6113 }
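
/*
 * Usage sketch (illustrative only): every successful
 * dp_vdev_bss_peer_ref_n_get() must be paired with dp_peer_unref_delete()
 * under the same module id (dp_get_rx_reo_queue_info() below does the
 * same). dp_example_bss_peer_access() and DP_PEER_USAGE_EXAMPLES are
 * hypothetical, not part of this driver.
 */
#ifdef DP_PEER_USAGE_EXAMPLES
static void dp_example_bss_peer_access(struct dp_soc *soc,
				       struct dp_vdev *vdev)
{
	struct dp_peer *peer =
		dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);

	if (!peer)
		return;

	/* ... peer is safe to use while the reference is held ... */

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#endif /* DP_PEER_USAGE_EXAMPLES */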
6114 
6115 /**
6116  * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
6117  * @soc: DP soc
6118  * @vdev: vdev
6119  * @mod_id: id of module requesting reference
6120  *
6121  * Return: VDEV self peer with reference held, or NULL if not found
6122  */
6123 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
6124 						struct dp_vdev *vdev,
6125 						enum dp_mod_id mod_id)
6126 {
6127 	struct dp_peer *peer;
6128 
6129 	if (vdev->opmode != wlan_op_mode_sta)
6130 		return NULL;
6131 
6132 	qdf_spin_lock_bh(&vdev->peer_list_lock);
6133 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6134 		if (peer->sta_self_peer)
6135 			break;
6136 	}
6137 
6138 	if (!peer) {
6139 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6140 		return NULL;
6141 	}
6142 
6143 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
6144 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
6145 		return peer;
6146 	}
6147 
6148 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
6149 	return NULL;
6150 }
6151 
6152 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
6153 void dp_dump_rx_reo_queue_info(
6154 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
6155 {
6156 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
6157 
6158 	if (!rx_tid)
6159 		return;
6160 
6161 	if (reo_status->fl_cache_status.header.status !=
6162 		HAL_REO_CMD_SUCCESS) {
6163 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
6164 			  reo_status->fl_cache_status.header.status);
6165 		return;
6166 	}
6167 	qdf_spin_lock_bh(&rx_tid->tid_lock);
6168 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
6169 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
6170 }
6171 
6172 void dp_send_cache_flush_for_rx_tid(
6173 	struct dp_soc *soc, struct dp_peer *peer)
6174 {
6175 	int i;
6176 	struct dp_rx_tid *rx_tid;
6177 	struct hal_reo_cmd_params params;
6178 
6179 	if (!peer) {
6180 		dp_err_rl("Peer is NULL");
6181 		return;
6182 	}
6183 
6184 	for (i = 0; i < DP_MAX_TIDS; i++) {
6185 		/* &peer->rx_tid[i] is an array element and can never be NULL */
6186 		rx_tid = &peer->rx_tid[i];
6188 		qdf_spin_lock_bh(&rx_tid->tid_lock);
6189 		if (rx_tid->hw_qdesc_vaddr_aligned) {
6190 			qdf_mem_zero(&params, sizeof(params));
6191 			params.std.need_status = 1;
6192 			params.std.addr_lo =
6193 				rx_tid->hw_qdesc_paddr & 0xffffffff;
6194 			params.std.addr_hi =
6195 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
6196 			params.u.fl_cache_params.flush_no_inval = 0;
6197 			if (QDF_STATUS_SUCCESS !=
6198 				dp_reo_send_cmd(
6199 					soc, CMD_FLUSH_CACHE,
6200 					&params, dp_dump_rx_reo_queue_info,
6201 					(void *)rx_tid)) {
6202 				dp_err_rl("cache flush send failed tid %d",
6203 					  rx_tid->tid);
6204 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
6205 				break;
6206 			}
6207 		}
6208 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
6209 	}
6210 }
6211 
6212 void dp_get_rx_reo_queue_info(
6213 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6214 {
6215 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6216 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6217 						     DP_MOD_ID_GENERIC_STATS);
6218 	struct dp_peer *peer = NULL;
6219 
6220 	if (!vdev) {
6221 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
6222 		goto failed;
6223 	}
6224 
6225 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6226 
6227 	if (!peer) {
6228 		dp_err_rl("Peer is NULL");
6229 		goto failed;
6230 	}
6231 	dp_send_cache_flush_for_rx_tid(soc, peer);
6232 failed:
6233 	if (peer)
6234 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
6235 	if (vdev)
6236 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
6237 }
6238 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
6239 
6240 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6241 			 uint8_t *peer_mac)
6242 {
6243 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6244 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
6245 							      vdev_id,
6246 							      DP_MOD_ID_CDP);
6247 	struct dp_txrx_peer *txrx_peer;
6248 	uint8_t tid;
6249 	struct dp_rx_tid_defrag *defrag_rx_tid;
6250 
6251 	if (!peer)
6252 		return;
6253 
6254 	if (!peer->txrx_peer)
6255 		goto fail;
6256 
6257 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
6258 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6259 
6260 	txrx_peer = peer->txrx_peer;
6261 
6262 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
6263 		defrag_rx_tid = &txrx_peer->rx_tid[tid];
6264 
6265 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
6266 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
6267 		dp_rx_reorder_flush_frag(txrx_peer, tid);
6268 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
6269 	}
6270 fail:
6271 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6272 }
6273 
6274 /**
6275  * dp_peer_find_by_id_valid() - check if peer exists for given id
6276  * @soc: core DP soc context
6277  * @peer_id: peer id for which the peer object is looked up
6278  *
6279  * Return: true if peer exists, false otherwise
6280  */
6281 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
6282 {
6283 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
6284 						     DP_MOD_ID_HTT);
6285 
6286 	if (peer) {
6287 		/*
6288 		 * Decrement the peer ref which is taken as part of
6289 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
6290 		 */
6291 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
6292 
6293 		return true;
6294 	}
6295 
6296 	return false;
6297 }
6298 
6299 qdf_export_symbol(dp_peer_find_by_id_valid);
6300