xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision ccf6794c7efeda37a9772e5eb4d4dab2ab5af07a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 
44 #ifdef REO_QDESC_HISTORY
45 #define REO_QDESC_HISTORY_SIZE 512
46 uint64_t reo_qdesc_history_idx;
47 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
48 #endif
49 
50 #ifdef FEATURE_WDS
51 static inline bool
52 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
53 				    struct dp_ast_entry *ast_entry)
54 {
55 	/* if peer map v2 is enabled we are not freeing ast entry
56 	 * here and it is supposed to be freed in unmap event (after
57 	 * we receive delete confirmation from target)
58 	 *
59 	 * if peer_id is invalid we did not get the peer map event
60 	 * for the peer free ast entry from here only in this case
61 	 */
62 
63 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
64 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
65 		return true;
66 
67 	return false;
68 }
69 #else
70 static inline bool
71 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
72 				    struct dp_ast_entry *ast_entry)
73 {
74 	return false;
75 }
76 
77 void dp_soc_wds_attach(struct dp_soc *soc)
78 {
79 }
80 
81 void dp_soc_wds_detach(struct dp_soc *soc)
82 {
83 }
84 #endif
85 
86 #ifdef REO_QDESC_HISTORY
87 static inline void
88 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
89 			    enum reo_qdesc_event_type type)
90 {
91 	struct reo_qdesc_event *evt;
92 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
93 	uint32_t idx;
94 
95 	reo_qdesc_history_idx++;
96 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
97 
98 	evt = &reo_qdesc_history[idx];
99 
100 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
101 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
102 	evt->ts = qdf_get_log_timestamp();
103 	evt->type = type;
104 }
105 
106 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
107 static inline void
108 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
109 				 enum reo_qdesc_event_type type)
110 {
111 	struct reo_qdesc_event *evt;
112 	uint32_t idx;
113 
114 	reo_qdesc_history_idx++;
115 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
116 
117 	evt = &reo_qdesc_history[idx];
118 
119 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
120 	evt->qdesc_addr = desc->hw_qdesc_paddr;
121 	evt->ts = qdf_get_log_timestamp();
122 	evt->type = type;
123 }
124 
125 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
126 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
127 
128 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
129 	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
130 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
131 
132 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
133 	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)
134 
135 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
136 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
137 
138 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
139 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
140 
141 #else
142 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
143 
144 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
145 
146 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
147 
148 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
149 
150 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
151 #endif
152 
153 static inline void
154 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
155 					uint8_t valid)
156 {
157 	params->u.upd_queue_params.update_svld = 1;
158 	params->u.upd_queue_params.svld = valid;
159 	dp_peer_debug("Setting SSN valid bit to %d",
160 		      valid);
161 }
162 
163 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
164 {
165 	uint32_t max_ast_index;
166 
167 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
168 	/* allocate ast_table for ast entry to ast_index map */
169 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
170 	soc->ast_table = qdf_mem_malloc(max_ast_index *
171 					sizeof(struct dp_ast_entry *));
172 	if (!soc->ast_table) {
173 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
174 		return QDF_STATUS_E_NOMEM;
175 	}
176 	return QDF_STATUS_SUCCESS; /* success */
177 }
178 
179 /*
180  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
181  * @soc: soc handle
182  *
183  * return: QDF_STATUS
184  */
185 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
186 {
187 	uint32_t max_peers, peer_map_size;
188 
189 	max_peers = soc->max_peer_id;
190 	/* allocate the peer ID -> peer object map */
191 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
192 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
193 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
194 	if (!soc->peer_id_to_obj_map) {
195 		dp_peer_err("%pK: peer map memory allocation failed", soc);
196 		return QDF_STATUS_E_NOMEM;
197 	}
198 
199 	/*
200 	 * The peer_id_to_obj_map doesn't really need to be initialized,
201 	 * since elements are only used after they have been individually
202 	 * initialized.
203 	 * However, it is convenient for debugging to have all elements
204 	 * that are not in use set to 0.
205 	 */
206 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
207 
208 	qdf_spinlock_create(&soc->peer_map_lock);
209 	return QDF_STATUS_SUCCESS; /* success */
210 }
211 
212 #define DP_AST_HASH_LOAD_MULT  2
213 #define DP_AST_HASH_LOAD_SHIFT 0
214 
215 static inline uint32_t
216 dp_peer_find_hash_index(struct dp_soc *soc,
217 			union dp_align_mac_addr *mac_addr)
218 {
219 	uint32_t index;
220 
221 	index =
222 		mac_addr->align2.bytes_ab ^
223 		mac_addr->align2.bytes_cd ^
224 		mac_addr->align2.bytes_ef;
225 
226 	index ^= index >> soc->peer_hash.idx_bits;
227 	index &= soc->peer_hash.mask;
228 	return index;
229 }
230 
231 #ifdef WLAN_FEATURE_11BE_MLO
232 /*
233  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
234  * @soc: soc handle
235  *
236  * return: none
237  */
238 static void dp_peer_find_hash_detach(struct dp_soc *soc)
239 {
240 	if (soc->peer_hash.bins) {
241 		qdf_mem_free(soc->peer_hash.bins);
242 		soc->peer_hash.bins = NULL;
243 		qdf_spinlock_destroy(&soc->peer_hash_lock);
244 	}
245 
246 	if (soc->arch_ops.mlo_peer_find_hash_detach)
247 		soc->arch_ops.mlo_peer_find_hash_detach(soc);
248 }
249 
250 /*
251  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
252  * @soc: soc handle
253  *
254  * return: QDF_STATUS
255  */
256 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
257 {
258 	int i, hash_elems, log2;
259 
260 	/* allocate the peer MAC address -> peer object hash table */
261 	hash_elems = soc->max_peers;
262 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
263 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
264 	log2 = dp_log2_ceil(hash_elems);
265 	hash_elems = 1 << log2;
266 
267 	soc->peer_hash.mask = hash_elems - 1;
268 	soc->peer_hash.idx_bits = log2;
269 	/* allocate an array of TAILQ peer object lists */
270 	soc->peer_hash.bins = qdf_mem_malloc(
271 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
272 	if (!soc->peer_hash.bins)
273 		return QDF_STATUS_E_NOMEM;
274 
275 	for (i = 0; i < hash_elems; i++)
276 		TAILQ_INIT(&soc->peer_hash.bins[i]);
277 
278 	qdf_spinlock_create(&soc->peer_hash_lock);
279 
280 	if (soc->arch_ops.mlo_peer_find_hash_attach &&
281 	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
282 			QDF_STATUS_SUCCESS)) {
283 		dp_peer_find_hash_detach(soc);
284 		return QDF_STATUS_E_NOMEM;
285 	}
286 	return QDF_STATUS_SUCCESS;
287 }
288 
289 /*
290  * dp_peer_find_hash_add() - add peer to peer_hash_table
291  * @soc: soc handle
292  * @peer: peer handle
293  * @peer_type: link or mld peer
294  *
295  * return: none
296  */
297 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
298 {
299 	unsigned index;
300 
301 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
302 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
303 		qdf_spin_lock_bh(&soc->peer_hash_lock);
304 
305 		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
306 							DP_MOD_ID_CONFIG))) {
307 			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
308 			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
309 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
310 			return;
311 		}
312 
313 		/*
314 		 * It is important to add the new peer at the tail of
315 		 * peer list with the bin index. Together with having
316 		 * the hash_find function search from head to tail,
317 		 * this ensures that if two entries with the same MAC address
318 		 * are stored, the one added first will be found first.
319 		 */
320 		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
321 				  hash_list_elem);
322 
323 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
324 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
325 		if (soc->arch_ops.mlo_peer_find_hash_add)
326 			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
327 	} else {
328 		dp_err("unknown peer type %d", peer->peer_type);
329 	}
330 }
331 
332 /*
333  * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
334  *                            vdev_id and mac_address
335  * @soc: soc handle
336  * @peer_mac_addr: peer mac address
337  * @mac_addr_is_aligned: is mac addr alligned
338  * @vdev_id: vdev_id
339  * @mod_id: id of module requesting reference
340  *
341  * return: peer in sucsess
342  *         NULL in failure
343  */
344 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
345 				       uint8_t *peer_mac_addr,
346 				       int mac_addr_is_aligned,
347 				       uint8_t vdev_id,
348 				       enum dp_mod_id mod_id)
349 {
350 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
351 	unsigned index;
352 	struct dp_peer *peer;
353 
354 	if (!soc->peer_hash.bins)
355 		return NULL;
356 
357 	if (mac_addr_is_aligned) {
358 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
359 	} else {
360 		qdf_mem_copy(
361 			&local_mac_addr_aligned.raw[0],
362 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
363 		mac_addr = &local_mac_addr_aligned;
364 	}
365 	/* search link peer table firstly */
366 	index = dp_peer_find_hash_index(soc, mac_addr);
367 	qdf_spin_lock_bh(&soc->peer_hash_lock);
368 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
369 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
370 		    ((peer->vdev->vdev_id == vdev_id) ||
371 		     (vdev_id == DP_VDEV_ALL))) {
372 			/* take peer reference before returning */
373 			if (dp_peer_get_ref(soc, peer, mod_id) !=
374 						QDF_STATUS_SUCCESS)
375 				peer = NULL;
376 
377 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
378 			return peer;
379 		}
380 	}
381 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
382 
383 	if (soc->arch_ops.mlo_peer_find_hash_find)
384 		return soc->arch_ops.mlo_peer_find_hash_find(soc, peer_mac_addr,
385 							     mac_addr_is_aligned,
386 							     mod_id);
387 	return NULL;
388 }
389 
390 qdf_export_symbol(dp_peer_find_hash_find);
391 
392 /*
393  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
394  * @soc: soc handle
395  * @peer: peer handle
396  *
397  * return: none
398  */
399 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
400 {
401 	unsigned index;
402 	struct dp_peer *tmppeer = NULL;
403 	int found = 0;
404 
405 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
406 
407 	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
408 		/* Check if tail is not empty before delete*/
409 		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
410 
411 		qdf_spin_lock_bh(&soc->peer_hash_lock);
412 		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
413 			      hash_list_elem) {
414 			if (tmppeer == peer) {
415 				found = 1;
416 				break;
417 			}
418 		}
419 		QDF_ASSERT(found);
420 		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
421 			     hash_list_elem);
422 
423 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
424 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
425 	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
426 		if (soc->arch_ops.mlo_peer_find_hash_remove)
427 			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
428 	} else {
429 		dp_err("unknown peer type %d", peer->peer_type);
430 	}
431 }
432 
433 /*
434  * dp_peer_exist_on_pdev - check if peer with mac address exist on pdev
435  *
436  * @soc: Datapath SOC handle
437  * @peer_mac_addr: peer mac address
438  * @mac_addr_is_aligned: is mac address aligned
439  * @pdev: Datapath PDEV handle
440  *
441  * Return: true if peer found else return false
442  */
443 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
444 				  uint8_t *peer_mac_addr,
445 				  int mac_addr_is_aligned,
446 				  struct dp_pdev *pdev)
447 {
448 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
449 	unsigned int index;
450 	struct dp_peer *peer;
451 	bool found = false;
452 
453 	if (mac_addr_is_aligned) {
454 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
455 	} else {
456 		qdf_mem_copy(
457 			&local_mac_addr_aligned.raw[0],
458 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
459 		mac_addr = &local_mac_addr_aligned;
460 	}
461 	index = dp_peer_find_hash_index(soc, mac_addr);
462 	qdf_spin_lock_bh(&soc->peer_hash_lock);
463 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
464 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
465 		    (peer->vdev->pdev == pdev)) {
466 			found = true;
467 			break;
468 		}
469 	}
470 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
471 
472 	return found;
473 }
474 #else
475 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
476 {
477 	int i, hash_elems, log2;
478 
479 	/* allocate the peer MAC address -> peer object hash table */
480 	hash_elems = soc->max_peers;
481 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
482 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
483 	log2 = dp_log2_ceil(hash_elems);
484 	hash_elems = 1 << log2;
485 
486 	soc->peer_hash.mask = hash_elems - 1;
487 	soc->peer_hash.idx_bits = log2;
488 	/* allocate an array of TAILQ peer object lists */
489 	soc->peer_hash.bins = qdf_mem_malloc(
490 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
491 	if (!soc->peer_hash.bins)
492 		return QDF_STATUS_E_NOMEM;
493 
494 	for (i = 0; i < hash_elems; i++)
495 		TAILQ_INIT(&soc->peer_hash.bins[i]);
496 
497 	qdf_spinlock_create(&soc->peer_hash_lock);
498 	return QDF_STATUS_SUCCESS;
499 }
500 
501 static void dp_peer_find_hash_detach(struct dp_soc *soc)
502 {
503 	if (soc->peer_hash.bins) {
504 		qdf_mem_free(soc->peer_hash.bins);
505 		soc->peer_hash.bins = NULL;
506 		qdf_spinlock_destroy(&soc->peer_hash_lock);
507 	}
508 }
509 
510 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
511 {
512 	unsigned index;
513 
514 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
515 	qdf_spin_lock_bh(&soc->peer_hash_lock);
516 
517 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
518 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
519 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
520 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
521 		return;
522 	}
523 
524 	/*
525 	 * It is important to add the new peer at the tail of the peer list
526 	 * with the bin index.  Together with having the hash_find function
527 	 * search from head to tail, this ensures that if two entries with
528 	 * the same MAC address are stored, the one added first will be
529 	 * found first.
530 	 */
531 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
532 
533 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
534 }
535 
536 struct dp_peer *dp_peer_find_hash_find(
537 				struct dp_soc *soc, uint8_t *peer_mac_addr,
538 				int mac_addr_is_aligned, uint8_t vdev_id,
539 				enum dp_mod_id mod_id)
540 {
541 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
542 	unsigned index;
543 	struct dp_peer *peer;
544 
545 	if (!soc->peer_hash.bins)
546 		return NULL;
547 
548 	if (mac_addr_is_aligned) {
549 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
550 	} else {
551 		qdf_mem_copy(
552 			&local_mac_addr_aligned.raw[0],
553 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
554 		mac_addr = &local_mac_addr_aligned;
555 	}
556 	index = dp_peer_find_hash_index(soc, mac_addr);
557 	qdf_spin_lock_bh(&soc->peer_hash_lock);
558 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
559 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
560 		    ((peer->vdev->vdev_id == vdev_id) ||
561 		     (vdev_id == DP_VDEV_ALL))) {
562 			/* take peer reference before returning */
563 			if (dp_peer_get_ref(soc, peer, mod_id) !=
564 						QDF_STATUS_SUCCESS)
565 				peer = NULL;
566 
567 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
568 			return peer;
569 		}
570 	}
571 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
572 	return NULL; /* failure */
573 }
574 
575 qdf_export_symbol(dp_peer_find_hash_find);
576 
577 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
578 {
579 	unsigned index;
580 	struct dp_peer *tmppeer = NULL;
581 	int found = 0;
582 
583 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
584 	/* Check if tail is not empty before delete*/
585 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
586 
587 	qdf_spin_lock_bh(&soc->peer_hash_lock);
588 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
589 		if (tmppeer == peer) {
590 			found = 1;
591 			break;
592 		}
593 	}
594 	QDF_ASSERT(found);
595 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
596 
597 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
598 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
599 }
600 
601 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
602 				  uint8_t *peer_mac_addr,
603 				  int mac_addr_is_aligned,
604 				  struct dp_pdev *pdev)
605 {
606 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
607 	unsigned int index;
608 	struct dp_peer *peer;
609 	bool found = false;
610 
611 	if (mac_addr_is_aligned) {
612 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
613 	} else {
614 		qdf_mem_copy(
615 			&local_mac_addr_aligned.raw[0],
616 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
617 		mac_addr = &local_mac_addr_aligned;
618 	}
619 	index = dp_peer_find_hash_index(soc, mac_addr);
620 	qdf_spin_lock_bh(&soc->peer_hash_lock);
621 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
622 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
623 		    (peer->vdev->pdev == pdev)) {
624 			found = true;
625 			break;
626 		}
627 	}
628 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
629 	return found;
630 }
631 #endif/* WLAN_FEATURE_11BE_MLO */
632 
633 /*
634  * dp_peer_vdev_list_add() - add peer into vdev's peer list
635  * @soc: soc handle
636  * @vdev: vdev handle
637  * @peer: peer handle
638  *
639  * return: none
640  */
641 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
642 			   struct dp_peer *peer)
643 {
644 	/* only link peer will be added to vdev peer list */
645 	if (IS_MLO_DP_MLD_PEER(peer))
646 		return;
647 
648 	qdf_spin_lock_bh(&vdev->peer_list_lock);
649 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
650 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
651 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
652 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
653 		return;
654 	}
655 
656 	/* add this peer into the vdev's list */
657 	if (wlan_op_mode_sta == vdev->opmode)
658 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
659 	else
660 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
661 
662 	vdev->num_peers++;
663 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
664 }
665 
666 /*
667  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
668  * @soc: SoC handle
669  * @vdev: VDEV handle
670  * @peer: peer handle
671  *
672  * Return: none
673  */
674 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
675 			      struct dp_peer *peer)
676 {
677 	uint8_t found = 0;
678 	struct dp_peer *tmppeer = NULL;
679 
680 	/* only link peer will be added to vdev peer list */
681 	if (IS_MLO_DP_MLD_PEER(peer))
682 		return;
683 
684 	qdf_spin_lock_bh(&vdev->peer_list_lock);
685 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
686 		if (tmppeer == peer) {
687 			found = 1;
688 			break;
689 		}
690 	}
691 
692 	if (found) {
693 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
694 			     peer_list_elem);
695 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
696 		vdev->num_peers--;
697 	} else {
698 		/*Ignoring the remove operation as peer not found*/
699 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
700 			      , soc, peer, vdev, &peer->vdev->peer_list);
701 	}
702 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
703 }
704 
705 /*
706  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
707  * @soc: SoC handle
708  * @peer: peer handle
709  * @peer_id: peer_id
710  *
711  * Return: None
712  */
713 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
714 				struct dp_peer *peer,
715 				uint16_t peer_id)
716 {
717 	QDF_ASSERT(peer_id <= soc->max_peer_id);
718 
719 	qdf_spin_lock_bh(&soc->peer_map_lock);
720 
721 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
722 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
723 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
724 		qdf_spin_unlock_bh(&soc->peer_map_lock);
725 		return;
726 	}
727 
728 	if (!soc->peer_id_to_obj_map[peer_id]) {
729 		soc->peer_id_to_obj_map[peer_id] = peer;
730 	} else {
731 		/* Peer map event came for peer_id which
732 		 * is already mapped, this is not expected
733 		 */
734 		QDF_ASSERT(0);
735 	}
736 	qdf_spin_unlock_bh(&soc->peer_map_lock);
737 }
738 
739 /*
740  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
741  * @soc: SoC handle
742  * @peer_id: peer_id
743  *
744  * Return: None
745  */
746 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
747 				   uint16_t peer_id)
748 {
749 	struct dp_peer *peer = NULL;
750 	QDF_ASSERT(peer_id <= soc->max_peer_id);
751 
752 	qdf_spin_lock_bh(&soc->peer_map_lock);
753 	peer = soc->peer_id_to_obj_map[peer_id];
754 	soc->peer_id_to_obj_map[peer_id] = NULL;
755 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
756 	qdf_spin_unlock_bh(&soc->peer_map_lock);
757 }
758 
759 #ifdef FEATURE_MEC
760 /**
761  * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
762  * @soc: SoC handle
763  *
764  * Return: QDF_STATUS
765  */
766 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
767 {
768 	int log2, hash_elems, i;
769 
770 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
771 	hash_elems = 1 << log2;
772 
773 	soc->mec_hash.mask = hash_elems - 1;
774 	soc->mec_hash.idx_bits = log2;
775 
776 	dp_peer_info("%pK: max mec index: %d",
777 		     soc, DP_PEER_MAX_MEC_IDX);
778 
779 	/* allocate an array of TAILQ mec object lists */
780 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
781 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
782 							      dp_mec_entry)));
783 
784 	if (!soc->mec_hash.bins)
785 		return QDF_STATUS_E_NOMEM;
786 
787 	for (i = 0; i < hash_elems; i++)
788 		TAILQ_INIT(&soc->mec_hash.bins[i]);
789 
790 	return QDF_STATUS_SUCCESS;
791 }
792 
793 /**
794  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
795  * @soc: SoC handle
796  *
797  * Return: MEC hash
798  */
799 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
800 					      union dp_align_mac_addr *mac_addr)
801 {
802 	uint32_t index;
803 
804 	index =
805 		mac_addr->align2.bytes_ab ^
806 		mac_addr->align2.bytes_cd ^
807 		mac_addr->align2.bytes_ef;
808 	index ^= index >> soc->mec_hash.idx_bits;
809 	index &= soc->mec_hash.mask;
810 	return index;
811 }
812 
813 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
814 						     uint8_t pdev_id,
815 						     uint8_t *mec_mac_addr)
816 {
817 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
818 	uint32_t index;
819 	struct dp_mec_entry *mecentry;
820 
821 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
822 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
823 	mac_addr = &local_mac_addr_aligned;
824 
825 	index = dp_peer_mec_hash_index(soc, mac_addr);
826 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
827 		if ((pdev_id == mecentry->pdev_id) &&
828 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
829 			return mecentry;
830 	}
831 
832 	return NULL;
833 }
834 
835 /**
836  * dp_peer_mec_hash_add() - Add MEC entry into hash table
837  * @soc: SoC handle
838  *
839  * This function adds the MEC entry into SoC MEC hash table
840  *
841  * Return: None
842  */
843 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
844 					struct dp_mec_entry *mecentry)
845 {
846 	uint32_t index;
847 
848 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
849 	qdf_spin_lock_bh(&soc->mec_lock);
850 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
851 	qdf_spin_unlock_bh(&soc->mec_lock);
852 }
853 
854 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
855 				 struct dp_vdev *vdev,
856 				 uint8_t *mac_addr)
857 {
858 	struct dp_mec_entry *mecentry = NULL;
859 	struct dp_pdev *pdev = NULL;
860 
861 	if (!vdev) {
862 		dp_peer_err("%pK: Peers vdev is NULL", soc);
863 		return QDF_STATUS_E_INVAL;
864 	}
865 
866 	pdev = vdev->pdev;
867 
868 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
869 					 DP_PEER_MAX_MEC_ENTRY)) {
870 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
871 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
872 		return QDF_STATUS_E_NOMEM;
873 	}
874 
875 	qdf_spin_lock_bh(&soc->mec_lock);
876 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
877 						   mac_addr);
878 	if (qdf_likely(mecentry)) {
879 		mecentry->is_active = TRUE;
880 		qdf_spin_unlock_bh(&soc->mec_lock);
881 		return QDF_STATUS_E_ALREADY;
882 	}
883 
884 	qdf_spin_unlock_bh(&soc->mec_lock);
885 
886 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
887 		      QDF_MAC_ADDR_FMT,
888 		      soc, pdev->pdev_id, vdev->vdev_id,
889 		      QDF_MAC_ADDR_REF(mac_addr));
890 
891 	mecentry = (struct dp_mec_entry *)
892 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
893 
894 	if (qdf_unlikely(!mecentry)) {
895 		dp_peer_err("%pK: fail to allocate mecentry", soc);
896 		return QDF_STATUS_E_NOMEM;
897 	}
898 
899 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
900 			 (struct qdf_mac_addr *)mac_addr);
901 	mecentry->pdev_id = pdev->pdev_id;
902 	mecentry->vdev_id = vdev->vdev_id;
903 	mecentry->is_active = TRUE;
904 	dp_peer_mec_hash_add(soc, mecentry);
905 
906 	qdf_atomic_inc(&soc->mec_cnt);
907 	DP_STATS_INC(soc, mec.added, 1);
908 
909 	return QDF_STATUS_SUCCESS;
910 }
911 
912 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
913 			      void *ptr)
914 {
915 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
916 
917 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
918 
919 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
920 		     hash_list_elem);
921 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
922 }
923 
924 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
925 {
926 	struct dp_mec_entry *mecentry, *mecentry_next;
927 
928 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
929 
930 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
931 			   mecentry_next) {
932 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
933 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
934 		qdf_mem_free(mecentry);
935 		qdf_atomic_dec(&soc->mec_cnt);
936 		DP_STATS_INC(soc, mec.deleted, 1);
937 	}
938 }
939 
940 /**
941  * dp_peer_mec_hash_detach() - Free MEC Hash table
942  * @soc: SoC handle
943  *
944  * Return: None
945  */
946 void dp_peer_mec_hash_detach(struct dp_soc *soc)
947 {
948 	dp_peer_mec_flush_entries(soc);
949 	qdf_mem_free(soc->mec_hash.bins);
950 	soc->mec_hash.bins = NULL;
951 }
952 
953 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
954 {
955 	qdf_spinlock_destroy(&soc->mec_lock);
956 }
957 
958 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
959 {
960 	qdf_spinlock_create(&soc->mec_lock);
961 }
962 #else
963 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
964 {
965 	return QDF_STATUS_SUCCESS;
966 }
967 
968 void dp_peer_mec_hash_detach(struct dp_soc *soc)
969 {
970 }
971 #endif
972 
973 #ifdef FEATURE_AST
974 /*
975  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
976  * @soc: SoC handle
977  *
978  * Return: QDF_STATUS
979  */
980 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
981 {
982 	int i, hash_elems, log2;
983 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
984 
985 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
986 		DP_AST_HASH_LOAD_SHIFT);
987 
988 	log2 = dp_log2_ceil(hash_elems);
989 	hash_elems = 1 << log2;
990 
991 	soc->ast_hash.mask = hash_elems - 1;
992 	soc->ast_hash.idx_bits = log2;
993 
994 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
995 		     soc, hash_elems, max_ast_idx);
996 
997 	/* allocate an array of TAILQ peer object lists */
998 	soc->ast_hash.bins = qdf_mem_malloc(
999 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
1000 				dp_ast_entry)));
1001 
1002 	if (!soc->ast_hash.bins)
1003 		return QDF_STATUS_E_NOMEM;
1004 
1005 	for (i = 0; i < hash_elems; i++)
1006 		TAILQ_INIT(&soc->ast_hash.bins[i]);
1007 
1008 	return QDF_STATUS_SUCCESS;
1009 }
1010 
1011 /*
1012  * dp_peer_ast_cleanup() - cleanup the references
1013  * @soc: SoC handle
1014  * @ast: ast entry
1015  *
1016  * Return: None
1017  */
1018 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
1019 				       struct dp_ast_entry *ast)
1020 {
1021 	txrx_ast_free_cb cb = ast->callback;
1022 	void *cookie = ast->cookie;
1023 
1024 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
1025 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
1026 
1027 	/* Call the callbacks to free up the cookie */
1028 	if (cb) {
1029 		ast->callback = NULL;
1030 		ast->cookie = NULL;
1031 		cb(soc->ctrl_psoc,
1032 		   dp_soc_to_cdp_soc(soc),
1033 		   cookie,
1034 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1035 	}
1036 }
1037 
1038 /*
1039  * dp_peer_ast_hash_detach() - Free AST Hash table
1040  * @soc: SoC handle
1041  *
1042  * Return: None
1043  */
1044 void dp_peer_ast_hash_detach(struct dp_soc *soc)
1045 {
1046 	unsigned int index;
1047 	struct dp_ast_entry *ast, *ast_next;
1048 
1049 	if (!soc->ast_hash.mask)
1050 		return;
1051 
1052 	if (!soc->ast_hash.bins)
1053 		return;
1054 
1055 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
1056 
1057 	qdf_spin_lock_bh(&soc->ast_lock);
1058 	for (index = 0; index <= soc->ast_hash.mask; index++) {
1059 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
1060 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
1061 					   hash_list_elem, ast_next) {
1062 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
1063 					     hash_list_elem);
1064 				dp_peer_ast_cleanup(soc, ast);
1065 				soc->num_ast_entries--;
1066 				qdf_mem_free(ast);
1067 			}
1068 		}
1069 	}
1070 	qdf_spin_unlock_bh(&soc->ast_lock);
1071 
1072 	qdf_mem_free(soc->ast_hash.bins);
1073 	soc->ast_hash.bins = NULL;
1074 }
1075 
1076 /*
1077  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
1078  * @soc: SoC handle
1079  *
1080  * Return: AST hash
1081  */
1082 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
1083 	union dp_align_mac_addr *mac_addr)
1084 {
1085 	uint32_t index;
1086 
1087 	index =
1088 		mac_addr->align2.bytes_ab ^
1089 		mac_addr->align2.bytes_cd ^
1090 		mac_addr->align2.bytes_ef;
1091 	index ^= index >> soc->ast_hash.idx_bits;
1092 	index &= soc->ast_hash.mask;
1093 	return index;
1094 }
1095 
1096 /*
1097  * dp_peer_ast_hash_add() - Add AST entry into hash table
1098  * @soc: SoC handle
1099  *
1100  * This function adds the AST entry into SoC AST hash table
1101  * It assumes caller has taken the ast lock to protect the access to this table
1102  *
1103  * Return: None
1104  */
1105 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
1106 		struct dp_ast_entry *ase)
1107 {
1108 	uint32_t index;
1109 
1110 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1111 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
1112 }
1113 
1114 /*
1115  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
1116  * @soc: SoC handle
1117  *
1118  * This function removes the AST entry from soc AST hash table
1119  * It assumes caller has taken the ast lock to protect the access to this table
1120  *
1121  * Return: None
1122  */
1123 void dp_peer_ast_hash_remove(struct dp_soc *soc,
1124 			     struct dp_ast_entry *ase)
1125 {
1126 	unsigned index;
1127 	struct dp_ast_entry *tmpase;
1128 	int found = 0;
1129 
1130 	if (soc->ast_offload_support)
1131 		return;
1132 
1133 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
1134 	/* Check if tail is not empty before delete*/
1135 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
1136 
1137 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
1138 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
1139 
1140 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
1141 		if (tmpase == ase) {
1142 			found = 1;
1143 			break;
1144 		}
1145 	}
1146 
1147 	QDF_ASSERT(found);
1148 
1149 	if (found)
1150 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
1151 }
1152 
1153 /*
1154  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
1155  * @soc: SoC handle
1156  *
1157  * It assumes caller has taken the ast lock to protect the access to
1158  * AST hash table
1159  *
1160  * Return: AST entry
1161  */
1162 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
1163 						     uint8_t *ast_mac_addr,
1164 						     uint8_t vdev_id)
1165 {
1166 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1167 	uint32_t index;
1168 	struct dp_ast_entry *ase;
1169 
1170 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1171 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1172 	mac_addr = &local_mac_addr_aligned;
1173 
1174 	index = dp_peer_ast_hash_index(soc, mac_addr);
1175 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1176 		if ((vdev_id == ase->vdev_id) &&
1177 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1178 			return ase;
1179 		}
1180 	}
1181 
1182 	return NULL;
1183 }
1184 
1185 /*
1186  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
1187  * @soc: SoC handle
1188  *
1189  * It assumes caller has taken the ast lock to protect the access to
1190  * AST hash table
1191  *
1192  * Return: AST entry
1193  */
1194 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1195 						     uint8_t *ast_mac_addr,
1196 						     uint8_t pdev_id)
1197 {
1198 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1199 	uint32_t index;
1200 	struct dp_ast_entry *ase;
1201 
1202 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1203 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
1204 	mac_addr = &local_mac_addr_aligned;
1205 
1206 	index = dp_peer_ast_hash_index(soc, mac_addr);
1207 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1208 		if ((pdev_id == ase->pdev_id) &&
1209 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
1210 			return ase;
1211 		}
1212 	}
1213 
1214 	return NULL;
1215 }
1216 
1217 /*
1218  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
1219  * @soc: SoC handle
1220  *
1221  * It assumes caller has taken the ast lock to protect the access to
1222  * AST hash table
1223  *
1224  * Return: AST entry
1225  */
1226 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1227 					       uint8_t *ast_mac_addr)
1228 {
1229 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1230 	unsigned index;
1231 	struct dp_ast_entry *ase;
1232 
1233 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
1234 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
1235 	mac_addr = &local_mac_addr_aligned;
1236 
1237 	index = dp_peer_ast_hash_index(soc, mac_addr);
1238 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
1239 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
1240 			return ase;
1241 		}
1242 	}
1243 
1244 	return NULL;
1245 }
1246 
1247 /*
1248  * dp_peer_map_ast() - Map the ast entry with HW AST Index
1249  * @soc: SoC handle
1250  * @peer: peer to which ast node belongs
1251  * @mac_addr: MAC address of ast node
1252  * @hw_peer_id: HW AST Index returned by target in peer map event
1253  * @vdev_id: vdev id for VAP to which the peer belongs to
1254  * @ast_hash: ast hash value in HW
1255  * @is_wds: flag to indicate peer map event for WDS ast entry
1256  *
1257  * Return: QDF_STATUS code
1258  */
1259 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1260 					 struct dp_peer *peer,
1261 					 uint8_t *mac_addr,
1262 					 uint16_t hw_peer_id,
1263 					 uint8_t vdev_id,
1264 					 uint16_t ast_hash,
1265 					 uint8_t is_wds)
1266 {
1267 	struct dp_ast_entry *ast_entry = NULL;
1268 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1269 	void *cookie = NULL;
1270 	txrx_ast_free_cb cb = NULL;
1271 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1272 
1273 	if (soc->ast_offload_support)
1274 		return QDF_STATUS_SUCCESS;
1275 
1276 	if (!peer) {
1277 		return QDF_STATUS_E_INVAL;
1278 	}
1279 
1280 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1281 		    soc, peer, hw_peer_id, vdev_id,
1282 		    QDF_MAC_ADDR_REF(mac_addr));
1283 
1284 	qdf_spin_lock_bh(&soc->ast_lock);
1285 
1286 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1287 
1288 	if (is_wds) {
1289 		/*
1290 		 * In certain cases like Auth attack on a repeater
1291 		 * can result in the number of ast_entries falling
1292 		 * in the same hash bucket to exceed the max_skid
1293 		 * length supported by HW in root AP. In these cases
1294 		 * the FW will return the hw_peer_id (ast_index) as
1295 		 * 0xffff indicating HW could not add the entry in
1296 		 * its table. Host has to delete the entry from its
1297 		 * table in these cases.
1298 		 */
1299 		if (hw_peer_id == HTT_INVALID_PEER) {
1300 			DP_STATS_INC(soc, ast.map_err, 1);
1301 			if (ast_entry) {
1302 				if (ast_entry->is_mapped) {
1303 					soc->ast_table[ast_entry->ast_idx] =
1304 						NULL;
1305 				}
1306 
1307 				cb = ast_entry->callback;
1308 				cookie = ast_entry->cookie;
1309 				peer_type = ast_entry->type;
1310 
1311 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1312 				dp_peer_free_ast_entry(soc, ast_entry);
1313 
1314 				qdf_spin_unlock_bh(&soc->ast_lock);
1315 
1316 				if (cb) {
1317 					cb(soc->ctrl_psoc,
1318 					   dp_soc_to_cdp_soc(soc),
1319 					   cookie,
1320 					   CDP_TXRX_AST_DELETED);
1321 				}
1322 			} else {
1323 				qdf_spin_unlock_bh(&soc->ast_lock);
1324 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1325 					      peer, peer->peer_id,
1326 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1327 					      QDF_MAC_ADDR_REF(mac_addr),
1328 					      vdev_id, is_wds);
1329 			}
1330 			err = QDF_STATUS_E_INVAL;
1331 
1332 			dp_hmwds_ast_add_notify(peer, mac_addr,
1333 						peer_type, err, true);
1334 
1335 			return err;
1336 		}
1337 	}
1338 
1339 	if (ast_entry) {
1340 		ast_entry->ast_idx = hw_peer_id;
1341 		soc->ast_table[hw_peer_id] = ast_entry;
1342 		ast_entry->is_active = TRUE;
1343 		peer_type = ast_entry->type;
1344 		ast_entry->ast_hash_value = ast_hash;
1345 		ast_entry->is_mapped = TRUE;
1346 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1347 
1348 		ast_entry->peer_id = peer->peer_id;
1349 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1350 				  ase_list_elem);
1351 	}
1352 
1353 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1354 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1355 			soc->cdp_soc.ol_ops->peer_map_event(
1356 			soc->ctrl_psoc, peer->peer_id,
1357 			hw_peer_id, vdev_id,
1358 			mac_addr, peer_type, ast_hash);
1359 		}
1360 	} else {
1361 		dp_peer_err("%pK: AST entry not found", soc);
1362 		err = QDF_STATUS_E_NOENT;
1363 	}
1364 
1365 	qdf_spin_unlock_bh(&soc->ast_lock);
1366 
1367 	dp_hmwds_ast_add_notify(peer, mac_addr,
1368 				peer_type, err, true);
1369 
1370 	return err;
1371 }
1372 
1373 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1374 			   struct cdp_soc *dp_soc,
1375 			   void *cookie,
1376 			   enum cdp_ast_free_status status)
1377 {
1378 	struct dp_ast_free_cb_params *param =
1379 		(struct dp_ast_free_cb_params *)cookie;
1380 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1381 	struct dp_peer *peer = NULL;
1382 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1383 
1384 	if (status != CDP_TXRX_AST_DELETED) {
1385 		qdf_mem_free(cookie);
1386 		return;
1387 	}
1388 
1389 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1390 				      0, param->vdev_id, DP_MOD_ID_AST);
1391 	if (peer) {
1392 		err = dp_peer_add_ast(soc, peer,
1393 				      &param->mac_addr.raw[0],
1394 				      param->type,
1395 				      param->flags);
1396 
1397 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1398 					param->type, err, false);
1399 
1400 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1401 	}
1402 	qdf_mem_free(cookie);
1403 }
1404 
1405 /*
1406  * dp_peer_add_ast() - Allocate and add AST entry into peer list
1407  * @soc: SoC handle
1408  * @peer: peer to which ast node belongs
1409  * @mac_addr: MAC address of ast node
1410  * @is_self: Is this base AST entry with peer mac address
1411  *
1412  * This API is used by WDS source port learning function to
1413  * add a new AST entry into peer AST list
1414  *
1415  * Return: QDF_STATUS code
1416  */
1417 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1418 			   struct dp_peer *peer,
1419 			   uint8_t *mac_addr,
1420 			   enum cdp_txrx_ast_entry_type type,
1421 			   uint32_t flags)
1422 {
1423 	struct dp_ast_entry *ast_entry = NULL;
1424 	struct dp_vdev *vdev = NULL;
1425 	struct dp_pdev *pdev = NULL;
1426 	uint8_t next_node_mac[6];
1427 	txrx_ast_free_cb cb = NULL;
1428 	void *cookie = NULL;
1429 	struct dp_peer *vap_bss_peer = NULL;
1430 	bool is_peer_found = false;
1431 
1432 	if (soc->ast_offload_support)
1433 		return QDF_STATUS_E_INVAL;
1434 
1435 	vdev = peer->vdev;
1436 	if (!vdev) {
1437 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1438 		QDF_ASSERT(0);
1439 		return QDF_STATUS_E_INVAL;
1440 	}
1441 
1442 	pdev = vdev->pdev;
1443 
1444 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1445 
1446 	qdf_spin_lock_bh(&soc->ast_lock);
1447 
1448 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1449 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1450 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1451 			qdf_spin_unlock_bh(&soc->ast_lock);
1452 			return QDF_STATUS_E_BUSY;
1453 		}
1454 	}
1455 
1456 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1457 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1458 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1459 		      QDF_MAC_ADDR_REF(mac_addr));
1460 
1461 	/* fw supports only 2 times the max_peers ast entries */
1462 	if (soc->num_ast_entries >=
1463 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1464 		qdf_spin_unlock_bh(&soc->ast_lock);
1465 		dp_peer_err("%pK: Max ast entries reached", soc);
1466 		return QDF_STATUS_E_RESOURCES;
1467 	}
1468 
1469 	/* If AST entry already exists , just return from here
1470 	 * ast entry with same mac address can exist on different radios
1471 	 * if ast_override support is enabled use search by pdev in this
1472 	 * case
1473 	 */
1474 	if (soc->ast_override_support) {
1475 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1476 							    pdev->pdev_id);
1477 		if (ast_entry) {
1478 			qdf_spin_unlock_bh(&soc->ast_lock);
1479 			return QDF_STATUS_E_ALREADY;
1480 		}
1481 
1482 		if (is_peer_found) {
1483 			/* During WDS to static roaming, peer is added
1484 			 * to the list before static AST entry create.
1485 			 * So, allow AST entry for STATIC type
1486 			 * even if peer is present
1487 			 */
1488 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1489 				qdf_spin_unlock_bh(&soc->ast_lock);
1490 				return QDF_STATUS_E_ALREADY;
1491 			}
1492 		}
1493 	} else {
1494 		/* For HWMWDS_SEC entries can be added for same mac address
1495 		 * do not check for existing entry
1496 		 */
1497 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1498 			goto add_ast_entry;
1499 
1500 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1501 
1502 		if (ast_entry) {
1503 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1504 			    !ast_entry->delete_in_progress) {
1505 				qdf_spin_unlock_bh(&soc->ast_lock);
1506 				return QDF_STATUS_E_ALREADY;
1507 			}
1508 
1509 			/* Add for HMWDS entry we cannot be ignored if there
1510 			 * is AST entry with same mac address
1511 			 *
1512 			 * if ast entry exists with the requested mac address
1513 			 * send a delete command and register callback which
1514 			 * can take care of adding HMWDS ast enty on delete
1515 			 * confirmation from target
1516 			 */
1517 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1518 				struct dp_ast_free_cb_params *param = NULL;
1519 
1520 				if (ast_entry->type ==
1521 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1522 					goto add_ast_entry;
1523 
1524 				/* save existing callback */
1525 				if (ast_entry->callback) {
1526 					cb = ast_entry->callback;
1527 					cookie = ast_entry->cookie;
1528 				}
1529 
1530 				param = qdf_mem_malloc(sizeof(*param));
1531 				if (!param) {
1532 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1533 						  QDF_TRACE_LEVEL_ERROR,
1534 						  "Allocation failed");
1535 					qdf_spin_unlock_bh(&soc->ast_lock);
1536 					return QDF_STATUS_E_NOMEM;
1537 				}
1538 
1539 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1540 					     QDF_MAC_ADDR_SIZE);
1541 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1542 					     &peer->mac_addr.raw[0],
1543 					     QDF_MAC_ADDR_SIZE);
1544 				param->type = type;
1545 				param->flags = flags;
1546 				param->vdev_id = vdev->vdev_id;
1547 				ast_entry->callback = dp_peer_free_hmwds_cb;
1548 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1549 				ast_entry->type = type;
1550 				ast_entry->cookie = (void *)param;
1551 				if (!ast_entry->delete_in_progress)
1552 					dp_peer_del_ast(soc, ast_entry);
1553 
1554 				qdf_spin_unlock_bh(&soc->ast_lock);
1555 
1556 				/* Call the saved callback*/
1557 				if (cb) {
1558 					cb(soc->ctrl_psoc,
1559 					   dp_soc_to_cdp_soc(soc),
1560 					   cookie,
1561 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1562 				}
1563 				return QDF_STATUS_E_AGAIN;
1564 			}
1565 
1566 			qdf_spin_unlock_bh(&soc->ast_lock);
1567 			return QDF_STATUS_E_ALREADY;
1568 		}
1569 	}
1570 
1571 add_ast_entry:
1572 	ast_entry = (struct dp_ast_entry *)
1573 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1574 
1575 	if (!ast_entry) {
1576 		qdf_spin_unlock_bh(&soc->ast_lock);
1577 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1578 		QDF_ASSERT(0);
1579 		return QDF_STATUS_E_NOMEM;
1580 	}
1581 
1582 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1583 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1584 	ast_entry->is_mapped = false;
1585 	ast_entry->delete_in_progress = false;
1586 	ast_entry->peer_id = HTT_INVALID_PEER;
1587 	ast_entry->next_hop = 0;
1588 	ast_entry->vdev_id = vdev->vdev_id;
1589 
1590 	switch (type) {
1591 	case CDP_TXRX_AST_TYPE_STATIC:
1592 		peer->self_ast_entry = ast_entry;
1593 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1594 		if (peer->vdev->opmode == wlan_op_mode_sta)
1595 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1596 		break;
1597 	case CDP_TXRX_AST_TYPE_SELF:
1598 		peer->self_ast_entry = ast_entry;
1599 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1600 		break;
1601 	case CDP_TXRX_AST_TYPE_WDS:
1602 		ast_entry->next_hop = 1;
1603 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1604 		break;
1605 	case CDP_TXRX_AST_TYPE_WDS_HM:
1606 		ast_entry->next_hop = 1;
1607 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1608 		break;
1609 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1610 		ast_entry->next_hop = 1;
1611 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1612 		ast_entry->peer_id = peer->peer_id;
1613 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1614 				  ase_list_elem);
1615 		break;
1616 	case CDP_TXRX_AST_TYPE_DA:
1617 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1618 							  DP_MOD_ID_AST);
1619 		if (!vap_bss_peer) {
1620 			qdf_spin_unlock_bh(&soc->ast_lock);
1621 			qdf_mem_free(ast_entry);
1622 			return QDF_STATUS_E_FAILURE;
1623 		}
1624 		peer = vap_bss_peer;
1625 		ast_entry->next_hop = 1;
1626 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1627 		break;
1628 	default:
1629 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1630 	}
1631 
1632 	ast_entry->is_active = TRUE;
1633 	DP_STATS_INC(soc, ast.added, 1);
1634 	soc->num_ast_entries++;
1635 	dp_peer_ast_hash_add(soc, ast_entry);
1636 
1637 	qdf_copy_macaddr((struct qdf_mac_addr *)next_node_mac,
1638 			 (struct qdf_mac_addr *)peer->mac_addr.raw);
1639 
1640 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1641 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1642 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1643 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
1644 		if (QDF_STATUS_SUCCESS ==
1645 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
1646 				soc->ctrl_psoc,
1647 				peer->vdev->vdev_id,
1648 				peer->mac_addr.raw,
1649 				peer->peer_id,
1650 				mac_addr,
1651 				next_node_mac,
1652 				flags,
1653 				ast_entry->type)) {
1654 			if (vap_bss_peer)
1655 				dp_peer_unref_delete(vap_bss_peer,
1656 						     DP_MOD_ID_AST);
1657 			qdf_spin_unlock_bh(&soc->ast_lock);
1658 			return QDF_STATUS_SUCCESS;
1659 		}
1660 	}
1661 
1662 	if (vap_bss_peer)
1663 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1664 
1665 	qdf_spin_unlock_bh(&soc->ast_lock);
1666 	return QDF_STATUS_E_FAILURE;
1667 }
1668 
1669 qdf_export_symbol(dp_peer_add_ast);
1670 
1671 /*
1672  * dp_peer_free_ast_entry() - Free up the ast entry memory
1673  * @soc: SoC handle
1674  * @ast_entry: Address search entry
1675  *
1676  * This API is used to free up the memory associated with
1677  * AST entry.
1678  *
1679  * Return: None
1680  */
1681 void dp_peer_free_ast_entry(struct dp_soc *soc,
1682 			    struct dp_ast_entry *ast_entry)
1683 {
1684 	/*
1685 	 * NOTE: Ensure that call to this API is done
1686 	 * after soc->ast_lock is taken
1687 	 */
1688 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1689 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1690 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1691 
1692 	ast_entry->callback = NULL;
1693 	ast_entry->cookie = NULL;
1694 
1695 	DP_STATS_INC(soc, ast.deleted, 1);
1696 	dp_peer_ast_hash_remove(soc, ast_entry);
1697 	dp_peer_ast_cleanup(soc, ast_entry);
1698 	qdf_mem_free(ast_entry);
1699 	soc->num_ast_entries--;
1700 }
1701 
1702 /*
1703  * dp_peer_unlink_ast_entry() - Free up the ast entry memory
1704  * @soc: SoC handle
1705  * @ast_entry: Address search entry
1706  * @peer: peer
1707  *
1708  * This API is used to remove/unlink AST entry from the peer list
1709  * and hash list.
1710  *
1711  * Return: None
1712  */
1713 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1714 			      struct dp_ast_entry *ast_entry,
1715 			      struct dp_peer *peer)
1716 {
1717 	if (!peer) {
1718 		dp_info_rl("NULL peer");
1719 		return;
1720 	}
1721 
1722 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1723 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1724 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1725 			  ast_entry->type);
1726 		return;
1727 	}
1728 	/*
1729 	 * NOTE: Ensure that call to this API is done
1730 	 * after soc->ast_lock is taken
1731 	 */
1732 
1733 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1734 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1735 
1736 	if (ast_entry == peer->self_ast_entry)
1737 		peer->self_ast_entry = NULL;
1738 
1739 	/*
1740 	 * release the reference only if it is mapped
1741 	 * to ast_table
1742 	 */
1743 	if (ast_entry->is_mapped)
1744 		soc->ast_table[ast_entry->ast_idx] = NULL;
1745 
1746 	ast_entry->peer_id = HTT_INVALID_PEER;
1747 }
1748 
1749 /*
1750  * dp_peer_del_ast() - Delete and free AST entry
1751  * @soc: SoC handle
1752  * @ast_entry: AST entry of the node
1753  *
1754  * This function removes the AST entry from peer and soc tables
1755  * It assumes caller has taken the ast lock to protect the access to these
1756  * tables
1757  *
1758  * Return: None
1759  */
1760 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1761 {
1762 	struct dp_peer *peer = NULL;
1763 
1764 	if (soc->ast_offload_support)
1765 		return;
1766 
1767 	if (!ast_entry) {
1768 		dp_info_rl("NULL AST entry");
1769 		return;
1770 	}
1771 
1772 	if (ast_entry->delete_in_progress) {
1773 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1774 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1775 			  ast_entry->type);
1776 		return;
1777 	}
1778 
1779 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1780 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
1781 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1782 
1783 	ast_entry->delete_in_progress = true;
1784 
1785 	/* In teardown del ast is called after setting logical delete state
1786 	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
1787 	 * state
1788 	 */
1789 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1790 				       DP_MOD_ID_AST);
1791 
1792 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
1793 
1794 	/* Remove SELF and STATIC entries in teardown itself */
1795 	if (!ast_entry->next_hop)
1796 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1797 
1798 	if (ast_entry->is_mapped)
1799 		soc->ast_table[ast_entry->ast_idx] = NULL;
1800 
1801 	/* if peer map v2 is enabled we are not freeing ast entry
1802 	 * here and it is supposed to be freed in unmap event (after
1803 	 * we receive delete confirmation from target)
1804 	 *
1805 	 * if peer_id is invalid we did not get the peer map event
1806 	 * for the peer free ast entry from here only in this case
1807 	 */
1808 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
1809 		goto end;
1810 
1811 	/* for WDS secondary entry ast_entry->next_hop would be set so
1812 	 * unlinking has to be done explicitly here.
1813 	 * As this entry is not a mapped entry unmap notification from
1814 	 * FW wil not come. Hence unlinkling is done right here.
1815 	 */
1816 
1817 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1818 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1819 
1820 	dp_peer_free_ast_entry(soc, ast_entry);
1821 
1822 end:
1823 	if (peer)
1824 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1825 }
1826 
1827 /*
1828  * dp_peer_update_ast() - Delete and free AST entry
1829  * @soc: SoC handle
1830  * @peer: peer to which ast node belongs
1831  * @ast_entry: AST entry of the node
1832  * @flags: wds or hmwds
1833  *
1834  * This function update the AST entry to the roamed peer and soc tables
1835  * It assumes caller has taken the ast lock to protect the access to these
1836  * tables
1837  *
1838  * Return: 0 if ast entry is updated successfully
1839  *         -1 failure
1840  */
1841 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1842 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1843 {
1844 	int ret = -1;
1845 	struct dp_peer *old_peer;
1846 
1847 	if (soc->ast_offload_support)
1848 		return QDF_STATUS_E_INVAL;
1849 
1850 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
1851 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
1852 		      peer->vdev->vdev_id, flags,
1853 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1854 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1855 
1856 	/* Do not send AST update in below cases
1857 	 *  1) Ast entry delete has already triggered
1858 	 *  2) Peer delete is already triggered
1859 	 *  3) We did not get the HTT map for create event
1860 	 */
1861 	if (ast_entry->delete_in_progress ||
1862 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
1863 	    !ast_entry->is_mapped)
1864 		return ret;
1865 
1866 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1867 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1868 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1869 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1870 		return 0;
1871 
1872 	/*
1873 	 * Avoids flood of WMI update messages sent to FW for same peer.
1874 	 */
1875 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
1876 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1877 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
1878 	    (ast_entry->is_active))
1879 		return 0;
1880 
1881 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1882 					 DP_MOD_ID_AST);
1883 	if (!old_peer)
1884 		return 0;
1885 
1886 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1887 
1888 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1889 
1890 	ast_entry->peer_id = peer->peer_id;
1891 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1892 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1893 	ast_entry->vdev_id = peer->vdev->vdev_id;
1894 	ast_entry->is_active = TRUE;
1895 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1896 
1897 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1898 				soc->ctrl_psoc,
1899 				peer->vdev->vdev_id,
1900 				ast_entry->mac_addr.raw,
1901 				peer->mac_addr.raw,
1902 				flags);
1903 
1904 	return ret;
1905 }
1906 
1907 /*
1908  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1909  * @soc: SoC handle
1910  * @ast_entry: AST entry of the node
1911  *
1912  * This function gets the pdev_id from the ast entry.
1913  *
1914  * Return: (uint8_t) pdev_id
1915  */
1916 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1917 				struct dp_ast_entry *ast_entry)
1918 {
1919 	return ast_entry->pdev_id;
1920 }
1921 
1922 /*
1923  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1924  * @soc: SoC handle
1925  * @ast_entry: AST entry of the node
1926  *
1927  * This function gets the next hop from the ast entry.
1928  *
1929  * Return: (uint8_t) next_hop
1930  */
1931 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1932 				struct dp_ast_entry *ast_entry)
1933 {
1934 	return ast_entry->next_hop;
1935 }
1936 
1937 /*
1938  * dp_peer_ast_set_type() - set type from the ast entry
1939  * @soc: SoC handle
1940  * @ast_entry: AST entry of the node
1941  *
1942  * This function sets the type in the ast entry.
1943  *
1944  * Return:
1945  */
1946 void dp_peer_ast_set_type(struct dp_soc *soc,
1947 				struct dp_ast_entry *ast_entry,
1948 				enum cdp_txrx_ast_entry_type type)
1949 {
1950 	ast_entry->type = type;
1951 }
1952 
1953 #else
1954 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1955 			   struct dp_peer *peer,
1956 			   uint8_t *mac_addr,
1957 			   enum cdp_txrx_ast_entry_type type,
1958 			   uint32_t flags)
1959 {
1960 	return QDF_STATUS_E_FAILURE;
1961 }
1962 
1963 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1964 {
1965 }
1966 
1967 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1968 			struct dp_ast_entry *ast_entry, uint32_t flags)
1969 {
1970 	return 1;
1971 }
1972 
1973 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1974 					       uint8_t *ast_mac_addr)
1975 {
1976 	return NULL;
1977 }
1978 
1979 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1980 						     uint8_t *ast_mac_addr,
1981 						     uint8_t pdev_id)
1982 {
1983 	return NULL;
1984 }
1985 
1986 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1987 {
1988 	return QDF_STATUS_SUCCESS;
1989 }
1990 
1991 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1992 					 struct dp_peer *peer,
1993 					 uint8_t *mac_addr,
1994 					 uint16_t hw_peer_id,
1995 					 uint8_t vdev_id,
1996 					 uint16_t ast_hash,
1997 					 uint8_t is_wds)
1998 {
1999 	return QDF_STATUS_SUCCESS;
2000 }
2001 
2002 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2003 {
2004 }
2005 
2006 void dp_peer_ast_set_type(struct dp_soc *soc,
2007 				struct dp_ast_entry *ast_entry,
2008 				enum cdp_txrx_ast_entry_type type)
2009 {
2010 }
2011 
2012 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2013 				struct dp_ast_entry *ast_entry)
2014 {
2015 	return 0xff;
2016 }
2017 
2018 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2019 				struct dp_ast_entry *ast_entry)
2020 {
2021 	return 0xff;
2022 }
2023 
2024 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2025 		       struct dp_ast_entry *ast_entry, uint32_t flags)
2026 {
2027 	return 1;
2028 }
2029 
2030 #endif
2031 
2032 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2033 			      struct dp_ast_entry *ast_entry,
2034 			      struct dp_peer *peer)
2035 {
2036 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2037 	bool delete_in_fw = false;
2038 
2039 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2040 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2041 		  __func__, ast_entry->type, ast_entry->pdev_id,
2042 		  ast_entry->vdev_id,
2043 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2044 		  ast_entry->next_hop, ast_entry->peer_id);
2045 
2046 	/*
2047 	 * If peer state is logical delete, the peer is about to get
2048 	 * teared down with a peer delete command to firmware,
2049 	 * which will cleanup all the wds ast entries.
2050 	 * So, no need to send explicit wds ast delete to firmware.
2051 	 */
2052 	if (ast_entry->next_hop) {
2053 		if (peer && dp_peer_state_cmp(peer,
2054 					      DP_PEER_STATE_LOGICAL_DELETE))
2055 			delete_in_fw = false;
2056 		else
2057 			delete_in_fw = true;
2058 
2059 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
2060 						    ast_entry->vdev_id,
2061 						    ast_entry->mac_addr.raw,
2062 						    ast_entry->type,
2063 						    delete_in_fw);
2064 	}
2065 
2066 }
2067 
2068 #ifdef FEATURE_WDS
2069 /**
2070  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2071  * @soc: soc handle
2072  * @peer: peer handle
2073  *
2074  * Free all the wds ast entries associated with peer
2075  *
2076  * Return: Number of wds ast entries freed
2077  */
2078 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2079 					     struct dp_peer *peer)
2080 {
2081 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2082 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2083 	uint32_t num_ast = 0;
2084 
2085 	TAILQ_INIT(&ast_local_list);
2086 	qdf_spin_lock_bh(&soc->ast_lock);
2087 
2088 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2089 		if (ast_entry->next_hop)
2090 			num_ast++;
2091 
2092 		if (ast_entry->is_mapped)
2093 			soc->ast_table[ast_entry->ast_idx] = NULL;
2094 
2095 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2096 		DP_STATS_INC(soc, ast.deleted, 1);
2097 		dp_peer_ast_hash_remove(soc, ast_entry);
2098 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2099 				  ase_list_elem);
2100 		soc->num_ast_entries--;
2101 	}
2102 
2103 	qdf_spin_unlock_bh(&soc->ast_lock);
2104 
2105 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2106 			   temp_ast_entry) {
2107 		if (ast_entry->callback)
2108 			ast_entry->callback(soc->ctrl_psoc,
2109 					    dp_soc_to_cdp_soc(soc),
2110 					    ast_entry->cookie,
2111 					    CDP_TXRX_AST_DELETED);
2112 
2113 		qdf_mem_free(ast_entry);
2114 	}
2115 
2116 	return num_ast;
2117 }
2118 /**
2119  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2120  * @soc: soc handle
2121  * @peer: peer handle
2122  * @free_wds_count - number of wds entries freed by FW with peer delete
2123  *
2124  * Free all the wds ast entries associated with peer and compare with
2125  * the value received from firmware
2126  *
2127  * Return: Number of wds ast entries freed
2128  */
2129 static void
2130 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2131 			  uint32_t free_wds_count)
2132 {
2133 	uint32_t wds_deleted = 0;
2134 
2135 	if (soc->ast_offload_support)
2136 		return;
2137 
2138 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2139 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2140 	    (free_wds_count != wds_deleted)) {
2141 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
2142 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT")number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d",
2143 			 peer, peer->mac_addr.raw, free_wds_count,
2144 			 wds_deleted);
2145 	}
2146 }
2147 
2148 #else
2149 static void
2150 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2151 			  uint32_t free_wds_count)
2152 {
2153 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
2154 
2155 	qdf_spin_lock_bh(&soc->ast_lock);
2156 
2157 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2158 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2159 
2160 		if (ast_entry->is_mapped)
2161 			soc->ast_table[ast_entry->ast_idx] = NULL;
2162 
2163 		dp_peer_free_ast_entry(soc, ast_entry);
2164 	}
2165 
2166 	peer->self_ast_entry = NULL;
2167 	qdf_spin_unlock_bh(&soc->ast_lock);
2168 }
2169 #endif
2170 
2171 /**
2172  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2173  * @soc: soc handle
2174  * @peer: peer handle
2175  * @vdev_id: vdev_id
2176  * @mac_addr: mac address of the AST entry to searc and delete
2177  *
2178  * find the ast entry from the peer list using the mac address and free
2179  * the entry.
2180  *
2181  * Return: SUCCESS or NOENT
2182  */
2183 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2184 					 struct dp_peer *peer,
2185 					 uint8_t vdev_id,
2186 					 uint8_t *mac_addr)
2187 {
2188 	struct dp_ast_entry *ast_entry;
2189 	void *cookie = NULL;
2190 	txrx_ast_free_cb cb = NULL;
2191 
2192 	/*
2193 	 * release the reference only if it is mapped
2194 	 * to ast_table
2195 	 */
2196 
2197 	qdf_spin_lock_bh(&soc->ast_lock);
2198 
2199 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2200 	if (!ast_entry) {
2201 		qdf_spin_unlock_bh(&soc->ast_lock);
2202 		return QDF_STATUS_E_NOENT;
2203 	} else if (ast_entry->is_mapped) {
2204 		soc->ast_table[ast_entry->ast_idx] = NULL;
2205 	}
2206 
2207 	cb = ast_entry->callback;
2208 	cookie = ast_entry->cookie;
2209 
2210 
2211 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2212 
2213 	dp_peer_free_ast_entry(soc, ast_entry);
2214 
2215 	qdf_spin_unlock_bh(&soc->ast_lock);
2216 
2217 	if (cb) {
2218 		cb(soc->ctrl_psoc,
2219 		   dp_soc_to_cdp_soc(soc),
2220 		   cookie,
2221 		   CDP_TXRX_AST_DELETED);
2222 	}
2223 
2224 	return QDF_STATUS_SUCCESS;
2225 }
2226 
2227 void dp_peer_find_hash_erase(struct dp_soc *soc)
2228 {
2229 	int i;
2230 
2231 	/*
2232 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2233 	 * it's known that the soc is no longer in use.
2234 	 */
2235 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2236 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2237 			struct dp_peer *peer, *peer_next;
2238 
2239 			/*
2240 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2241 			 * memory access violation after peer is freed
2242 			 */
2243 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2244 				hash_list_elem, peer_next) {
2245 				/*
2246 				 * Don't remove the peer from the hash table -
2247 				 * that would modify the list we are currently
2248 				 * traversing, and it's not necessary anyway.
2249 				 */
2250 				/*
2251 				 * Artificially adjust the peer's ref count to
2252 				 * 1, so it will get deleted by
2253 				 * dp_peer_unref_delete.
2254 				 */
2255 				/* set to zero */
2256 				qdf_atomic_init(&peer->ref_cnt);
2257 				for (i = 0; i < DP_MOD_ID_MAX; i++)
2258 					qdf_atomic_init(&peer->mod_refs[i]);
2259 				/* incr to one */
2260 				qdf_atomic_inc(&peer->ref_cnt);
2261 				qdf_atomic_inc(&peer->mod_refs
2262 						[DP_MOD_ID_CONFIG]);
2263 				dp_peer_unref_delete(peer,
2264 						     DP_MOD_ID_CONFIG);
2265 			}
2266 		}
2267 	}
2268 }
2269 
2270 void dp_peer_ast_table_detach(struct dp_soc *soc)
2271 {
2272 	if (soc->ast_table) {
2273 		qdf_mem_free(soc->ast_table);
2274 		soc->ast_table = NULL;
2275 	}
2276 }
2277 
2278 /*
2279  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2280  * @soc: soc handle
2281  *
2282  * return: none
2283  */
2284 void dp_peer_find_map_detach(struct dp_soc *soc)
2285 {
2286 	if (soc->peer_id_to_obj_map) {
2287 		qdf_mem_free(soc->peer_id_to_obj_map);
2288 		soc->peer_id_to_obj_map = NULL;
2289 		qdf_spinlock_destroy(&soc->peer_map_lock);
2290 	}
2291 }
2292 
2293 #ifndef AST_OFFLOAD_ENABLE
2294 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2295 {
2296 	QDF_STATUS status;
2297 
2298 	status = dp_peer_find_map_attach(soc);
2299 	if (!QDF_IS_STATUS_SUCCESS(status))
2300 		return status;
2301 
2302 	status = dp_peer_find_hash_attach(soc);
2303 	if (!QDF_IS_STATUS_SUCCESS(status))
2304 		goto map_detach;
2305 
2306 	status = dp_peer_ast_table_attach(soc);
2307 	if (!QDF_IS_STATUS_SUCCESS(status))
2308 		goto hash_detach;
2309 
2310 	status = dp_peer_ast_hash_attach(soc);
2311 	if (!QDF_IS_STATUS_SUCCESS(status))
2312 		goto ast_table_detach;
2313 
2314 	status = dp_peer_mec_hash_attach(soc);
2315 	if (QDF_IS_STATUS_SUCCESS(status)) {
2316 		dp_soc_wds_attach(soc);
2317 		return status;
2318 	}
2319 
2320 	dp_peer_ast_hash_detach(soc);
2321 ast_table_detach:
2322 	dp_peer_ast_table_detach(soc);
2323 hash_detach:
2324 	dp_peer_find_hash_detach(soc);
2325 map_detach:
2326 	dp_peer_find_map_detach(soc);
2327 
2328 	return status;
2329 }
2330 #else
2331 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2332 {
2333 	QDF_STATUS status;
2334 
2335 	status = dp_peer_find_map_attach(soc);
2336 	if (!QDF_IS_STATUS_SUCCESS(status))
2337 		return status;
2338 
2339 	status = dp_peer_find_hash_attach(soc);
2340 	if (!QDF_IS_STATUS_SUCCESS(status))
2341 		goto map_detach;
2342 
2343 	return status;
2344 map_detach:
2345 	dp_peer_find_map_detach(soc);
2346 
2347 	return status;
2348 }
2349 #endif
2350 
2351 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2352 	union hal_reo_status *reo_status)
2353 {
2354 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2355 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2356 
2357 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2358 		return;
2359 
2360 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2361 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2362 			       queue_status->header.status, rx_tid->tid);
2363 		return;
2364 	}
2365 
2366 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2367 		       "ssn: %d\n"
2368 		       "curr_idx  : %d\n"
2369 		       "pn_31_0   : %08x\n"
2370 		       "pn_63_32  : %08x\n"
2371 		       "pn_95_64  : %08x\n"
2372 		       "pn_127_96 : %08x\n"
2373 		       "last_rx_enq_tstamp : %08x\n"
2374 		       "last_rx_deq_tstamp : %08x\n"
2375 		       "rx_bitmap_31_0     : %08x\n"
2376 		       "rx_bitmap_63_32    : %08x\n"
2377 		       "rx_bitmap_95_64    : %08x\n"
2378 		       "rx_bitmap_127_96   : %08x\n"
2379 		       "rx_bitmap_159_128  : %08x\n"
2380 		       "rx_bitmap_191_160  : %08x\n"
2381 		       "rx_bitmap_223_192  : %08x\n"
2382 		       "rx_bitmap_255_224  : %08x\n",
2383 		       rx_tid->tid,
2384 		       queue_status->ssn, queue_status->curr_idx,
2385 		       queue_status->pn_31_0, queue_status->pn_63_32,
2386 		       queue_status->pn_95_64, queue_status->pn_127_96,
2387 		       queue_status->last_rx_enq_tstamp,
2388 		       queue_status->last_rx_deq_tstamp,
2389 		       queue_status->rx_bitmap_31_0,
2390 		       queue_status->rx_bitmap_63_32,
2391 		       queue_status->rx_bitmap_95_64,
2392 		       queue_status->rx_bitmap_127_96,
2393 		       queue_status->rx_bitmap_159_128,
2394 		       queue_status->rx_bitmap_191_160,
2395 		       queue_status->rx_bitmap_223_192,
2396 		       queue_status->rx_bitmap_255_224);
2397 
2398 	DP_PRINT_STATS(
2399 		       "curr_mpdu_cnt      : %d\n"
2400 		       "curr_msdu_cnt      : %d\n"
2401 		       "fwd_timeout_cnt    : %d\n"
2402 		       "fwd_bar_cnt        : %d\n"
2403 		       "dup_cnt            : %d\n"
2404 		       "frms_in_order_cnt  : %d\n"
2405 		       "bar_rcvd_cnt       : %d\n"
2406 		       "mpdu_frms_cnt      : %d\n"
2407 		       "msdu_frms_cnt      : %d\n"
2408 		       "total_byte_cnt     : %d\n"
2409 		       "late_recv_mpdu_cnt : %d\n"
2410 		       "win_jump_2k        : %d\n"
2411 		       "hole_cnt           : %d\n",
2412 		       queue_status->curr_mpdu_cnt,
2413 		       queue_status->curr_msdu_cnt,
2414 		       queue_status->fwd_timeout_cnt,
2415 		       queue_status->fwd_bar_cnt,
2416 		       queue_status->dup_cnt,
2417 		       queue_status->frms_in_order_cnt,
2418 		       queue_status->bar_rcvd_cnt,
2419 		       queue_status->mpdu_frms_cnt,
2420 		       queue_status->msdu_frms_cnt,
2421 		       queue_status->total_cnt,
2422 		       queue_status->late_recv_mpdu_cnt,
2423 		       queue_status->win_jump_2k,
2424 		       queue_status->hole_cnt);
2425 
2426 	DP_PRINT_STATS("Addba Req          : %d\n"
2427 			"Addba Resp         : %d\n"
2428 			"Addba Resp success : %d\n"
2429 			"Addba Resp failed  : %d\n"
2430 			"Delba Req received : %d\n"
2431 			"Delba Tx success   : %d\n"
2432 			"Delba Tx Fail      : %d\n"
2433 			"BA window size     : %d\n"
2434 			"Pn size            : %d\n",
2435 			rx_tid->num_of_addba_req,
2436 			rx_tid->num_of_addba_resp,
2437 			rx_tid->num_addba_rsp_success,
2438 			rx_tid->num_addba_rsp_failed,
2439 			rx_tid->num_of_delba_req,
2440 			rx_tid->delba_tx_success_cnt,
2441 			rx_tid->delba_tx_fail_cnt,
2442 			rx_tid->ba_win_size,
2443 			rx_tid->pn_size);
2444 }
2445 
2446 /*
2447  * dp_peer_find_add_id() - map peer_id with peer
2448  * @soc: soc handle
2449  * @peer_mac_addr: peer mac address
2450  * @peer_id: peer id to be mapped
2451  * @hw_peer_id: HW ast index
2452  * @vdev_id: vdev_id
2453  *
2454  * return: peer in success
2455  *         NULL in failure
2456  */
2457 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2458 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2459 	uint8_t vdev_id)
2460 {
2461 	struct dp_peer *peer;
2462 
2463 	QDF_ASSERT(peer_id <= soc->max_peer_id);
2464 	/* check if there's already a peer object with this MAC address */
2465 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
2466 		0 /* is aligned */, vdev_id, DP_MOD_ID_CONFIG);
2467 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2468 		    soc, peer, peer_id, vdev_id,
2469 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2470 
2471 	if (peer) {
2472 		/* peer's ref count was already incremented by
2473 		 * peer_find_hash_find
2474 		 */
2475 		dp_peer_info("%pK: ref_cnt: %d", soc,
2476 			     qdf_atomic_read(&peer->ref_cnt));
2477 
2478 		/*
2479 		 * if peer is in logical delete CP triggered delete before map
2480 		 * is received ignore this event
2481 		 */
2482 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2483 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2484 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2485 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2486 				 vdev_id);
2487 			return NULL;
2488 		}
2489 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2490 		dp_mlo_partner_chips_map(soc, peer, peer_id);
2491 		if (peer->peer_id == HTT_INVALID_PEER) {
2492 			peer->peer_id = peer_id;
2493 			dp_monitor_peer_tid_peer_id_update(soc, peer,
2494 							   peer->peer_id);
2495 		} else {
2496 			QDF_ASSERT(0);
2497 		}
2498 
2499 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2500 		return peer;
2501 	}
2502 
2503 	return NULL;
2504 }
2505 
2506 #ifdef WLAN_FEATURE_11BE_MLO
2507 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2508 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2509 					 uint16_t peer_id)
2510 {
2511 	return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2512 }
2513 #else
2514 static inline uint16_t dp_gen_ml_peer_id(struct dp_soc *soc,
2515 					 uint16_t peer_id)
2516 {
2517 	return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2518 }
2519 #endif
2520 
2521 QDF_STATUS
2522 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2523 			   uint8_t *peer_mac_addr,
2524 			   struct dp_mlo_flow_override_info *mlo_flow_info)
2525 {
2526 	struct dp_peer *peer = NULL;
2527 	uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2528 	uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2529 	uint8_t vdev_id = DP_VDEV_ALL;
2530 	uint8_t is_wds = 0;
2531 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2532 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2533 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2534 
2535 	dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2536 		soc, peer_id, ml_peer_id,
2537 		QDF_MAC_ADDR_REF(peer_mac_addr));
2538 
2539 	peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2540 				   hw_peer_id, vdev_id);
2541 
2542 	if (peer) {
2543 		if (wlan_op_mode_sta == peer->vdev->opmode &&
2544 		    qdf_mem_cmp(peer->mac_addr.raw,
2545 				peer->vdev->mld_mac_addr.raw,
2546 				QDF_MAC_ADDR_SIZE) != 0) {
2547 			dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2548 			peer->bss_peer = 1;
2549 		}
2550 
2551 		if (peer->vdev->opmode == wlan_op_mode_sta) {
2552 			peer->vdev->bss_ast_hash = ast_hash;
2553 			peer->vdev->bss_ast_idx = hw_peer_id;
2554 		}
2555 
2556 		/* Add ast entry incase self ast entry is
2557 		 * deleted due to DP CP sync issue
2558 		 *
2559 		 * self_ast_entry is modified in peer create
2560 		 * and peer unmap path which cannot run in
2561 		 * parllel with peer map, no lock need before
2562 		 * referring it
2563 		 */
2564 		if (!peer->self_ast_entry) {
2565 			dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2566 				QDF_MAC_ADDR_REF(peer_mac_addr));
2567 			dp_peer_add_ast(soc, peer,
2568 					peer_mac_addr,
2569 					type, 0);
2570 		}
2571 	}
2572 
2573 	err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2574 			      vdev_id, ast_hash, is_wds);
2575 
2576 	return err;
2577 }
2578 #endif
2579 
2580 /**
2581  * dp_rx_peer_map_handler() - handle peer map event from firmware
2582  * @soc_handle - genereic soc handle
2583  * @peeri_id - peer_id from firmware
2584  * @hw_peer_id - ast index for this peer
2585  * @vdev_id - vdev ID
2586  * @peer_mac_addr - mac address of the peer
2587  * @ast_hash - ast hash value
2588  * @is_wds - flag to indicate peer map event for WDS ast entry
2589  *
2590  * associate the peer_id that firmware provided with peer entry
2591  * and update the ast table in the host with the hw_peer_id.
2592  *
2593  * Return: QDF_STATUS code
2594  */
2595 
2596 QDF_STATUS
2597 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2598 		       uint16_t hw_peer_id, uint8_t vdev_id,
2599 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2600 		       uint8_t is_wds)
2601 {
2602 	struct dp_peer *peer = NULL;
2603 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2604 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2605 
2606 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2607 		soc, peer_id, hw_peer_id,
2608 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2609 
2610 	/* Peer map event for WDS ast entry get the peer from
2611 	 * obj map
2612 	 */
2613 	if (is_wds) {
2614 		if (!soc->ast_offload_support) {
2615 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2616 						     DP_MOD_ID_HTT);
2617 
2618 			err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2619 					      hw_peer_id,
2620 					      vdev_id, ast_hash, is_wds);
2621 			if (peer)
2622 				dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2623 		}
2624 	} else {
2625 		/*
2626 		 * It's the responsibility of the CP and FW to ensure
2627 		 * that peer is created successfully. Ideally DP should
2628 		 * not hit the below condition for directly assocaited
2629 		 * peers.
2630 		 */
2631 		if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
2632 		    (hw_peer_id >=
2633 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
2634 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2635 			qdf_assert_always(0);
2636 		}
2637 
2638 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2639 					   hw_peer_id, vdev_id);
2640 
2641 		if (peer) {
2642 			if (wlan_op_mode_sta == peer->vdev->opmode &&
2643 			    qdf_mem_cmp(peer->mac_addr.raw,
2644 					peer->vdev->mac_addr.raw,
2645 					QDF_MAC_ADDR_SIZE) != 0) {
2646 				dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2647 				peer->bss_peer = 1;
2648 			}
2649 
2650 			if (peer->vdev->opmode == wlan_op_mode_sta) {
2651 				peer->vdev->bss_ast_hash = ast_hash;
2652 				peer->vdev->bss_ast_idx = hw_peer_id;
2653 			}
2654 
2655 			/* Add ast entry incase self ast entry is
2656 			 * deleted due to DP CP sync issue
2657 			 *
2658 			 * self_ast_entry is modified in peer create
2659 			 * and peer unmap path which cannot run in
2660 			 * parllel with peer map, no lock need before
2661 			 * referring it
2662 			 */
2663 			if (!soc->ast_offload_support &&
2664 				!peer->self_ast_entry) {
2665 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2666 					QDF_MAC_ADDR_REF(peer_mac_addr));
2667 				dp_peer_add_ast(soc, peer,
2668 						peer_mac_addr,
2669 						type, 0);
2670 			}
2671 		}
2672 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2673 				      vdev_id, ast_hash, is_wds);
2674 	}
2675 
2676 	return err;
2677 }
2678 
2679 /**
2680  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
2681  * @soc_handle - genereic soc handle
2682  * @peeri_id - peer_id from firmware
2683  * @vdev_id - vdev ID
2684  * @mac_addr - mac address of the peer or wds entry
2685  * @is_wds - flag to indicate peer map event for WDS ast entry
2686  * @free_wds_count - number of wds entries freed by FW with peer delete
2687  *
2688  * Return: none
2689  */
2690 void
2691 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2692 			 uint8_t vdev_id, uint8_t *mac_addr,
2693 			 uint8_t is_wds, uint32_t free_wds_count)
2694 {
2695 	struct dp_peer *peer;
2696 	struct dp_vdev *vdev = NULL;
2697 
2698 	if (soc->ast_offload_support && is_wds)
2699 		return;
2700 
2701 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2702 
2703 	/*
2704 	 * Currently peer IDs are assigned for vdevs as well as peers.
2705 	 * If the peer ID is for a vdev, then the peer pointer stored
2706 	 * in peer_id_to_obj_map will be NULL.
2707 	 */
2708 	if (!peer) {
2709 		dp_err("Received unmap event for invalid peer_id %u",
2710 		       peer_id);
2711 		return;
2712 	}
2713 
2714 	/* If V2 Peer map messages are enabled AST entry has to be
2715 	 * freed here
2716 	 */
2717 	if (is_wds) {
2718 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
2719 						   mac_addr)) {
2720 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2721 			return;
2722 		}
2723 
2724 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
2725 			  peer, peer->peer_id,
2726 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2727 			  QDF_MAC_ADDR_REF(mac_addr), vdev_id,
2728 			  is_wds);
2729 
2730 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2731 		return;
2732 	}
2733 
2734 	dp_peer_clean_wds_entries(soc, peer, free_wds_count);
2735 
2736 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
2737 		soc, peer_id, peer);
2738 
2739 	dp_peer_find_id_to_obj_remove(soc, peer_id);
2740 	dp_mlo_partner_chips_unmap(soc, peer_id);
2741 	peer->peer_id = HTT_INVALID_PEER;
2742 
2743 	/*
2744 	 *	 Reset ast flow mapping table
2745 	 */
2746 	if (!soc->ast_offload_support)
2747 		dp_peer_reset_flowq_map(peer);
2748 
2749 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
2750 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
2751 				peer_id, vdev_id);
2752 	}
2753 
2754 	vdev = peer->vdev;
2755 	DP_UPDATE_STATS(vdev, peer);
2756 
2757 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
2758 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2759 	/*
2760 	 * Remove a reference to the peer.
2761 	 * If there are no more references, delete the peer object.
2762 	 */
2763 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2764 }
2765 
2766 #ifdef WLAN_FEATURE_11BE_MLO
2767 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
2768 {
2769 	uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2770 	uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
2771 	uint8_t vdev_id = DP_VDEV_ALL;
2772 	uint8_t is_wds = 0;
2773 
2774 	dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
2775 		soc, peer_id);
2776 
2777 	dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
2778 				 mac_addr, is_wds,
2779 				 DP_PEER_WDS_COUNT_INVALID);
2780 }
2781 #endif
2782 
2783 #ifndef AST_OFFLOAD_ENABLE
2784 void
2785 dp_peer_find_detach(struct dp_soc *soc)
2786 {
2787 	dp_soc_wds_detach(soc);
2788 	dp_peer_find_map_detach(soc);
2789 	dp_peer_find_hash_detach(soc);
2790 	dp_peer_ast_hash_detach(soc);
2791 	dp_peer_ast_table_detach(soc);
2792 	dp_peer_mec_hash_detach(soc);
2793 }
2794 #else
2795 void
2796 dp_peer_find_detach(struct dp_soc *soc)
2797 {
2798 	dp_peer_find_map_detach(soc);
2799 	dp_peer_find_hash_detach(soc);
2800 }
2801 #endif
2802 
2803 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
2804 	union hal_reo_status *reo_status)
2805 {
2806 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2807 
2808 	if ((reo_status->rx_queue_status.header.status !=
2809 		HAL_REO_CMD_SUCCESS) &&
2810 		(reo_status->rx_queue_status.header.status !=
2811 		HAL_REO_CMD_DRAIN)) {
2812 		/* Should not happen normally. Just print error for now */
2813 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
2814 			    soc, reo_status->rx_queue_status.header.status,
2815 			    rx_tid->tid);
2816 	}
2817 }
2818 
2819 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
2820 {
2821 	struct ol_if_ops *ol_ops = NULL;
2822 	bool is_roaming = false;
2823 	uint8_t vdev_id = -1;
2824 	struct cdp_soc_t *soc;
2825 
2826 	if (!peer) {
2827 		dp_peer_info("Peer is NULL. No roaming possible");
2828 		return false;
2829 	}
2830 
2831 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
2832 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
2833 
2834 	if (ol_ops && ol_ops->is_roam_inprogress) {
2835 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
2836 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
2837 	}
2838 
2839 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
2840 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
2841 
2842 	return is_roaming;
2843 }
2844 
2845 #ifdef WLAN_FEATURE_11BE_MLO
2846 /**
2847  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
2848 			     setup is necessary
2849  * @peer: DP peer handle
2850  *
2851  * Return: true - allow, false - disallow
2852  */
2853 static inline
2854 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
2855 {
2856 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->assoc_link)
2857 		return false;
2858 
2859 	return true;
2860 }
2861 
2862 /**
2863  * dp_rx_tid_update_allow() - check if rx_tid update needed
2864  * @peer: DP peer handle
2865  *
2866  * Return: true - allow, false - disallow
2867  */
2868 static inline
2869 bool dp_rx_tid_update_allow(struct dp_peer *peer)
2870 {
2871 	/* not as expected for MLO connection link peer */
2872 	if (IS_MLO_DP_LINK_PEER(peer)) {
2873 		QDF_BUG(0);
2874 		return false;
2875 	}
2876 
2877 	return true;
2878 }
2879 
2880 /**
2881  * dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW
2882 				      per peer type
2883  * @soc: DP Soc handle
2884  * @peer: dp peer to operate on
2885  * @tid: TID
2886  * @ba_window_size: BlockAck window size
2887  *
2888  * Return: 0 - success, others - failure
2889  */
2890 static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
2891 						 struct dp_peer *peer,
2892 						 int tid,
2893 						 uint32_t ba_window_size)
2894 {
2895 	uint8_t i;
2896 	struct dp_mld_link_peers link_peers_info;
2897 	struct dp_peer *link_peer;
2898 	struct dp_rx_tid *rx_tid;
2899 	struct dp_soc *link_peer_soc;
2900 
2901 	if (IS_MLO_DP_MLD_PEER(peer)) {
2902 		/* get link peers with reference */
2903 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
2904 						    &link_peers_info,
2905 						    DP_MOD_ID_CDP);
2906 		/* send WMI cmd to each link peers */
2907 		for (i = 0; i < link_peers_info.num_links; i++) {
2908 			link_peer = link_peers_info.link_peers[i];
2909 			rx_tid = &link_peer->rx_tid[tid];
2910 			link_peer_soc = link_peer->vdev->pdev->soc;
2911 			if (link_peer_soc->cdp_soc.ol_ops->
2912 					peer_rx_reorder_queue_setup) {
2913 				if (link_peer_soc->cdp_soc.ol_ops->
2914 					peer_rx_reorder_queue_setup(
2915 						link_peer_soc->ctrl_psoc,
2916 						link_peer->vdev->pdev->pdev_id,
2917 						link_peer->vdev->vdev_id,
2918 						link_peer->mac_addr.raw,
2919 						rx_tid->hw_qdesc_paddr,
2920 						tid, tid,
2921 						1, ba_window_size)) {
2922 					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
2923 						    link_peer_soc, tid);
2924 					return QDF_STATUS_E_FAILURE;
2925 				}
2926 			}
2927 		}
2928 		/* release link peers reference */
2929 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
2930 	} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
2931 			rx_tid = &peer->rx_tid[tid];
2932 			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2933 				if (soc->cdp_soc.ol_ops->
2934 					peer_rx_reorder_queue_setup(
2935 						soc->ctrl_psoc,
2936 						peer->vdev->pdev->pdev_id,
2937 						peer->vdev->vdev_id,
2938 						peer->mac_addr.raw,
2939 						rx_tid->hw_qdesc_paddr,
2940 						tid, tid,
2941 						1, ba_window_size)) {
2942 					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
2943 						    soc, tid);
2944 					return QDF_STATUS_E_FAILURE;
2945 				}
2946 			}
2947 	} else {
2948 		dp_peer_err("invalid peer type %d", peer->peer_type);
2949 		return QDF_STATUS_E_FAILURE;
2950 	}
2951 
2952 	return QDF_STATUS_SUCCESS;
2953 }
2954 #else
2955 static inline
2956 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
2957 {
2958 	return true;
2959 }
2960 
2961 static inline
2962 bool dp_rx_tid_update_allow(struct dp_peer *peer)
2963 {
2964 	return true;
2965 }
2966 
2967 static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
2968 						 struct dp_peer *peer,
2969 						 int tid,
2970 						 uint32_t ba_window_size)
2971 {
2972 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2973 
2974 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2975 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2976 		    soc->ctrl_psoc,
2977 		    peer->vdev->pdev->pdev_id,
2978 		    peer->vdev->vdev_id,
2979 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2980 		    1, ba_window_size)) {
2981 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
2982 				    soc, tid);
2983 			return QDF_STATUS_E_FAILURE;
2984 		}
2985 	}
2986 
2987 	return QDF_STATUS_SUCCESS;
2988 }
2989 #endif
2990 
2991 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
2992 					 ba_window_size, uint32_t start_seq,
2993 					 bool bar_update)
2994 {
2995 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2996 	struct dp_soc *soc = peer->vdev->pdev->soc;
2997 	struct hal_reo_cmd_params params;
2998 
2999 	if (!dp_rx_tid_update_allow(peer)) {
3000 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
3001 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3002 		return QDF_STATUS_E_FAILURE;
3003 	}
3004 
3005 	qdf_mem_zero(&params, sizeof(params));
3006 
3007 	params.std.need_status = 1;
3008 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3009 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3010 	params.u.upd_queue_params.update_ba_window_size = 1;
3011 	params.u.upd_queue_params.ba_window_size = ba_window_size;
3012 
3013 	if (start_seq < IEEE80211_SEQ_MAX) {
3014 		params.u.upd_queue_params.update_ssn = 1;
3015 		params.u.upd_queue_params.ssn = start_seq;
3016 	} else {
3017 	    dp_set_ssn_valid_flag(&params, 0);
3018 	}
3019 
3020 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3021 			    dp_rx_tid_update_cb, rx_tid)) {
3022 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3023 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3024 	}
3025 
3026 	rx_tid->ba_win_size = ba_window_size;
3027 
3028 	if (dp_get_peer_vdev_roaming_in_progress(peer))
3029 		return QDF_STATUS_E_PERM;
3030 
3031 	if (!bar_update)
3032 		dp_peer_rx_reorder_queue_setup(soc, peer,
3033 					       tid, ba_window_size);
3034 
3035 	return QDF_STATUS_SUCCESS;
3036 }
3037 
3038 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3039 /*
3040  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
3041  *                                    the deferred list
3042  * @soc: Datapath soc handle
3043  * @free_desc: REO DESC reference that needs to be freed
3044  *
3045  * Return: true if enqueued, else false
3046  */
3047 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3048 					   struct reo_desc_list_node *freedesc)
3049 {
3050 	struct reo_desc_deferred_freelist_node *desc;
3051 
3052 	if (!qdf_atomic_read(&soc->cmn_init_done))
3053 		return false;
3054 
3055 	desc = qdf_mem_malloc(sizeof(*desc));
3056 	if (!desc)
3057 		return false;
3058 
3059 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
3060 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
3061 	desc->hw_qdesc_vaddr_unaligned =
3062 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
3063 	desc->free_ts = qdf_get_system_timestamp();
3064 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
3065 
3066 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3067 	if (!soc->reo_desc_deferred_freelist_init) {
3068 		qdf_mem_free(desc);
3069 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3070 		return false;
3071 	}
3072 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
3073 			     (qdf_list_node_t *)desc);
3074 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3075 
3076 	return true;
3077 }
3078 
3079 /*
3080  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
3081  *                            based on time threshold
3082  * @soc: Datapath soc handle
3083  * @free_desc: REO DESC reference that needs to be freed
3084  *
3085  * Return: true if enqueued, else false
3086  */
3087 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3088 {
3089 	struct reo_desc_deferred_freelist_node *desc;
3090 	unsigned long curr_ts = qdf_get_system_timestamp();
3091 
3092 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3093 
3094 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
3095 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3096 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
3097 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3098 				      (qdf_list_node_t **)&desc);
3099 
3100 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
3101 
3102 		qdf_mem_unmap_nbytes_single(soc->osdev,
3103 					    desc->hw_qdesc_paddr,
3104 					    QDF_DMA_BIDIRECTIONAL,
3105 					    desc->hw_qdesc_alloc_size);
3106 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3107 		qdf_mem_free(desc);
3108 
3109 		curr_ts = qdf_get_system_timestamp();
3110 	}
3111 
3112 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3113 }
3114 #else
3115 static inline bool
3116 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
3117 			       struct reo_desc_list_node *freedesc)
3118 {
3119 	return false;
3120 }
3121 
3122 static void dp_reo_desc_defer_free(struct dp_soc *soc)
3123 {
3124 }
3125 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3126 
3127 /*
3128  * dp_reo_desc_free() - Callback free reo descriptor memory after
3129  * HW cache flush
3130  *
3131  * @soc: DP SOC handle
3132  * @cb_ctxt: Callback context
3133  * @reo_status: REO command status
3134  */
3135 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
3136 	union hal_reo_status *reo_status)
3137 {
3138 	struct reo_desc_list_node *freedesc =
3139 		(struct reo_desc_list_node *)cb_ctxt;
3140 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
3141 	unsigned long curr_ts = qdf_get_system_timestamp();
3142 
3143 	if ((reo_status->fl_cache_status.header.status !=
3144 		HAL_REO_CMD_SUCCESS) &&
3145 		(reo_status->fl_cache_status.header.status !=
3146 		HAL_REO_CMD_DRAIN)) {
3147 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
3148 			    soc, reo_status->rx_queue_status.header.status,
3149 			    freedesc->rx_tid.tid);
3150 	}
3151 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
3152 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
3153 		     rx_tid->tid);
3154 
3155 	/* REO desc is enqueued to be freed at a later point
3156 	 * in time, just free the freedesc alone and return
3157 	 */
3158 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
3159 		goto out;
3160 
3161 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
3162 
3163 	qdf_mem_unmap_nbytes_single(soc->osdev,
3164 		rx_tid->hw_qdesc_paddr,
3165 		QDF_DMA_BIDIRECTIONAL,
3166 		rx_tid->hw_qdesc_alloc_size);
3167 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3168 out:
3169 	qdf_mem_free(freedesc);
3170 }
3171 
3172 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
3173 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
3174 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3175 {
3176 	if (dma_addr < 0x50000000)
3177 		return QDF_STATUS_E_FAILURE;
3178 	else
3179 		return QDF_STATUS_SUCCESS;
3180 }
3181 #else
3182 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
3183 {
3184 	return QDF_STATUS_SUCCESS;
3185 }
3186 #endif
3187 
3188 /*
3189  * dp_rx_tid_setup_wifi3() – Setup receive TID state
3190  * @peer: Datapath peer handle
3191  * @tid: TID
3192  * @ba_window_size: BlockAck window size
3193  * @start_seq: Starting sequence number
3194  *
3195  * Return: QDF_STATUS code
3196  */
3197 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
3198 				 uint32_t ba_window_size, uint32_t start_seq)
3199 {
3200 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3201 	struct dp_vdev *vdev = peer->vdev;
3202 	struct dp_soc *soc = vdev->pdev->soc;
3203 	uint32_t hw_qdesc_size;
3204 	uint32_t hw_qdesc_align;
3205 	int hal_pn_type;
3206 	void *hw_qdesc_vaddr;
3207 	uint32_t alloc_tries = 0;
3208 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3209 
3210 	if (!qdf_atomic_read(&peer->is_default_route_set))
3211 		return QDF_STATUS_E_FAILURE;
3212 
3213 	if (!dp_rx_tid_setup_allow(peer)) {
3214 		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
3215 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3216 		goto send_wmi_reo_cmd;
3217 	}
3218 
3219 	rx_tid->ba_win_size = ba_window_size;
3220 	if (rx_tid->hw_qdesc_vaddr_unaligned)
3221 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
3222 			start_seq, false);
3223 	rx_tid->delba_tx_status = 0;
3224 	rx_tid->ppdu_id_2k = 0;
3225 	rx_tid->num_of_addba_req = 0;
3226 	rx_tid->num_of_delba_req = 0;
3227 	rx_tid->num_of_addba_resp = 0;
3228 	rx_tid->num_addba_rsp_failed = 0;
3229 	rx_tid->num_addba_rsp_success = 0;
3230 	rx_tid->delba_tx_success_cnt = 0;
3231 	rx_tid->delba_tx_fail_cnt = 0;
3232 	rx_tid->statuscode = 0;
3233 
3234 	/* TODO: Allocating HW queue descriptors based on max BA window size
3235 	 * for all QOS TIDs so that same descriptor can be used later when
3236 	 * ADDBA request is recevied. This should be changed to allocate HW
3237 	 * queue descriptors based on BA window size being negotiated (0 for
3238 	 * non BA cases), and reallocate when BA window size changes and also
3239 	 * send WMI message to FW to change the REO queue descriptor in Rx
3240 	 * peer entry as part of dp_rx_tid_update.
3241 	 */
3242 	if (tid != DP_NON_QOS_TID)
3243 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3244 			HAL_RX_MAX_BA_WINDOW, tid);
3245 	else
3246 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
3247 			ba_window_size, tid);
3248 
3249 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
3250 	/* To avoid unnecessary extra allocation for alignment, try allocating
3251 	 * exact size and see if we already have aligned address.
3252 	 */
3253 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
3254 
3255 try_desc_alloc:
3256 	rx_tid->hw_qdesc_vaddr_unaligned =
3257 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
3258 
3259 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3260 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3261 			    soc, tid);
3262 		return QDF_STATUS_E_NOMEM;
3263 	}
3264 
3265 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
3266 		hw_qdesc_align) {
3267 		/* Address allocated above is not alinged. Allocate extra
3268 		 * memory for alignment
3269 		 */
3270 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3271 		rx_tid->hw_qdesc_vaddr_unaligned =
3272 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
3273 					hw_qdesc_align - 1);
3274 
3275 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
3276 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
3277 				    soc, tid);
3278 			return QDF_STATUS_E_NOMEM;
3279 		}
3280 
3281 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
3282 			rx_tid->hw_qdesc_vaddr_unaligned,
3283 			hw_qdesc_align);
3284 
3285 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
3286 			      soc, rx_tid->hw_qdesc_alloc_size,
3287 			      hw_qdesc_vaddr);
3288 
3289 	} else {
3290 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
3291 	}
3292 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
3293 
3294 	/* TODO: Ensure that sec_type is set before ADDBA is received.
3295 	 * Currently this is set based on htt indication
3296 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
3297 	 */
3298 	switch (peer->security[dp_sec_ucast].sec_type) {
3299 	case cdp_sec_type_tkip_nomic:
3300 	case cdp_sec_type_aes_ccmp:
3301 	case cdp_sec_type_aes_ccmp_256:
3302 	case cdp_sec_type_aes_gcmp:
3303 	case cdp_sec_type_aes_gcmp_256:
3304 		hal_pn_type = HAL_PN_WPA;
3305 		break;
3306 	case cdp_sec_type_wapi:
3307 		if (vdev->opmode == wlan_op_mode_ap)
3308 			hal_pn_type = HAL_PN_WAPI_EVEN;
3309 		else
3310 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
3311 		break;
3312 	default:
3313 		hal_pn_type = HAL_PN_NONE;
3314 		break;
3315 	}
3316 
3317 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
3318 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
3319 		vdev->vdev_stats_id);
3320 
3321 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
3322 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
3323 		&(rx_tid->hw_qdesc_paddr));
3324 
3325 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
3326 			QDF_STATUS_SUCCESS) {
3327 		if (alloc_tries++ < 10) {
3328 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3329 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3330 			goto try_desc_alloc;
3331 		} else {
3332 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
3333 				    soc, tid);
3334 			status = QDF_STATUS_E_NOMEM;
3335 			goto error;
3336 		}
3337 	}
3338 
3339 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
3340 		status = QDF_STATUS_E_PERM;
3341 		goto error;
3342 	}
3343 
3344 send_wmi_reo_cmd:
3345 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
3346 						tid, ba_window_size);
3347 	if (QDF_IS_STATUS_SUCCESS(status))
3348 		return status;
3349 
3350 error:
3351 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3352 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
3353 		    QDF_STATUS_SUCCESS)
3354 			qdf_mem_unmap_nbytes_single(
3355 				soc->osdev,
3356 				rx_tid->hw_qdesc_paddr,
3357 				QDF_DMA_BIDIRECTIONAL,
3358 				rx_tid->hw_qdesc_alloc_size);
3359 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3360 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3361 	}
3362 	return status;
3363 }
3364 
3365 #ifdef REO_DESC_DEFER_FREE
3366 /*
3367  * dp_reo_desc_clean_up() - If cmd to flush base desc fails add
3368  * desc back to freelist and defer the deletion
3369  *
3370  * @soc: DP SOC handle
3371  * @desc: Base descriptor to be freed
3372  * @reo_status: REO command status
3373  */
3374 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3375 				 struct reo_desc_list_node *desc,
3376 				 union hal_reo_status *reo_status)
3377 {
3378 	desc->free_ts = qdf_get_system_timestamp();
3379 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3380 	qdf_list_insert_back(&soc->reo_desc_freelist,
3381 			     (qdf_list_node_t *)desc);
3382 }
3383 
3384 /*
3385  * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
3386  * ring in aviod of REO hang
3387  *
3388  * @list_size: REO desc list size to be cleaned
3389  */
3390 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3391 {
3392 	unsigned long curr_ts = qdf_get_system_timestamp();
3393 
3394 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
3395 		dp_err_log("%lu:freedesc number %d in freelist",
3396 			   curr_ts, *list_size);
3397 		/* limit the batch queue size */
3398 		*list_size = REO_DESC_FREELIST_SIZE;
3399 	}
3400 }
3401 #else
3402 /*
3403  * dp_reo_desc_clean_up() - If send cmd to REO inorder to flush
3404  * cache fails free the base REO desc anyway
3405  *
3406  * @soc: DP SOC handle
3407  * @desc: Base descriptor to be freed
3408  * @reo_status: REO command status
3409  */
3410 static void dp_reo_desc_clean_up(struct dp_soc *soc,
3411 				 struct reo_desc_list_node *desc,
3412 				 union hal_reo_status *reo_status)
3413 {
3414 	if (reo_status) {
3415 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3416 		reo_status->fl_cache_status.header.status = 0;
3417 		dp_reo_desc_free(soc, (void *)desc, reo_status);
3418 	}
3419 }
3420 
3421 /*
3422  * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
3423  * ring in aviod of REO hang
3424  *
3425  * @list_size: REO desc list size to be cleaned
3426  */
3427 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
3428 {
3429 }
3430 #endif
3431 
3432 /*
3433  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
3434  * cmd and re-insert desc into free list if send fails.
3435  *
3436  * @soc: DP SOC handle
3437  * @desc: desc with resend update cmd flag set
3438  * @rx_tid: Desc RX tid associated with update cmd for resetting
3439  * valid field to 0 in h/w
3440  *
3441  * Return: QDF status
3442  */
3443 static QDF_STATUS
3444 dp_resend_update_reo_cmd(struct dp_soc *soc,
3445 			 struct reo_desc_list_node *desc,
3446 			 struct dp_rx_tid *rx_tid)
3447 {
3448 	struct hal_reo_cmd_params params;
3449 
3450 	qdf_mem_zero(&params, sizeof(params));
3451 	params.std.need_status = 1;
3452 	params.std.addr_lo =
3453 		rx_tid->hw_qdesc_paddr & 0xffffffff;
3454 	params.std.addr_hi =
3455 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3456 	params.u.upd_queue_params.update_vld = 1;
3457 	params.u.upd_queue_params.vld = 0;
3458 	desc->resend_update_reo_cmd = false;
3459 	/*
3460 	 * If the cmd send fails then set resend_update_reo_cmd flag
3461 	 * and insert the desc at the end of the free list to retry.
3462 	 */
3463 	if (dp_reo_send_cmd(soc,
3464 			    CMD_UPDATE_RX_REO_QUEUE,
3465 			    &params,
3466 			    dp_rx_tid_delete_cb,
3467 			    (void *)desc)
3468 	    != QDF_STATUS_SUCCESS) {
3469 		desc->resend_update_reo_cmd = true;
3470 		desc->free_ts = qdf_get_system_timestamp();
3471 		qdf_list_insert_back(&soc->reo_desc_freelist,
3472 				     (qdf_list_node_t *)desc);
3473 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3474 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3475 		return QDF_STATUS_E_FAILURE;
3476 	}
3477 
3478 	return QDF_STATUS_SUCCESS;
3479 }
3480 
3481 /*
3482  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
3483  * after deleting the entries (ie., setting valid=0)
3484  *
3485  * @soc: DP SOC handle
3486  * @cb_ctxt: Callback context
3487  * @reo_status: REO command status
3488  */
3489 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3490 			 union hal_reo_status *reo_status)
3491 {
3492 	struct reo_desc_list_node *freedesc =
3493 		(struct reo_desc_list_node *)cb_ctxt;
3494 	uint32_t list_size;
3495 	struct reo_desc_list_node *desc;
3496 	unsigned long curr_ts = qdf_get_system_timestamp();
3497 	uint32_t desc_size, tot_desc_size;
3498 	struct hal_reo_cmd_params params;
3499 	bool flush_failure = false;
3500 
3501 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
3502 
3503 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
3504 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3505 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
3506 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
3507 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
3508 		return;
3509 	} else if (reo_status->rx_queue_status.header.status !=
3510 		HAL_REO_CMD_SUCCESS) {
3511 		/* Should not happen normally. Just print error for now */
3512 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
3513 			   reo_status->rx_queue_status.header.status,
3514 			   freedesc->rx_tid.tid);
3515 	}
3516 
3517 	dp_peer_info("%pK: rx_tid: %d status: %d",
3518 		     soc, freedesc->rx_tid.tid,
3519 		     reo_status->rx_queue_status.header.status);
3520 
3521 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3522 	freedesc->free_ts = curr_ts;
3523 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
3524 		(qdf_list_node_t *)freedesc, &list_size);
3525 
3526 	/* MCL path add the desc back to reo_desc_freelist when REO FLUSH
3527 	 * failed. it may cause the number of REO queue pending  in free
3528 	 * list is even larger than REO_CMD_RING max size and lead REO CMD
3529 	 * flood then cause REO HW in an unexpected condition. So it's
3530 	 * needed to limit the number REO cmds in a batch operation.
3531 	 */
3532 	dp_reo_limit_clean_batch_sz(&list_size);
3533 
3534 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
3535 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3536 		((list_size >= REO_DESC_FREELIST_SIZE) ||
3537 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
3538 		(desc->resend_update_reo_cmd && list_size))) {
3539 		struct dp_rx_tid *rx_tid;
3540 
3541 		qdf_list_remove_front(&soc->reo_desc_freelist,
3542 				(qdf_list_node_t **)&desc);
3543 		list_size--;
3544 		rx_tid = &desc->rx_tid;
3545 
3546 		/* First process descs with resend_update_reo_cmd set */
3547 		if (desc->resend_update_reo_cmd) {
3548 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
3549 			    QDF_STATUS_SUCCESS)
3550 				break;
3551 			else
3552 				continue;
3553 		}
3554 
3555 		/* Flush and invalidate REO descriptor from HW cache: Base and
3556 		 * extension descriptors should be flushed separately */
3557 		if (desc->pending_ext_desc_size)
3558 			tot_desc_size = desc->pending_ext_desc_size;
3559 		else
3560 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
3561 		/* Get base descriptor size by passing non-qos TID */
3562 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
3563 						   DP_NON_QOS_TID);
3564 
3565 		/* Flush reo extension descriptors */
3566 		while ((tot_desc_size -= desc_size) > 0) {
3567 			qdf_mem_zero(&params, sizeof(params));
3568 			params.std.addr_lo =
3569 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
3570 				tot_desc_size) & 0xffffffff;
3571 			params.std.addr_hi =
3572 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3573 
3574 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
3575 							CMD_FLUSH_CACHE,
3576 							&params,
3577 							NULL,
3578 							NULL)) {
3579 				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
3580 					   "tid %d desc %pK", rx_tid->tid,
3581 					   (void *)(rx_tid->hw_qdesc_paddr));
3582 				desc->pending_ext_desc_size = tot_desc_size +
3583 								      desc_size;
3584 				dp_reo_desc_clean_up(soc, desc, reo_status);
3585 				flush_failure = true;
3586 				break;
3587 			}
3588 		}
3589 
3590 		if (flush_failure)
3591 			break;
3592 		else
3593 			desc->pending_ext_desc_size = desc_size;
3594 
3595 		/* Flush base descriptor */
3596 		qdf_mem_zero(&params, sizeof(params));
3597 		params.std.need_status = 1;
3598 		params.std.addr_lo =
3599 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
3600 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3601 
3602 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
3603 							  CMD_FLUSH_CACHE,
3604 							  &params,
3605 							  dp_reo_desc_free,
3606 							  (void *)desc)) {
3607 			union hal_reo_status reo_status;
3608 			/*
3609 			 * If dp_reo_send_cmd return failure, related TID queue desc
3610 			 * should be unmapped. Also locally reo_desc, together with
3611 			 * TID queue desc also need to be freed accordingly.
3612 			 *
3613 			 * Here invoke desc_free function directly to do clean up.
3614 			 *
3615 			 * In case of MCL path add the desc back to the free
3616 			 * desc list and defer deletion.
3617 			 */
3618 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
3619 				   rx_tid->tid);
3620 			dp_reo_desc_clean_up(soc, desc, &reo_status);
3621 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3622 			break;
3623 		}
3624 	}
3625 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3626 
3627 	dp_reo_desc_defer_free(soc);
3628 }
3629 
3630 /*
3631  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
3632  * @peer: Datapath peer handle
3633  * @tid: TID
3634  *
3635  * Return: 0 on success, error code on failure
3636  */
3637 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
3638 {
3639 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
3640 	struct dp_soc *soc = peer->vdev->pdev->soc;
3641 	struct hal_reo_cmd_params params;
3642 	struct reo_desc_list_node *freedesc =
3643 		qdf_mem_malloc(sizeof(*freedesc));
3644 
3645 	if (!freedesc) {
3646 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
3647 			    soc, tid);
3648 		return -ENOMEM;
3649 	}
3650 
3651 	freedesc->rx_tid = *rx_tid;
3652 	freedesc->resend_update_reo_cmd = false;
3653 
3654 	qdf_mem_zero(&params, sizeof(params));
3655 
3656 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
3657 
3658 	params.std.need_status = 1;
3659 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3660 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3661 	params.u.upd_queue_params.update_vld = 1;
3662 	params.u.upd_queue_params.vld = 0;
3663 
3664 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3665 			    dp_rx_tid_delete_cb, (void *)freedesc)
3666 		!= QDF_STATUS_SUCCESS) {
3667 		/* Defer the clean up to the call back context */
3668 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3669 		freedesc->free_ts = qdf_get_system_timestamp();
3670 		freedesc->resend_update_reo_cmd = true;
3671 		qdf_list_insert_front(&soc->reo_desc_freelist,
3672 				      (qdf_list_node_t *)freedesc);
3673 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3674 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3675 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
3676 	}
3677 
3678 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3679 	rx_tid->hw_qdesc_alloc_size = 0;
3680 	rx_tid->hw_qdesc_paddr = 0;
3681 
3682 	return 0;
3683 }
3684 
3685 #ifdef DP_LFR
3686 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
3687 {
3688 	int tid;
3689 
3690 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
3691 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
3692 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
3693 			      tid, peer, peer->local_id);
3694 	}
3695 }
3696 #else
3697 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
3698 #endif
3699 
3700 #ifdef WLAN_FEATURE_11BE_MLO
3701 /**
3702  * dp_peer_rx_tids_init() - initialize each tids in peer
3703  * @peer: peer pointer
3704  *
3705  * Return: None
3706  */
3707 static void dp_peer_rx_tids_init(struct dp_peer *peer)
3708 {
3709 	int tid;
3710 	struct dp_rx_tid *rx_tid;
3711 
3712 	/* if not first assoc link peer or MLD peer,
3713 	 * not to initialize rx_tids again.
3714 	 */
3715 	if ((IS_MLO_DP_LINK_PEER(peer) && !peer->assoc_link) ||
3716 	    IS_MLO_DP_MLD_PEER(peer))
3717 		return;
3718 
3719 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3720 		rx_tid = &peer->rx_tid[tid];
3721 		rx_tid->array = &rx_tid->base;
3722 		rx_tid->base.head = NULL;
3723 		rx_tid->base.tail = NULL;
3724 		rx_tid->tid = tid;
3725 		rx_tid->defrag_timeout_ms = 0;
3726 		rx_tid->ba_win_size = 0;
3727 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3728 
3729 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
3730 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
3731 		rx_tid->defrag_peer =
3732 			IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
3733 	}
3734 }
3735 #else
3736 static void dp_peer_rx_tids_init(struct dp_peer *peer)
3737 {
3738 	int tid;
3739 	struct dp_rx_tid *rx_tid;
3740 
3741 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3742 		rx_tid = &peer->rx_tid[tid];
3743 		rx_tid->array = &rx_tid->base;
3744 		rx_tid->base.head = NULL;
3745 		rx_tid->base.tail = NULL;
3746 		rx_tid->tid = tid;
3747 		rx_tid->defrag_timeout_ms = 0;
3748 		rx_tid->ba_win_size = 0;
3749 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3750 
3751 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
3752 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
3753 		rx_tid->defrag_peer = peer;
3754 	}
3755 }
3756 #endif
3757 
3758 /*
3759  * dp_peer_rx_init() – Initialize receive TID state
3760  * @pdev: Datapath pdev
3761  * @peer: Datapath peer
3762  *
3763  */
3764 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3765 {
3766 	dp_peer_rx_tids_init(peer);
3767 
3768 	peer->active_ba_session_cnt = 0;
3769 	peer->hw_buffer_size = 0;
3770 	peer->kill_256_sessions = 0;
3771 
3772 	/* Setup default (non-qos) rx tid queue */
3773 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
3774 
3775 	/* Setup rx tid queue for TID 0.
3776 	 * Other queues will be setup on receiving first packet, which will cause
3777 	 * NULL REO queue error
3778 	 */
3779 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
3780 
3781 	/*
3782 	 * Setup the rest of TID's to handle LFR
3783 	 */
3784 	dp_peer_setup_remaining_tids(peer);
3785 
3786 	/*
3787 	 * Set security defaults: no PN check, no security. The target may
3788 	 * send a HTT SEC_IND message to overwrite these defaults.
3789 	 */
3790 	peer->security[dp_sec_ucast].sec_type =
3791 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
3792 }
3793 
3794 /*
3795  * dp_peer_rx_cleanup() – Cleanup receive TID state
3796  * @vdev: Datapath vdev
3797  * @peer: Datapath peer
3798  *
3799  */
3800 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3801 {
3802 	int tid;
3803 	uint32_t tid_delete_mask = 0;
3804 
3805 	dp_info("Remove tids for peer: %pK", peer);
3806 	if (IS_MLO_DP_LINK_PEER(peer))
3807 		return;
3808 
3809 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3810 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3811 
3812 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3813 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
3814 			/* Cleanup defrag related resource */
3815 			dp_rx_defrag_waitlist_remove(peer, tid);
3816 			dp_rx_reorder_flush_frag(peer, tid);
3817 		}
3818 
3819 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
3820 			dp_rx_tid_delete_wifi3(peer, tid);
3821 
3822 			tid_delete_mask |= (1 << tid);
3823 		}
3824 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3825 	}
3826 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
3827 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
3828 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
3829 			peer->vdev->pdev->pdev_id,
3830 			peer->vdev->vdev_id, peer->mac_addr.raw,
3831 			tid_delete_mask);
3832 	}
3833 #endif
3834 }
3835 
3836 /*
3837  * dp_peer_cleanup() – Cleanup peer information
3838  * @vdev: Datapath vdev
3839  * @peer: Datapath peer
3840  *
3841  */
3842 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3843 {
3844 	enum wlan_op_mode vdev_opmode;
3845 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3846 	struct dp_pdev *pdev = vdev->pdev;
3847 	struct dp_soc *soc = pdev->soc;
3848 
3849 	/* save vdev related member in case vdev freed */
3850 	vdev_opmode = vdev->opmode;
3851 
3852 	dp_monitor_peer_tx_cleanup(vdev, peer);
3853 
3854 	if (vdev_opmode != wlan_op_mode_monitor)
3855 	/* cleanup the Rx reorder queues for this peer */
3856 		dp_peer_rx_cleanup(vdev, peer);
3857 
3858 	dp_peer_rx_tids_destroy(peer);
3859 
3860 	if (IS_MLO_DP_LINK_PEER(peer))
3861 		dp_link_peer_del_mld_peer(peer);
3862 	if (IS_MLO_DP_MLD_PEER(peer))
3863 		dp_mld_peer_deinit_link_peers_info(peer);
3864 
3865 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3866 		     QDF_MAC_ADDR_SIZE);
3867 
3868 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3869 		soc->cdp_soc.ol_ops->peer_unref_delete(
3870 				soc->ctrl_psoc,
3871 				vdev->pdev->pdev_id,
3872 				peer->mac_addr.raw, vdev_mac_addr,
3873 				vdev_opmode);
3874 }
3875 
3876 /* dp_teardown_256_ba_session() - Teardown sessions using 256
3877  *                                window size when a request with
3878  *                                64 window size is received.
3879  *                                This is done as a WAR since HW can
3880  *                                have only one setting per peer (64 or 256).
3881  *                                For HKv2, we use per tid buffersize setting
3882  *                                for 0 to per_tid_basize_max_tid. For tid
3883  *                                more than per_tid_basize_max_tid we use HKv1
3884  *                                method.
3885  * @peer: Datapath peer
3886  *
3887  * Return: void
3888  */
3889 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
3890 {
3891 	uint8_t delba_rcode = 0;
3892 	int tid;
3893 	struct dp_rx_tid *rx_tid = NULL;
3894 
3895 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
3896 	for (; tid < DP_MAX_TIDS; tid++) {
3897 		rx_tid = &peer->rx_tid[tid];
3898 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3899 
3900 		if (rx_tid->ba_win_size <= 64) {
3901 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3902 			continue;
3903 		} else {
3904 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
3905 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3906 				/* send delba */
3907 				if (!rx_tid->delba_tx_status) {
3908 					rx_tid->delba_tx_retry++;
3909 					rx_tid->delba_tx_status = 1;
3910 					rx_tid->delba_rcode =
3911 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
3912 					delba_rcode = rx_tid->delba_rcode;
3913 
3914 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3915 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3916 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3917 							peer->vdev->pdev->soc->ctrl_psoc,
3918 							peer->vdev->vdev_id,
3919 							peer->mac_addr.raw,
3920 							tid, delba_rcode,
3921 							CDP_DELBA_REASON_NONE);
3922 				} else {
3923 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3924 				}
3925 			} else {
3926 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
3927 			}
3928 		}
3929 	}
3930 }
3931 
3932 /*
3933 * dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State
3934 *
3935 * @soc: Datapath soc handle
3936 * @peer_mac: Datapath peer mac address
3937 * @vdev_id: id of atapath vdev
3938 * @tid: TID number
3939 * @status: tx completion status
3940 * Return: 0 on success, error code on failure
3941 */
3942 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
3943 				      uint8_t *peer_mac,
3944 				      uint16_t vdev_id,
3945 				      uint8_t tid, int status)
3946 {
3947 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
3948 					(struct dp_soc *)cdp_soc,
3949 					peer_mac, 0, vdev_id,
3950 					DP_MOD_ID_CDP);
3951 	struct dp_rx_tid *rx_tid = NULL;
3952 
3953 	if (!peer) {
3954 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3955 		goto fail;
3956 	}
3957 	rx_tid = &peer->rx_tid[tid];
3958 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3959 	if (status) {
3960 		rx_tid->num_addba_rsp_failed++;
3961 		dp_rx_tid_update_wifi3(peer, tid, 1,
3962 				       IEEE80211_SEQ_MAX, false);
3963 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3964 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3965 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
3966 
3967 		goto success;
3968 	}
3969 
3970 	rx_tid->num_addba_rsp_success++;
3971 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
3972 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3973 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
3974 			    cdp_soc, tid);
3975 		goto fail;
3976 	}
3977 
3978 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
3979 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3980 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
3981 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3982 		goto fail;
3983 	}
3984 
3985 	if (dp_rx_tid_update_wifi3(peer, tid,
3986 				   rx_tid->ba_win_size,
3987 				   rx_tid->startseqnum,
3988 				   false)) {
3989 		dp_err("Failed update REO SSN");
3990 	}
3991 
3992 	dp_info("tid %u window_size %u start_seq_num %u",
3993 		tid, rx_tid->ba_win_size,
3994 		rx_tid->startseqnum);
3995 
3996 	/* First Session */
3997 	if (peer->active_ba_session_cnt == 0) {
3998 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
3999 			peer->hw_buffer_size = 256;
4000 		else
4001 			peer->hw_buffer_size = 64;
4002 	}
4003 
4004 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
4005 
4006 	peer->active_ba_session_cnt++;
4007 
4008 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4009 
4010 	/* Kill any session having 256 buffer size
4011 	 * when 64 buffer size request is received.
4012 	 * Also, latch on to 64 as new buffer size.
4013 	 */
4014 	if (peer->kill_256_sessions) {
4015 		dp_teardown_256_ba_sessions(peer);
4016 		peer->kill_256_sessions = 0;
4017 	}
4018 
4019 success:
4020 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4021 	return QDF_STATUS_SUCCESS;
4022 
4023 fail:
4024 	if (peer)
4025 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4026 
4027 	return QDF_STATUS_E_FAILURE;
4028 }
4029 
4030 /*
4031 * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer
4032 *
4033 * @soc: Datapath soc handle
4034 * @peer_mac: Datapath peer mac address
4035 * @vdev_id: id of atapath vdev
4036 * @tid: TID number
4037 * @dialogtoken: output dialogtoken
4038 * @statuscode: output dialogtoken
4039 * @buffersize: Output BA window size
4040 * @batimeout: Output BA timeout
4041 */
4042 QDF_STATUS
4043 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4044 			     uint16_t vdev_id, uint8_t tid,
4045 			     uint8_t *dialogtoken, uint16_t *statuscode,
4046 			     uint16_t *buffersize, uint16_t *batimeout)
4047 {
4048 	struct dp_rx_tid *rx_tid = NULL;
4049 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4050 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
4051 						       peer_mac, 0, vdev_id,
4052 						       DP_MOD_ID_CDP);
4053 
4054 	if (!peer) {
4055 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4056 		return QDF_STATUS_E_FAILURE;
4057 	}
4058 	rx_tid = &peer->rx_tid[tid];
4059 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4060 	rx_tid->num_of_addba_resp++;
4061 	/* setup ADDBA response parameters */
4062 	*dialogtoken = rx_tid->dialogtoken;
4063 	*statuscode = rx_tid->statuscode;
4064 	*buffersize = rx_tid->ba_win_size;
4065 	*batimeout  = 0;
4066 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4067 
4068 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4069 
4070 	return status;
4071 }
4072 
4073 /* dp_check_ba_buffersize() - Check buffer size in request
4074  *                            and latch onto this size based on
4075  *                            size used in first active session.
4076  * @peer: Datapath peer
4077  * @tid: Tid
4078  * @buffersize: Block ack window size
4079  *
4080  * Return: void
4081  */
4082 static void dp_check_ba_buffersize(struct dp_peer *peer,
4083 				   uint16_t tid,
4084 				   uint16_t buffersize)
4085 {
4086 	struct dp_rx_tid *rx_tid = NULL;
4087 
4088 	rx_tid = &peer->rx_tid[tid];
4089 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
4090 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
4091 		rx_tid->ba_win_size = buffersize;
4092 		return;
4093 	} else {
4094 		if (peer->active_ba_session_cnt == 0) {
4095 			rx_tid->ba_win_size = buffersize;
4096 		} else {
4097 			if (peer->hw_buffer_size == 64) {
4098 				if (buffersize <= 64)
4099 					rx_tid->ba_win_size = buffersize;
4100 				else
4101 					rx_tid->ba_win_size = peer->hw_buffer_size;
4102 			} else if (peer->hw_buffer_size == 256) {
4103 				if (buffersize > 64) {
4104 					rx_tid->ba_win_size = buffersize;
4105 				} else {
4106 					rx_tid->ba_win_size = buffersize;
4107 					peer->hw_buffer_size = 64;
4108 					peer->kill_256_sessions = 1;
4109 				}
4110 			}
4111 		}
4112 	}
4113 }
4114 
4115 #define DP_RX_BA_SESSION_DISABLE  1
4116 
4117 /*
4118  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
4119  *
4120  * @soc: Datapath soc handle
4121  * @peer_mac: Datapath peer mac address
4122  * @vdev_id: id of atapath vdev
4123  * @dialogtoken: dialogtoken from ADDBA frame
4124  * @tid: TID number
4125  * @batimeout: BA timeout
4126  * @buffersize: BA window size
4127  * @startseqnum: Start seq. number received in BA sequence control
4128  *
4129  * Return: 0 on success, error code on failure
4130  */
4131 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
4132 				  uint8_t *peer_mac,
4133 				  uint16_t vdev_id,
4134 				  uint8_t dialogtoken,
4135 				  uint16_t tid, uint16_t batimeout,
4136 				  uint16_t buffersize,
4137 				  uint16_t startseqnum)
4138 {
4139 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4140 	struct dp_rx_tid *rx_tid = NULL;
4141 	struct dp_peer *peer;
4142 
4143 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
4144 					      peer_mac,
4145 					      0, vdev_id,
4146 					      DP_MOD_ID_CDP);
4147 
4148 	if (!peer) {
4149 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4150 		return QDF_STATUS_E_FAILURE;
4151 	}
4152 	rx_tid = &peer->rx_tid[tid];
4153 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4154 	rx_tid->num_of_addba_req++;
4155 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
4156 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
4157 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4158 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4159 		peer->active_ba_session_cnt--;
4160 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
4161 			      cdp_soc, tid);
4162 	}
4163 
4164 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4165 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4166 		status = QDF_STATUS_E_FAILURE;
4167 		goto fail;
4168 	}
4169 
4170 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
4171 		dp_peer_info("%pK: disable BA session",
4172 			     cdp_soc);
4173 
4174 		buffersize = 1;
4175 	} else if (rx_tid->rx_ba_win_size_override) {
4176 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
4177 			     rx_tid->rx_ba_win_size_override);
4178 
4179 		buffersize = rx_tid->rx_ba_win_size_override;
4180 	} else {
4181 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
4182 			     buffersize);
4183 	}
4184 
4185 	dp_check_ba_buffersize(peer, tid, buffersize);
4186 
4187 	if (dp_rx_tid_setup_wifi3(peer, tid,
4188 	    rx_tid->ba_win_size, startseqnum)) {
4189 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4190 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4191 		status = QDF_STATUS_E_FAILURE;
4192 		goto fail;
4193 	}
4194 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
4195 
4196 	rx_tid->dialogtoken = dialogtoken;
4197 	rx_tid->startseqnum = startseqnum;
4198 
4199 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
4200 		rx_tid->statuscode = rx_tid->userstatuscode;
4201 	else
4202 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
4203 
4204 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
4205 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
4206 
4207 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4208 
4209 fail:
4210 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4211 
4212 	return status;
4213 }
4214 
4215 /*
4216 * dp_set_addba_response() – Set a user defined ADDBA response status code
4217 *
4218 * @soc: Datapath soc handle
4219 * @peer_mac: Datapath peer mac address
4220 * @vdev_id: id of atapath vdev
4221 * @tid: TID number
4222 * @statuscode: response status code to be set
4223 */
4224 QDF_STATUS
4225 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4226 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
4227 {
4228 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4229 					(struct dp_soc *)cdp_soc,
4230 					peer_mac, 0, vdev_id,
4231 					DP_MOD_ID_CDP);
4232 	struct dp_rx_tid *rx_tid;
4233 
4234 	if (!peer) {
4235 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4236 		return QDF_STATUS_E_FAILURE;
4237 	}
4238 
4239 	rx_tid = &peer->rx_tid[tid];
4240 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4241 	rx_tid->userstatuscode = statuscode;
4242 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4243 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4244 
4245 	return QDF_STATUS_SUCCESS;
4246 }
4247 
4248 /*
4249 * dp_rx_delba_process_wifi3() – Process DELBA from peer
4250 * @soc: Datapath soc handle
4251 * @peer_mac: Datapath peer mac address
4252 * @vdev_id: id of atapath vdev
4253 * @tid: TID number
4254 * @reasoncode: Reason code received in DELBA frame
4255 *
4256 * Return: 0 on success, error code on failure
4257 */
4258 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4259 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
4260 {
4261 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4262 	struct dp_rx_tid *rx_tid;
4263 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4264 					(struct dp_soc *)cdp_soc,
4265 					peer_mac, 0, vdev_id,
4266 					DP_MOD_ID_CDP);
4267 
4268 	if (!peer) {
4269 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
4270 		return QDF_STATUS_E_FAILURE;
4271 	}
4272 	rx_tid = &peer->rx_tid[tid];
4273 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4274 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
4275 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4276 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4277 		status = QDF_STATUS_E_FAILURE;
4278 		goto fail;
4279 	}
4280 	/* TODO: See if we can delete the existing REO queue descriptor and
4281 	 * replace with a new one without queue extenstion descript to save
4282 	 * memory
4283 	 */
4284 	rx_tid->delba_rcode = reasoncode;
4285 	rx_tid->num_of_delba_req++;
4286 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4287 
4288 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
4289 	peer->active_ba_session_cnt--;
4290 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4291 fail:
4292 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4293 
4294 	return status;
4295 }
4296 
4297 /*
4298  * dp_rx_delba_tx_completion_wifi3() – Send Delba Request
4299  *
4300  * @soc: Datapath soc handle
4301  * @peer_mac: Datapath peer mac address
4302  * @vdev_id: id of atapath vdev
4303  * @tid: TID number
4304  * @status: tx completion status
4305  * Return: 0 on success, error code on failure
4306  */
4307 
4308 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
4309 				 uint16_t vdev_id,
4310 				 uint8_t tid, int status)
4311 {
4312 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
4313 	struct dp_rx_tid *rx_tid = NULL;
4314 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
4315 					(struct dp_soc *)cdp_soc,
4316 					peer_mac, 0, vdev_id,
4317 					DP_MOD_ID_CDP);
4318 
4319 	if (!peer) {
4320 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
4321 		return QDF_STATUS_E_FAILURE;
4322 	}
4323 	rx_tid = &peer->rx_tid[tid];
4324 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4325 	if (status) {
4326 		rx_tid->delba_tx_fail_cnt++;
4327 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
4328 			rx_tid->delba_tx_retry = 0;
4329 			rx_tid->delba_tx_status = 0;
4330 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4331 		} else {
4332 			rx_tid->delba_tx_retry++;
4333 			rx_tid->delba_tx_status = 1;
4334 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4335 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
4336 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
4337 					peer->vdev->pdev->soc->ctrl_psoc,
4338 					peer->vdev->vdev_id,
4339 					peer->mac_addr.raw, tid,
4340 					rx_tid->delba_rcode,
4341 					CDP_DELBA_REASON_NONE);
4342 		}
4343 		goto end;
4344 	} else {
4345 		rx_tid->delba_tx_success_cnt++;
4346 		rx_tid->delba_tx_retry = 0;
4347 		rx_tid->delba_tx_status = 0;
4348 	}
4349 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
4350 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4351 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4352 		peer->active_ba_session_cnt--;
4353 	}
4354 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
4355 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
4356 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
4357 	}
4358 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4359 
4360 end:
4361 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4362 
4363 	return ret;
4364 }
4365 
4366 /**
4367  * dp_set_pn_check_wifi3() - enable PN check in REO for security
4368  * @soc: Datapath soc handle
4369  * @peer_mac: Datapath peer mac address
4370  * @vdev_id: id of atapath vdev
4371  * @vdev: Datapath vdev
4372  * @pdev - data path device instance
4373  * @sec_type - security type
4374  * @rx_pn - Receive pn starting number
4375  *
4376  */
4377 
4378 QDF_STATUS
4379 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4380 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
4381 		      uint32_t *rx_pn)
4382 {
4383 	struct dp_pdev *pdev;
4384 	int i;
4385 	uint8_t pn_size;
4386 	struct hal_reo_cmd_params params;
4387 	struct dp_peer *peer = NULL;
4388 	struct dp_vdev *vdev = NULL;
4389 
4390 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4391 				      peer_mac, 0, vdev_id,
4392 				      DP_MOD_ID_CDP);
4393 
4394 	if (!peer) {
4395 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4396 		return QDF_STATUS_E_FAILURE;
4397 	}
4398 
4399 	vdev = peer->vdev;
4400 
4401 	if (!vdev) {
4402 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
4403 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4404 		return QDF_STATUS_E_FAILURE;
4405 	}
4406 
4407 	pdev = vdev->pdev;
4408 	qdf_mem_zero(&params, sizeof(params));
4409 
4410 	params.std.need_status = 1;
4411 	params.u.upd_queue_params.update_pn_valid = 1;
4412 	params.u.upd_queue_params.update_pn_size = 1;
4413 	params.u.upd_queue_params.update_pn = 1;
4414 	params.u.upd_queue_params.update_pn_check_needed = 1;
4415 	params.u.upd_queue_params.update_svld = 1;
4416 	params.u.upd_queue_params.svld = 0;
4417 
4418 	switch (sec_type) {
4419 	case cdp_sec_type_tkip_nomic:
4420 	case cdp_sec_type_aes_ccmp:
4421 	case cdp_sec_type_aes_ccmp_256:
4422 	case cdp_sec_type_aes_gcmp:
4423 	case cdp_sec_type_aes_gcmp_256:
4424 		params.u.upd_queue_params.pn_check_needed = 1;
4425 		params.u.upd_queue_params.pn_size = 48;
4426 		pn_size = 48;
4427 		break;
4428 	case cdp_sec_type_wapi:
4429 		params.u.upd_queue_params.pn_check_needed = 1;
4430 		params.u.upd_queue_params.pn_size = 128;
4431 		pn_size = 128;
4432 		if (vdev->opmode == wlan_op_mode_ap) {
4433 			params.u.upd_queue_params.pn_even = 1;
4434 			params.u.upd_queue_params.update_pn_even = 1;
4435 		} else {
4436 			params.u.upd_queue_params.pn_uneven = 1;
4437 			params.u.upd_queue_params.update_pn_uneven = 1;
4438 		}
4439 		break;
4440 	default:
4441 		params.u.upd_queue_params.pn_check_needed = 0;
4442 		pn_size = 0;
4443 		break;
4444 	}
4445 
4446 
4447 	for (i = 0; i < DP_MAX_TIDS; i++) {
4448 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4449 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4450 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4451 			params.std.addr_lo =
4452 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4453 			params.std.addr_hi =
4454 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4455 
4456 			if (pn_size) {
4457 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
4458 					     soc, i, rx_pn[3], rx_pn[2],
4459 					     rx_pn[1], rx_pn[0]);
4460 				params.u.upd_queue_params.update_pn_valid = 1;
4461 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
4462 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
4463 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
4464 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
4465 			}
4466 			rx_tid->pn_size = pn_size;
4467 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
4468 					    CMD_UPDATE_RX_REO_QUEUE,
4469 					    &params, dp_rx_tid_update_cb,
4470 					    rx_tid)) {
4471 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
4472 					   "tid %d desc %pK", rx_tid->tid,
4473 					   (void *)(rx_tid->hw_qdesc_paddr));
4474 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
4475 					     rx.err.reo_cmd_send_fail, 1);
4476 			}
4477 		} else {
4478 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
4479 		}
4480 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4481 	}
4482 
4483 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4484 
4485 	return QDF_STATUS_SUCCESS;
4486 }
4487 
4488 
4489 /**
4490  * dp_set_key_sec_type_wifi3() - set security mode of key
4491  * @soc: Datapath soc handle
4492  * @peer_mac: Datapath peer mac address
4493  * @vdev_id: id of atapath vdev
4494  * @vdev: Datapath vdev
4495  * @pdev - data path device instance
4496  * @sec_type - security type
4497  * #is_unicast - key type
4498  *
4499  */
4500 
4501 QDF_STATUS
4502 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
4503 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
4504 			  bool is_unicast)
4505 {
4506 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4507 						       peer_mac, 0, vdev_id,
4508 						       DP_MOD_ID_CDP);
4509 	int sec_index;
4510 
4511 	if (!peer) {
4512 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
4513 		return QDF_STATUS_E_FAILURE;
4514 	}
4515 
4516 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
4517 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4518 		     is_unicast ? "ucast" : "mcast", sec_type);
4519 
4520 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
4521 	peer->security[sec_index].sec_type = sec_type;
4522 
4523 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4524 
4525 	return QDF_STATUS_SUCCESS;
4526 }
4527 
4528 void
4529 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
4530 		      enum cdp_sec_type sec_type, int is_unicast,
4531 		      u_int32_t *michael_key,
4532 		      u_int32_t *rx_pn)
4533 {
4534 	struct dp_peer *peer;
4535 	int sec_index;
4536 
4537 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4538 	if (!peer) {
4539 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
4540 			    peer_id);
4541 		return;
4542 	}
4543 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
4544 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4545 			  is_unicast ? "ucast" : "mcast", sec_type);
4546 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
4547 	peer->security[sec_index].sec_type = sec_type;
4548 #ifdef notyet /* TODO: See if this is required for defrag support */
4549 	/* michael key only valid for TKIP, but for simplicity,
4550 	 * copy it anyway
4551 	 */
4552 	qdf_mem_copy(
4553 		&peer->security[sec_index].michael_key[0],
4554 		michael_key,
4555 		sizeof(peer->security[sec_index].michael_key));
4556 #ifdef BIG_ENDIAN_HOST
4557 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
4558 				 sizeof(peer->security[sec_index].michael_key));
4559 #endif /* BIG_ENDIAN_HOST */
4560 #endif
4561 
4562 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
4563 	if (sec_type != cdp_sec_type_wapi) {
4564 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
4565 	} else {
4566 		for (i = 0; i < DP_MAX_TIDS; i++) {
4567 			/*
4568 			 * Setting PN valid bit for WAPI sec_type,
4569 			 * since WAPI PN has to be started with predefined value
4570 			 */
4571 			peer->tids_last_pn_valid[i] = 1;
4572 			qdf_mem_copy(
4573 				(u_int8_t *) &peer->tids_last_pn[i],
4574 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
4575 			peer->tids_last_pn[i].pn128[1] =
4576 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
4577 			peer->tids_last_pn[i].pn128[0] =
4578 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
4579 		}
4580 	}
4581 #endif
4582 	/* TODO: Update HW TID queue with PN check parameters (pn type for
4583 	 * all security types and last pn for WAPI) once REO command API
4584 	 * is available
4585 	 */
4586 
4587 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4588 }
4589 
4590 #ifdef QCA_PEER_EXT_STATS
4591 /*
4592  * dp_peer_ext_stats_ctx_alloc() - Allocate peer ext
4593  *                                 stats content
4594  * @soc: DP SoC context
4595  * @peer: DP peer context
4596  *
4597  * Allocate the peer extended stats context
4598  *
4599  * Return: QDF_STATUS_SUCCESS if allocation is
4600  *	   successful
4601  */
4602 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
4603 				       struct dp_peer *peer)
4604 {
4605 	uint8_t tid, ctx_id;
4606 
4607 	if (!soc || !peer) {
4608 		dp_warn("Null soc%pK or peer%pK", soc, peer);
4609 		return QDF_STATUS_E_INVAL;
4610 	}
4611 
4612 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4613 		return QDF_STATUS_SUCCESS;
4614 
4615 	/*
4616 	 * Allocate memory for peer extended stats.
4617 	 */
4618 	peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
4619 	if (!peer->pext_stats) {
4620 		dp_err("Peer extended stats obj alloc failed!!");
4621 		return QDF_STATUS_E_NOMEM;
4622 	}
4623 
4624 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
4625 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
4626 			struct cdp_delay_tx_stats *tx_delay =
4627 			&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
4628 			struct cdp_delay_rx_stats *rx_delay =
4629 			&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
4630 
4631 			dp_hist_init(&tx_delay->tx_swq_delay,
4632 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
4633 			dp_hist_init(&tx_delay->hwtx_delay,
4634 				     CDP_HIST_TYPE_HW_COMP_DELAY);
4635 			dp_hist_init(&rx_delay->to_stack_delay,
4636 				     CDP_HIST_TYPE_REAP_STACK);
4637 		}
4638 	}
4639 
4640 	return QDF_STATUS_SUCCESS;
4641 }
4642 
4643 /*
4644  * dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer context
4645  * @peer: DP peer context
4646  *
4647  * Free the peer extended stats context
4648  *
4649  * Return: Void
4650  */
4651 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
4652 {
4653 	if (!peer) {
4654 		dp_warn("peer_ext dealloc failed due to NULL peer object");
4655 		return;
4656 	}
4657 
4658 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4659 		return;
4660 
4661 	if (!peer->pext_stats)
4662 		return;
4663 
4664 	qdf_mem_free(peer->pext_stats);
4665 	peer->pext_stats = NULL;
4666 }
4667 #endif
4668 
4669 QDF_STATUS
4670 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
4671 			uint8_t tid, uint16_t win_sz)
4672 {
4673 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
4674 	struct dp_peer *peer;
4675 	struct dp_rx_tid *rx_tid;
4676 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4677 
4678 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4679 
4680 	if (!peer) {
4681 		dp_peer_err("%pK: Couldn't find peer from ID %d",
4682 			    soc, peer_id);
4683 		return QDF_STATUS_E_FAILURE;
4684 	}
4685 
4686 	qdf_assert_always(tid < DP_MAX_TIDS);
4687 
4688 	rx_tid = &peer->rx_tid[tid];
4689 
4690 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
4691 		if (!rx_tid->delba_tx_status) {
4692 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
4693 				     soc, peer_id, tid, win_sz);
4694 
4695 			qdf_spin_lock_bh(&rx_tid->tid_lock);
4696 
4697 			rx_tid->delba_tx_status = 1;
4698 
4699 			rx_tid->rx_ba_win_size_override =
4700 			    qdf_min((uint16_t)63, win_sz);
4701 
4702 			rx_tid->delba_rcode =
4703 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
4704 
4705 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4706 
4707 			if (soc->cdp_soc.ol_ops->send_delba)
4708 				soc->cdp_soc.ol_ops->send_delba(
4709 					peer->vdev->pdev->soc->ctrl_psoc,
4710 					peer->vdev->vdev_id,
4711 					peer->mac_addr.raw,
4712 					tid,
4713 					rx_tid->delba_rcode,
4714 					CDP_DELBA_REASON_NONE);
4715 		}
4716 	} else {
4717 		dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid);
4718 		status = QDF_STATUS_E_FAILURE;
4719 	}
4720 
4721 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4722 
4723 	return status;
4724 }
4725 
4726 #ifdef DP_PEER_EXTENDED_API
4727 #ifdef WLAN_FEATURE_11BE_MLO
4728 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4729 			    struct ol_txrx_desc_type *sta_desc)
4730 {
4731 	struct dp_peer *peer;
4732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4733 
4734 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
4735 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4736 
4737 	if (!peer)
4738 		return QDF_STATUS_E_FAULT;
4739 
4740 	qdf_spin_lock_bh(&peer->peer_info_lock);
4741 	peer->state = OL_TXRX_PEER_STATE_CONN;
4742 	qdf_spin_unlock_bh(&peer->peer_info_lock);
4743 
4744 	/* For MLO connection, no RX packet to link peer */
4745 	if (!IS_MLO_DP_LINK_PEER(peer))
4746 		dp_rx_flush_rx_cached(peer, false);
4747 
4748 	if (IS_MLO_DP_LINK_PEER(peer) && peer->assoc_link) {
4749 		dp_peer_info("register for mld peer" QDF_MAC_ADDR_FMT,
4750 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
4751 		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
4752 		peer->mld_peer->state = peer->state;
4753 		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
4754 		dp_rx_flush_rx_cached(peer->mld_peer, false);
4755 	}
4756 
4757 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4758 
4759 	return QDF_STATUS_SUCCESS;
4760 }
4761 
4762 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4763 				enum ol_txrx_peer_state state)
4764 {
4765 	struct dp_peer *peer;
4766 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4767 
4768 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4769 				       DP_MOD_ID_CDP);
4770 	if (!peer) {
4771 		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
4772 			    soc, QDF_MAC_ADDR_REF(peer_mac));
4773 		return QDF_STATUS_E_FAILURE;
4774 	}
4775 	peer->state = state;
4776 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
4777 
4778 	dp_peer_info("peer" QDF_MAC_ADDR_FMT "state %d",
4779 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4780 		     peer->state);
4781 
4782 	if (IS_MLO_DP_LINK_PEER(peer) && peer->assoc_link) {
4783 		peer->mld_peer->state = peer->state;
4784 		peer->mld_peer->authorize = peer->authorize;
4785 		dp_peer_info("mld peer" QDF_MAC_ADDR_FMT "state %d",
4786 			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
4787 			     peer->mld_peer->state);
4788 	}
4789 
4790 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4791 	 * Decrement it here.
4792 	 */
4793 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4794 
4795 	return QDF_STATUS_SUCCESS;
4796 }
4797 #else
4798 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4799 			    struct ol_txrx_desc_type *sta_desc)
4800 {
4801 	struct dp_peer *peer;
4802 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4803 
4804 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
4805 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4806 
4807 	if (!peer)
4808 		return QDF_STATUS_E_FAULT;
4809 
4810 	qdf_spin_lock_bh(&peer->peer_info_lock);
4811 	peer->state = OL_TXRX_PEER_STATE_CONN;
4812 	qdf_spin_unlock_bh(&peer->peer_info_lock);
4813 
4814 	dp_rx_flush_rx_cached(peer, false);
4815 
4816 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4817 
4818 	return QDF_STATUS_SUCCESS;
4819 }
4820 
4821 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4822 				enum ol_txrx_peer_state state)
4823 {
4824 	struct dp_peer *peer;
4825 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4826 
4827 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4828 				       DP_MOD_ID_CDP);
4829 	if (!peer) {
4830 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
4831 			    soc, QDF_MAC_ADDR_REF(peer_mac));
4832 		return QDF_STATUS_E_FAILURE;
4833 	}
4834 	peer->state = state;
4835 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
4836 
4837 	dp_info("peer %pK state %d", peer, peer->state);
4838 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4839 	 * Decrement it here.
4840 	 */
4841 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4842 
4843 	return QDF_STATUS_SUCCESS;
4844 }
4845 #endif
4846 
4847 QDF_STATUS
4848 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4849 	      struct qdf_mac_addr peer_addr)
4850 {
4851 	struct dp_peer *peer;
4852 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4853 
4854 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
4855 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4856 	if (!peer || !peer->valid)
4857 		return QDF_STATUS_E_FAULT;
4858 
4859 	dp_clear_peer_internal(soc, peer);
4860 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4861 	return QDF_STATUS_SUCCESS;
4862 }
4863 
4864 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4865 			 uint8_t *vdev_id)
4866 {
4867 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4868 	struct dp_peer *peer =
4869 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4870 				       DP_MOD_ID_CDP);
4871 
4872 	if (!peer)
4873 		return QDF_STATUS_E_FAILURE;
4874 
4875 	dp_info("peer %pK vdev %pK vdev id %d",
4876 		peer, peer->vdev, peer->vdev->vdev_id);
4877 	*vdev_id = peer->vdev->vdev_id;
4878 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4879 	 * Decrement it here.
4880 	 */
4881 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4882 
4883 	return QDF_STATUS_SUCCESS;
4884 }
4885 
4886 struct cdp_vdev *
4887 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
4888 			 struct qdf_mac_addr peer_addr)
4889 {
4890 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4891 	struct dp_peer *peer = NULL;
4892 	struct cdp_vdev *vdev = NULL;
4893 
4894 	if (!pdev) {
4895 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
4896 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
4897 		return NULL;
4898 	}
4899 
4900 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
4901 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
4902 	if (!peer) {
4903 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4904 			  "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT,
4905 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
4906 		return NULL;
4907 	}
4908 
4909 	vdev = (struct cdp_vdev *)peer->vdev;
4910 
4911 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4912 	return vdev;
4913 }
4914 
4915 /**
4916  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
4917  * @peer - peer instance
4918  *
4919  * Get virtual interface instance which peer belongs
4920  *
4921  * Return: virtual interface instance pointer
4922  *         NULL in case cannot find
4923  */
4924 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
4925 {
4926 	struct dp_peer *peer = peer_handle;
4927 
4928 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
4929 	return (struct cdp_vdev *)peer->vdev;
4930 }
4931 
4932 /**
4933  * dp_peer_get_peer_mac_addr() - Get peer mac address
4934  * @peer - peer instance
4935  *
4936  * Get peer mac address
4937  *
4938  * Return: peer mac address pointer
4939  *         NULL in case cannot find
4940  */
4941 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
4942 {
4943 	struct dp_peer *peer = peer_handle;
4944 	uint8_t *mac;
4945 
4946 	mac = peer->mac_addr.raw;
4947 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
4948 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4949 	return peer->mac_addr.raw;
4950 }
4951 
4952 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4953 		      uint8_t *peer_mac)
4954 {
4955 	enum ol_txrx_peer_state peer_state;
4956 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4957 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
4958 						       vdev_id, DP_MOD_ID_CDP);
4959 
4960 	if (!peer)
4961 		return OL_TXRX_PEER_STATE_INVALID;
4962 
4963 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
4964 	peer_state = peer->state;
4965 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4966 
4967 	return peer_state;
4968 }
4969 
4970 /**
4971  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
4972  * @pdev - data path device instance
4973  *
4974  * local peer id pool alloc for physical device
4975  *
4976  * Return: none
4977  */
4978 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
4979 {
4980 	int i;
4981 
4982 	/* point the freelist to the first ID */
4983 	pdev->local_peer_ids.freelist = 0;
4984 
4985 	/* link each ID to the next one */
4986 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4987 		pdev->local_peer_ids.pool[i] = i + 1;
4988 		pdev->local_peer_ids.map[i] = NULL;
4989 	}
4990 
4991 	/* link the last ID to itself, to mark the end of the list */
4992 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
4993 	pdev->local_peer_ids.pool[i] = i;
4994 
4995 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
4996 	DP_TRACE(INFO, "Peer pool init");
4997 }
4998 
4999 /**
5000  * dp_local_peer_id_alloc() - allocate local peer id
5001  * @pdev - data path device instance
5002  * @peer - new peer instance
5003  *
5004  * allocate local peer id
5005  *
5006  * Return: none
5007  */
5008 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
5009 {
5010 	int i;
5011 
5012 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5013 	i = pdev->local_peer_ids.freelist;
5014 	if (pdev->local_peer_ids.pool[i] == i) {
5015 		/* the list is empty, except for the list-end marker */
5016 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
5017 	} else {
5018 		/* take the head ID and advance the freelist */
5019 		peer->local_id = i;
5020 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
5021 		pdev->local_peer_ids.map[i] = peer;
5022 	}
5023 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5024 	dp_info("peer %pK, local id %d", peer, peer->local_id);
5025 }
5026 
5027 /**
5028  * dp_local_peer_id_free() - remove local peer id
5029  * @pdev - data path device instance
5030  * @peer - peer instance should be removed
5031  *
5032  * remove local peer id
5033  *
5034  * Return: none
5035  */
5036 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
5037 {
5038 	int i = peer->local_id;
5039 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
5040 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
5041 		return;
5042 	}
5043 
5044 	/* put this ID on the head of the freelist */
5045 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
5046 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
5047 	pdev->local_peer_ids.freelist = i;
5048 	pdev->local_peer_ids.map[i] = NULL;
5049 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
5050 }
5051 
5052 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
5053 				uint8_t vdev_id, uint8_t *peer_addr)
5054 {
5055 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5056 	struct dp_peer *peer = NULL;
5057 
5058 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
5059 				      DP_MOD_ID_CDP);
5060 	if (!peer)
5061 		return false;
5062 
5063 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5064 
5065 	return true;
5066 }
5067 
5068 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
5069 				      uint8_t vdev_id, uint8_t *peer_addr,
5070 				      uint16_t max_bssid)
5071 {
5072 	int i;
5073 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5074 	struct dp_peer *peer = NULL;
5075 
5076 	for (i = 0; i < max_bssid; i++) {
5077 		/* Need to check vdevs other than the vdev_id */
5078 		if (vdev_id == i)
5079 			continue;
5080 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
5081 					      DP_MOD_ID_CDP);
5082 		if (peer) {
5083 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
5084 			       QDF_MAC_ADDR_REF(peer_addr), i);
5085 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5086 			return true;
5087 		}
5088 	}
5089 
5090 	return false;
5091 }
5092 
5093 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5094 			uint8_t *peer_addr)
5095 {
5096 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5097 	struct dp_peer *peer = NULL;
5098 
5099 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
5100 				      DP_MOD_ID_CDP);
5101 	if (peer) {
5102 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5103 		return true;
5104 	}
5105 
5106 	return false;
5107 }
5108 #endif
5109 
5110 /**
5111  * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
5112  * @peer: DP peer handle
5113  * @dp_stats_cmd_cb: REO command callback function
5114  * @cb_ctxt: Callback context
5115  *
5116  * Return: count of tid stats cmd send succeeded
5117  */
5118 int dp_peer_rxtid_stats(struct dp_peer *peer,
5119 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
5120 			void *cb_ctxt)
5121 {
5122 	struct dp_soc *soc = peer->vdev->pdev->soc;
5123 	struct hal_reo_cmd_params params;
5124 	int i;
5125 	int stats_cmd_sent_cnt = 0;
5126 	QDF_STATUS status;
5127 
5128 	if (!dp_stats_cmd_cb)
5129 		return stats_cmd_sent_cnt;
5130 
5131 	qdf_mem_zero(&params, sizeof(params));
5132 	for (i = 0; i < DP_MAX_TIDS; i++) {
5133 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
5134 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
5135 			params.std.need_status = 1;
5136 			params.std.addr_lo =
5137 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5138 			params.std.addr_hi =
5139 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5140 
5141 			if (cb_ctxt) {
5142 				status = dp_reo_send_cmd(
5143 						soc, CMD_GET_QUEUE_STATS,
5144 						&params, dp_stats_cmd_cb,
5145 						cb_ctxt);
5146 			} else {
5147 				status = dp_reo_send_cmd(
5148 						soc, CMD_GET_QUEUE_STATS,
5149 						&params, dp_stats_cmd_cb,
5150 						rx_tid);
5151 			}
5152 
5153 			if (QDF_IS_STATUS_SUCCESS(status))
5154 				stats_cmd_sent_cnt++;
5155 
5156 			/* Flush REO descriptor from HW cache to update stats
5157 			 * in descriptor memory. This is to help debugging */
5158 			qdf_mem_zero(&params, sizeof(params));
5159 			params.std.need_status = 0;
5160 			params.std.addr_lo =
5161 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5162 			params.std.addr_hi =
5163 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5164 			params.u.fl_cache_params.flush_no_inval = 1;
5165 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
5166 				NULL);
5167 		}
5168 	}
5169 
5170 	return stats_cmd_sent_cnt;
5171 }
5172 
5173 QDF_STATUS
5174 dp_set_michael_key(struct cdp_soc_t *soc,
5175 		   uint8_t vdev_id,
5176 		   uint8_t *peer_mac,
5177 		   bool is_unicast, uint32_t *key)
5178 {
5179 	uint8_t sec_index = is_unicast ? 1 : 0;
5180 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
5181 						      peer_mac, 0, vdev_id,
5182 						      DP_MOD_ID_CDP);
5183 
5184 	if (!peer) {
5185 		dp_peer_err("%pK: peer not found ", soc);
5186 		return QDF_STATUS_E_FAILURE;
5187 	}
5188 
5189 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
5190 		     key, IEEE80211_WEP_MICLEN);
5191 
5192 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5193 
5194 	return QDF_STATUS_SUCCESS;
5195 }
5196 
5197 
5198 /**
5199  * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
5200  * @soc: DP soc
5201  * @vdev: vdev
5202  * @mod_id: id of module requesting reference
5203  *
5204  * Return: VDEV BSS peer
5205  */
5206 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
5207 					   struct dp_vdev *vdev,
5208 					   enum dp_mod_id mod_id)
5209 {
5210 	struct dp_peer *peer = NULL;
5211 
5212 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5213 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5214 		if (peer->bss_peer)
5215 			break;
5216 	}
5217 
5218 	if (!peer) {
5219 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5220 		return NULL;
5221 	}
5222 
5223 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
5224 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5225 		return peer;
5226 	}
5227 
5228 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
5229 	return peer;
5230 }
5231 
5232 /**
5233  * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
5234  * @soc: DP soc
5235  * @vdev: vdev
5236  * @mod_id: id of module requesting reference
5237  *
5238  * Return: VDEV self peer
5239  */
5240 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
5241 						struct dp_vdev *vdev,
5242 						enum dp_mod_id mod_id)
5243 {
5244 	struct dp_peer *peer;
5245 
5246 	if (vdev->opmode != wlan_op_mode_sta)
5247 		return NULL;
5248 
5249 	qdf_spin_lock_bh(&vdev->peer_list_lock);
5250 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5251 		if (peer->sta_self_peer)
5252 			break;
5253 	}
5254 
5255 	if (!peer) {
5256 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5257 		return NULL;
5258 	}
5259 
5260 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
5261 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
5262 		return peer;
5263 	}
5264 
5265 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
5266 	return peer;
5267 }
5268 
5269 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
5270 void dp_dump_rx_reo_queue_info(
5271 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
5272 {
5273 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
5274 
5275 	if (!rx_tid)
5276 		return;
5277 
5278 	if (reo_status->fl_cache_status.header.status !=
5279 		HAL_REO_CMD_SUCCESS) {
5280 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
5281 			  reo_status->rx_queue_status.header.status);
5282 		return;
5283 	}
5284 	qdf_spin_lock_bh(&rx_tid->tid_lock);
5285 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
5286 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
5287 }
5288 
5289 void dp_send_cache_flush_for_rx_tid(
5290 	struct dp_soc *soc, struct dp_peer *peer)
5291 {
5292 	int i;
5293 	struct dp_rx_tid *rx_tid;
5294 	struct hal_reo_cmd_params params;
5295 
5296 	if (!peer) {
5297 		dp_err_rl("Peer is NULL");
5298 		return;
5299 	}
5300 
5301 	for (i = 0; i < DP_MAX_TIDS; i++) {
5302 		rx_tid = &peer->rx_tid[i];
5303 		if (!rx_tid)
5304 			continue;
5305 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5306 		if (rx_tid->hw_qdesc_vaddr_aligned) {
5307 			qdf_mem_zero(&params, sizeof(params));
5308 			params.std.need_status = 1;
5309 			params.std.addr_lo =
5310 				rx_tid->hw_qdesc_paddr & 0xffffffff;
5311 			params.std.addr_hi =
5312 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
5313 			params.u.fl_cache_params.flush_no_inval = 0;
5314 			if (QDF_STATUS_SUCCESS !=
5315 				dp_reo_send_cmd(
5316 					soc, CMD_FLUSH_CACHE,
5317 					&params, dp_dump_rx_reo_queue_info,
5318 					(void *)rx_tid)) {
5319 				dp_err_rl("cache flush send failed tid %d",
5320 					  rx_tid->tid);
5321 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
5322 				break;
5323 			}
5324 		}
5325 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5326 	}
5327 }
5328 
5329 void dp_get_rx_reo_queue_info(
5330 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
5331 {
5332 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5333 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5334 						     DP_MOD_ID_GENERIC_STATS);
5335 	struct dp_peer *peer = NULL;
5336 
5337 	if (!vdev) {
5338 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
5339 		goto failed;
5340 	}
5341 
5342 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
5343 
5344 	if (!peer) {
5345 		dp_err_rl("Peer is NULL");
5346 		goto failed;
5347 	}
5348 	dp_send_cache_flush_for_rx_tid(soc, peer);
5349 failed:
5350 	if (peer)
5351 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
5352 	if (vdev)
5353 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
5354 }
5355 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
5356 
5357 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5358 			 uint8_t *peer_mac)
5359 {
5360 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5361 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
5362 						      vdev_id, DP_MOD_ID_CDP);
5363 	struct dp_rx_tid *rx_tid;
5364 	uint8_t tid;
5365 
5366 	if (!peer)
5367 		return;
5368 
5369 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
5370 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
5371 
5372 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
5373 		rx_tid = &peer->rx_tid[tid];
5374 
5375 		qdf_spin_lock_bh(&rx_tid->tid_lock);
5376 		dp_rx_defrag_waitlist_remove(peer, tid);
5377 		dp_rx_reorder_flush_frag(peer, tid);
5378 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
5379 	}
5380 
5381 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5382 }
5383 
5384 /*
5385  * dp_peer_find_by_id_valid - check if peer exists for given id
5386  * @soc: core DP soc context
5387  * @peer_id: peer id from peer object can be retrieved
5388  *
5389  * Return: true if peer exists of false otherwise
5390  */
5391 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
5392 {
5393 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
5394 						     DP_MOD_ID_HTT);
5395 
5396 	if (peer) {
5397 		/*
5398 		 * Decrement the peer ref which is taken as part of
5399 		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
5400 		 */
5401 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
5402 
5403 		return true;
5404 	}
5405 
5406 	return false;
5407 }
5408 
5409 qdf_export_symbol(dp_peer_find_by_id_valid);
5410