xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 #ifdef WIFI_MONITOR_SUPPORT
33 #include <dp_mon.h>
34 #endif
35 #ifdef FEATURE_WDS
36 #include "dp_txrx_wds.h"
37 #endif
38 #include <qdf_module.h>
39 #ifdef QCA_PEER_EXT_STATS
40 #include "dp_hist.h"
41 #endif
42 
43 #ifdef REO_QDESC_HISTORY
44 #define REO_QDESC_HISTORY_SIZE 512
45 uint64_t reo_qdesc_history_idx;
46 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
47 #endif
48 
49 #ifdef FEATURE_WDS
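/*
 * dp_peer_ast_free_in_unmap_supported() - Check if the ast entry is freed
 *                                          in the peer unmap event
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * Return: true if the ast entry is expected to be freed in the peer unmap
 *         event, false if it has to be freed here
 */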
50 static inline bool
51 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
52 				    struct dp_ast_entry *ast_entry)
53 {
54 	/* if peer map v2 is enabled we are not freeing ast entry
55 	 * here and it is supposed to be freed in unmap event (after
56 	 * we receive delete confirmation from target)
57 	 *
58 	 * if peer_id is invalid we did not get the peer map event
59 	 * for the peer; free the ast entry from here only in this case
60 	 */
61 
62 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
63 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
64 		return true;
65 
66 	return false;
67 }
68 #else
69 static inline bool
70 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
71 				    struct dp_ast_entry *ast_entry)
72 {
73 	return false;
74 }
75 
76 static void dp_soc_wds_attach(struct dp_soc *soc)
77 {
78 }
79 
80 static void dp_soc_wds_detach(struct dp_soc *soc)
81 {
82 }
83 #endif
84 
85 #ifdef REO_QDESC_HISTORY
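/*
 * dp_rx_reo_qdesc_history_add() - Record a REO queue descriptor event
 * @free_desc: REO descriptor free list node
 * @type: event type to record
 *
 * Logs the peer MAC, queue descriptor address and timestamp into the
 * reo_qdesc_history ring buffer.
 *
 * Return: None
 */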
86 static inline void
87 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
88 			    enum reo_qdesc_event_type type)
89 {
90 	struct reo_qdesc_event *evt;
91 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
92 	uint32_t idx;
93 
94 	reo_qdesc_history_idx++;
95 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
96 
97 	evt = &reo_qdesc_history[idx];
98 
99 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
100 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
101 	evt->ts = qdf_get_log_timestamp();
102 	evt->type = type;
103 }
104 
105 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
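/*
 * dp_rx_reo_qdesc_deferred_evt_add() - Record a deferred REO qdesc event
 * @desc: deferred freelist node
 * @type: event type to record
 *
 * Return: None
 */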
106 static inline void
107 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
108 				 enum reo_qdesc_event_type type)
109 {
110 	struct reo_qdesc_event *evt;
111 	uint32_t idx;
112 
113 	reo_qdesc_history_idx++;
114 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
115 
116 	evt = &reo_qdesc_history[idx];
117 
118 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
119 	evt->qdesc_addr = desc->hw_qdesc_paddr;
120 	evt->ts = qdf_get_log_timestamp();
121 	evt->type = type;
122 }
123 
124 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
125 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
126 
127 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
128 	qdf_mem_copy(desc->peer_mac, freedesc->peer_mac, QDF_MAC_ADDR_SIZE)
129 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
130 
131 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
132 	qdf_mem_copy(freedesc->peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE)
133 
134 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
135 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
136 
137 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
138 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
139 
140 #else
141 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
142 
143 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
144 
145 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
146 
147 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
148 
149 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
150 #endif
151 
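/*
 * dp_set_ssn_valid_flag() - Set the SSN valid flag in REO queue update
 *                           command parameters
 * @params: REO command parameters
 * @valid: SSN valid flag value to set
 *
 * Return: None
 */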
152 static inline void
153 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
154 					uint8_t valid)
155 {
156 	params->u.upd_queue_params.update_svld = 1;
157 	params->u.upd_queue_params.svld = valid;
158 	dp_peer_debug("Setting SSN valid bit to %d",
159 		      valid);
160 }
161 
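/*
 * dp_peer_find_mac_addr_cmp() - Compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the two MAC addresses are equal, non-zero otherwise
 */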
162 static inline int dp_peer_find_mac_addr_cmp(
163 	union dp_align_mac_addr *mac_addr1,
164 	union dp_align_mac_addr *mac_addr2)
165 {
166 		/*
167 		 * Intentionally use & rather than &&.
168 		 * because the operands are binary rather than generic boolean,
169 		 * the functionality is equivalent.
170 		 * Using && has the advantage of short-circuited evaluation,
171 		 * but using & has the advantage of no conditional branching,
172 		 * which is a more significant benefit.
173 		 */
174 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
175 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
176 }
177 
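/*
 * dp_peer_ast_table_attach() - Allocate memory for the soc AST table
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */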
178 static QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
179 {
180 	uint32_t max_ast_index;
181 
182 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
183 	/* allocate ast_table for ast entry to ast_index map */
184 	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
185 	soc->ast_table = qdf_mem_malloc(max_ast_index *
186 					sizeof(struct dp_ast_entry *));
187 	if (!soc->ast_table) {
188 		dp_peer_err("%pK: ast_table memory allocation failed", soc);
189 		return QDF_STATUS_E_NOMEM;
190 	}
191 	return QDF_STATUS_SUCCESS; /* success */
192 }
193 
194 /*
195  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
196  * @soc: soc handle
197  *
198  * return: QDF_STATUS
199  */
200 static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
201 {
202 	uint32_t max_peers, peer_map_size;
203 
204 	max_peers = soc->max_peers;
205 	/* allocate the peer ID -> peer object map */
206 	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
207 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
208 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
209 	if (!soc->peer_id_to_obj_map) {
210 		dp_peer_err("%pK: peer map memory allocation failed", soc);
211 		return QDF_STATUS_E_NOMEM;
212 	}
213 
214 	/*
215 	 * The peer_id_to_obj_map doesn't really need to be initialized,
216 	 * since elements are only used after they have been individually
217 	 * initialized.
218 	 * However, it is convenient for debugging to have all elements
219 	 * that are not in use set to 0.
220 	 */
221 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
222 
223 	qdf_spinlock_create(&soc->peer_map_lock);
224 	return QDF_STATUS_SUCCESS; /* success */
225 }
226 
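/*
 * dp_log2_ceil() - Compute the ceiling of log2 of a value
 * @value: input value
 *
 * Return: smallest n such that (1 << n) >= value
 */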
227 static int dp_log2_ceil(unsigned int value)
228 {
229 	unsigned int tmp = value;
230 	int log2 = -1;
231 
232 	while (tmp) {
233 		log2++;
234 		tmp >>= 1;
235 	}
236 	if (1 << log2 != value)
237 		log2++;
238 	return log2;
239 }
240 
241 #define DP_PEER_HASH_LOAD_MULT  2
242 #define DP_PEER_HASH_LOAD_SHIFT 0
243 
244 #define DP_AST_HASH_LOAD_MULT  2
245 #define DP_AST_HASH_LOAD_SHIFT 0
246 
247 /*
248  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
249  * @soc: soc handle
250  *
251  * return: QDF_STATUS
252  */
253 static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
254 {
255 	int i, hash_elems, log2;
256 
257 	/* allocate the peer MAC address -> peer object hash table */
258 	hash_elems = soc->max_peers;
259 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
260 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
261 	log2 = dp_log2_ceil(hash_elems);
262 	hash_elems = 1 << log2;
263 
264 	soc->peer_hash.mask = hash_elems - 1;
265 	soc->peer_hash.idx_bits = log2;
266 	/* allocate an array of TAILQ peer object lists */
267 	soc->peer_hash.bins = qdf_mem_malloc(
268 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
269 	if (!soc->peer_hash.bins)
270 		return QDF_STATUS_E_NOMEM;
271 
272 	for (i = 0; i < hash_elems; i++)
273 		TAILQ_INIT(&soc->peer_hash.bins[i]);
274 
275 	qdf_spinlock_create(&soc->peer_hash_lock);
276 	return QDF_STATUS_SUCCESS;
277 }
278 
279 /*
280  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
281  * @soc: soc handle
282  *
283  * return: none
284  */
285 static void dp_peer_find_hash_detach(struct dp_soc *soc)
286 {
287 	if (soc->peer_hash.bins) {
288 		qdf_mem_free(soc->peer_hash.bins);
289 		soc->peer_hash.bins = NULL;
290 		qdf_spinlock_destroy(&soc->peer_hash_lock);
291 	}
292 }
293 
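/*
 * dp_peer_find_hash_index() - Compute the peer hash index from MAC address
 * @soc: soc handle
 * @mac_addr: MAC address of the peer
 *
 * Return: hash index into the peer hash bins
 */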
294 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
295 	union dp_align_mac_addr *mac_addr)
296 {
297 	unsigned index;
298 
299 	index =
300 		mac_addr->align2.bytes_ab ^
301 		mac_addr->align2.bytes_cd ^
302 		mac_addr->align2.bytes_ef;
303 	index ^= index >> soc->peer_hash.idx_bits;
304 	index &= soc->peer_hash.mask;
305 	return index;
306 }
307 
308 /*
309  * dp_peer_find_hash_add() - add peer to peer_hash_table
310  * @soc: soc handle
311  * @peer: peer handle
312  *
313  * return: none
314  */
315 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
316 {
317 	unsigned index;
318 
319 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
320 	qdf_spin_lock_bh(&soc->peer_hash_lock);
321 
322 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
323 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
324 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
325 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
326 		return;
327 	}
328 
329 	/*
330 	 * It is important to add the new peer at the tail of the peer list
331 	 * with the bin index.  Together with having the hash_find function
332 	 * search from head to tail, this ensures that if two entries with
333 	 * the same MAC address are stored, the one added first will be
334 	 * found first.
335 	 */
336 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
337 
338 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
339 }
340 
341 /*
342  * dp_peer_vdev_list_add() - add peer into vdev's peer list
343  * @soc: soc handle
344  * @vdev: vdev handle
345  * @peer: peer handle
346  *
347  * return: none
348  */
349 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
350 			   struct dp_peer *peer)
351 {
352 	qdf_spin_lock_bh(&vdev->peer_list_lock);
353 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
354 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
355 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
356 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
357 		return;
358 	}
359 
360 	/* add this peer into the vdev's list */
361 	if (wlan_op_mode_sta == vdev->opmode)
362 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
363 	else
364 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
365 
366 	vdev->num_peers++;
367 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
368 }
369 
370 /*
371  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
372  * @soc: SoC handle
373  * @vdev: VDEV handle
374  * @peer: peer handle
375  *
376  * Return: none
377  */
378 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
379 			      struct dp_peer *peer)
380 {
381 	uint8_t found = 0;
382 	struct dp_peer *tmppeer = NULL;
383 
384 	qdf_spin_lock_bh(&vdev->peer_list_lock);
385 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
386 		if (tmppeer == peer) {
387 			found = 1;
388 			break;
389 		}
390 	}
391 
392 	if (found) {
393 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
394 			     peer_list_elem);
395 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
396 		vdev->num_peers--;
397 	} else {
398 		/* Ignoring the remove operation as peer not found */
399 		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK"
400 			      , soc, peer, vdev, &peer->vdev->peer_list);
401 	}
402 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
403 }
404 
405 /*
406  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
407  * @soc: SoC handle
408  * @peer: peer handle
409  * @peer_id: peer_id
410  *
411  * Return: None
412  */
413 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
414 				struct dp_peer *peer,
415 				uint16_t peer_id)
416 {
417 	QDF_ASSERT(peer_id <= soc->max_peers);
418 
419 	qdf_spin_lock_bh(&soc->peer_map_lock);
420 
421 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
422 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
423 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
424 		qdf_spin_unlock_bh(&soc->peer_map_lock);
425 		return;
426 	}
427 
428 	if (!soc->peer_id_to_obj_map[peer_id]) {
429 		soc->peer_id_to_obj_map[peer_id] = peer;
430 	} else {
431 		/* Peer map event came for peer_id which
432 		 * is already mapped, this is not expected
433 		 */
434 		QDF_ASSERT(0);
435 	}
436 	qdf_spin_unlock_bh(&soc->peer_map_lock);
437 }
438 
439 /*
440  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
441  * @soc: SoC handle
442  * @peer_id: peer_id
443  *
444  * Return: None
445  */
446 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
447 				   uint16_t peer_id)
448 {
449 	struct dp_peer *peer = NULL;
450 	QDF_ASSERT(peer_id <= soc->max_peers);
451 
452 	qdf_spin_lock_bh(&soc->peer_map_lock);
453 	peer = soc->peer_id_to_obj_map[peer_id];
454 	soc->peer_id_to_obj_map[peer_id] = NULL;
455 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
456 	qdf_spin_unlock_bh(&soc->peer_map_lock);
457 }
458 
459 /*
460  * dp_peer_exist_on_pdev - check if peer with mac address exist on pdev
461  *
462  * @soc: Datapath SOC handle
463  * @peer_mac_addr: peer mac address
464  * @mac_addr_is_aligned: is mac address aligned
465  * @pdev: Datapath PDEV handle
466  *
467  * Return: true if peer found else return false
468  */
469 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
470 				  uint8_t *peer_mac_addr,
471 				  int mac_addr_is_aligned,
472 				  struct dp_pdev *pdev)
473 {
474 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
475 	unsigned int index;
476 	struct dp_peer *peer;
477 	bool found = false;
478 
479 	if (mac_addr_is_aligned) {
480 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
481 	} else {
482 		qdf_mem_copy(
483 			&local_mac_addr_aligned.raw[0],
484 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
485 		mac_addr = &local_mac_addr_aligned;
486 	}
487 	index = dp_peer_find_hash_index(soc, mac_addr);
488 	qdf_spin_lock_bh(&soc->peer_hash_lock);
489 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
490 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
491 		    (peer->vdev->pdev == pdev)) {
492 			found = true;
493 			break;
494 		}
495 	}
496 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
497 	return found;
498 }
499 
500 #ifdef FEATURE_MEC
501 /**
502  * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
503  * @soc: SoC handle
504  *
505  * Return: QDF_STATUS
506  */
507 static QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
508 {
509 	int log2, hash_elems, i;
510 
511 	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
512 	hash_elems = 1 << log2;
513 
514 	soc->mec_hash.mask = hash_elems - 1;
515 	soc->mec_hash.idx_bits = log2;
516 
517 	dp_peer_info("%pK: max mec index: %d",
518 		     soc, DP_PEER_MAX_MEC_IDX);
519 
520 	/* allocate an array of TAILQ mec object lists */
521 	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
522 					    sizeof(TAILQ_HEAD(anonymous_tail_q,
523 							      dp_mec_entry)));
524 
525 	if (!soc->mec_hash.bins)
526 		return QDF_STATUS_E_NOMEM;
527 
528 	for (i = 0; i < hash_elems; i++)
529 		TAILQ_INIT(&soc->mec_hash.bins[i]);
530 
531 	return QDF_STATUS_SUCCESS;
532 }
533 
534 /**
535  * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
536  * @soc: SoC handle
 * @mac_addr: MAC address of the MEC entry
537  *
538  * Return: MEC hash
539  */
540 static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
541 					      union dp_align_mac_addr *mac_addr)
542 {
543 	uint32_t index;
544 
545 	index =
546 		mac_addr->align2.bytes_ab ^
547 		mac_addr->align2.bytes_cd ^
548 		mac_addr->align2.bytes_ef;
549 	index ^= index >> soc->mec_hash.idx_bits;
550 	index &= soc->mec_hash.mask;
551 	return index;
552 }
553 
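/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by pdev id and MAC
 * @soc: SoC handle
 * @pdev_id: pdev id of the MEC entry
 * @mec_mac_addr: MAC address of the MEC entry
 *
 * It assumes the caller has taken the mec lock to protect access to the
 * MEC hash table
 *
 * Return: MEC entry if found, else NULL
 */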
554 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
555 						     uint8_t pdev_id,
556 						     uint8_t *mec_mac_addr)
557 {
558 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
559 	uint32_t index;
560 	struct dp_mec_entry *mecentry;
561 
562 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
563 		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
564 	mac_addr = &local_mac_addr_aligned;
565 
566 	index = dp_peer_mec_hash_index(soc, mac_addr);
567 	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
568 		if ((pdev_id == mecentry->pdev_id) &&
569 		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
570 			return mecentry;
571 	}
572 
573 	return NULL;
574 }
575 
576 /**
577  * dp_peer_mec_hash_add() - Add MEC entry into hash table
578  * @soc: SoC handle
 * @mecentry: MEC entry to be added
579  *
580  * This function adds the MEC entry into SoC MEC hash table
581  *
582  * Return: None
583  */
584 static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
585 					struct dp_mec_entry *mecentry)
586 {
587 	uint32_t index;
588 
589 	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
590 	qdf_spin_lock_bh(&soc->mec_lock);
591 	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
592 	qdf_spin_unlock_bh(&soc->mec_lock);
593 }
594 
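/**
 * dp_peer_mec_add_entry() - Allocate and add a MEC entry into the hash table
 * @soc: SoC handle
 * @vdev: vdev handle of the peer
 * @mac_addr: MAC address of the MEC entry
 *
 * Return: QDF_STATUS_SUCCESS on add, QDF_STATUS_E_ALREADY if the entry
 *         already exists, error status otherwise
 */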
595 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
596 				 struct dp_vdev *vdev,
597 				 uint8_t *mac_addr)
598 {
599 	struct dp_mec_entry *mecentry = NULL;
600 	struct dp_pdev *pdev = NULL;
601 
602 	if (!vdev) {
603 		dp_peer_err("%pK: Peers vdev is NULL", soc);
604 		return QDF_STATUS_E_INVAL;
605 	}
606 
607 	pdev = vdev->pdev;
608 
609 	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
610 					 DP_PEER_MAX_MEC_ENTRY)) {
611 		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
612 			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
613 		return QDF_STATUS_E_NOMEM;
614 	}
615 
616 	qdf_spin_lock_bh(&soc->mec_lock);
617 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
618 						   mac_addr);
619 	if (qdf_likely(mecentry)) {
620 		mecentry->is_active = TRUE;
621 		qdf_spin_unlock_bh(&soc->mec_lock);
622 		return QDF_STATUS_E_ALREADY;
623 	}
624 
625 	qdf_spin_unlock_bh(&soc->mec_lock);
626 
627 	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
628 		      QDF_MAC_ADDR_FMT,
629 		      soc, pdev->pdev_id, vdev->vdev_id,
630 		      QDF_MAC_ADDR_REF(mac_addr));
631 
632 	mecentry = (struct dp_mec_entry *)
633 			qdf_mem_malloc(sizeof(struct dp_mec_entry));
634 
635 	if (qdf_unlikely(!mecentry)) {
636 		dp_peer_err("%pK: fail to allocate mecentry", soc);
637 		return QDF_STATUS_E_NOMEM;
638 	}
639 
640 	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
641 			 (struct qdf_mac_addr *)mac_addr);
642 	mecentry->pdev_id = pdev->pdev_id;
643 	mecentry->vdev_id = vdev->vdev_id;
644 	mecentry->is_active = TRUE;
645 	dp_peer_mec_hash_add(soc, mecentry);
646 
647 	qdf_atomic_inc(&soc->mec_cnt);
648 	DP_STATS_INC(soc, mec.added, 1);
649 
650 	return QDF_STATUS_SUCCESS;
651 }
652 
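/**
 * dp_peer_mec_detach_entry() - Detach a MEC entry from the hash table
 * @soc: SoC handle
 * @mecentry: MEC entry to detach
 * @ptr: free list (TAILQ head) to which the detached entry is moved
 *
 * Return: None
 */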
653 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
654 			      void *ptr)
655 {
656 	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
657 
658 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
659 
660 	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
661 		     hash_list_elem);
662 	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
663 }
664 
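/**
 * dp_peer_mec_free_list() - Free all MEC entries on the given free list
 * @soc: SoC handle
 * @ptr: free list (TAILQ head) of detached MEC entries
 *
 * Return: None
 */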
665 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
666 {
667 	struct dp_mec_entry *mecentry, *mecentry_next;
668 
669 	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;
670 
671 	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
672 			   mecentry_next) {
673 		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
674 			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
675 		qdf_mem_free(mecentry);
676 		qdf_atomic_dec(&soc->mec_cnt);
677 		DP_STATS_INC(soc, mec.deleted, 1);
678 	}
679 }
680 
681 /**
682  * dp_peer_mec_hash_detach() - Free MEC Hash table
683  * @soc: SoC handle
684  *
685  * Return: None
686  */
687 static void dp_peer_mec_hash_detach(struct dp_soc *soc)
688 {
689 	dp_peer_mec_flush_entries(soc);
690 	qdf_mem_free(soc->mec_hash.bins);
691 	soc->mec_hash.bins = NULL;
692 }
693 
694 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
695 {
696 	qdf_spinlock_destroy(&soc->mec_lock);
697 }
698 
699 void dp_peer_mec_spinlock_create(struct dp_soc *soc)
700 {
701 	qdf_spinlock_create(&soc->mec_lock);
702 }
703 #else
704 static QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
705 {
706 	return QDF_STATUS_SUCCESS;
707 }
708 
709 static void dp_peer_mec_hash_detach(struct dp_soc *soc)
710 {
711 }
712 #endif
713 
714 #ifdef FEATURE_AST
715 /*
716  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
717  * @soc: SoC handle
718  *
719  * Return: QDF_STATUS
720  */
721 static QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
722 {
723 	int i, hash_elems, log2;
724 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
725 
726 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
727 		DP_AST_HASH_LOAD_SHIFT);
728 
729 	log2 = dp_log2_ceil(hash_elems);
730 	hash_elems = 1 << log2;
731 
732 	soc->ast_hash.mask = hash_elems - 1;
733 	soc->ast_hash.idx_bits = log2;
734 
735 	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
736 		     soc, hash_elems, max_ast_idx);
737 
738 	/* allocate an array of TAILQ peer object lists */
739 	soc->ast_hash.bins = qdf_mem_malloc(
740 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
741 				dp_ast_entry)));
742 
743 	if (!soc->ast_hash.bins)
744 		return QDF_STATUS_E_NOMEM;
745 
746 	for (i = 0; i < hash_elems; i++)
747 		TAILQ_INIT(&soc->ast_hash.bins[i]);
748 
749 	return QDF_STATUS_SUCCESS;
750 }
751 
752 /*
753  * dp_peer_ast_cleanup() - cleanup the references
754  * @soc: SoC handle
755  * @ast: ast entry
756  *
757  * Return: None
758  */
759 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
760 				       struct dp_ast_entry *ast)
761 {
762 	txrx_ast_free_cb cb = ast->callback;
763 	void *cookie = ast->cookie;
764 
765 	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
766 		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);
767 
768 	/* Call the callbacks to free up the cookie */
769 	if (cb) {
770 		ast->callback = NULL;
771 		ast->cookie = NULL;
772 		cb(soc->ctrl_psoc,
773 		   dp_soc_to_cdp_soc(soc),
774 		   cookie,
775 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
776 	}
777 }
778 
779 /*
780  * dp_peer_ast_hash_detach() - Free AST Hash table
781  * @soc: SoC handle
782  *
783  * Return: None
784  */
785 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
786 {
787 	unsigned int index;
788 	struct dp_ast_entry *ast, *ast_next;
789 
790 	if (!soc->ast_hash.mask)
791 		return;
792 
793 	if (!soc->ast_hash.bins)
794 		return;
795 
796 	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);
797 
798 	qdf_spin_lock_bh(&soc->ast_lock);
799 	for (index = 0; index <= soc->ast_hash.mask; index++) {
800 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
801 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
802 					   hash_list_elem, ast_next) {
803 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
804 					     hash_list_elem);
805 				dp_peer_ast_cleanup(soc, ast);
806 				soc->num_ast_entries--;
807 				qdf_mem_free(ast);
808 			}
809 		}
810 	}
811 	qdf_spin_unlock_bh(&soc->ast_lock);
812 
813 	qdf_mem_free(soc->ast_hash.bins);
814 	soc->ast_hash.bins = NULL;
815 }
816 
817 /*
818  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
819  * @soc: SoC handle
820  *
821  * Return: AST hash
822  */
823 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
824 	union dp_align_mac_addr *mac_addr)
825 {
826 	uint32_t index;
827 
828 	index =
829 		mac_addr->align2.bytes_ab ^
830 		mac_addr->align2.bytes_cd ^
831 		mac_addr->align2.bytes_ef;
832 	index ^= index >> soc->ast_hash.idx_bits;
833 	index &= soc->ast_hash.mask;
834 	return index;
835 }
836 
837 /*
838  * dp_peer_ast_hash_add() - Add AST entry into hash table
839  * @soc: SoC handle
 * @ase: AST entry to be added
840  *
841  * This function adds the AST entry into SoC AST hash table
842  * It assumes caller has taken the ast lock to protect the access to this table
843  *
844  * Return: None
845  */
846 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
847 		struct dp_ast_entry *ase)
848 {
849 	uint32_t index;
850 
851 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
852 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
853 }
854 
855 /*
856  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
857  * @soc: SoC handle
 * @ase: AST entry to be removed
858  *
859  * This function removes the AST entry from soc AST hash table
860  * It assumes caller has taken the ast lock to protect the access to this table
861  *
862  * Return: None
863  */
864 void dp_peer_ast_hash_remove(struct dp_soc *soc,
865 			     struct dp_ast_entry *ase)
866 {
867 	unsigned index;
868 	struct dp_ast_entry *tmpase;
869 	int found = 0;
870 
871 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
872 	/* Check that the hash bin list is not empty before delete */
873 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
874 
875 	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
876 		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));
877 
878 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
879 		if (tmpase == ase) {
880 			found = 1;
881 			break;
882 		}
883 	}
884 
885 	QDF_ASSERT(found);
886 
887 	if (found)
888 		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
889 }
890 
891 /*
892  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
893  * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @vdev_id: vdev id to be matched
894  *
895  * It assumes caller has taken the ast lock to protect the access to
896  * AST hash table
897  *
898  * Return: AST entry
899  */
900 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
901 						     uint8_t *ast_mac_addr,
902 						     uint8_t vdev_id)
903 {
904 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
905 	uint32_t index;
906 	struct dp_ast_entry *ase;
907 
908 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
909 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
910 	mac_addr = &local_mac_addr_aligned;
911 
912 	index = dp_peer_ast_hash_index(soc, mac_addr);
913 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
914 		if ((vdev_id == ase->vdev_id) &&
915 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
916 			return ase;
917 		}
918 	}
919 
920 	return NULL;
921 }
922 
923 /*
924  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
925  * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
 * @pdev_id: pdev id to be matched
926  *
927  * It assumes caller has taken the ast lock to protect the access to
928  * AST hash table
929  *
930  * Return: AST entry
931  */
932 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
933 						     uint8_t *ast_mac_addr,
934 						     uint8_t pdev_id)
935 {
936 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
937 	uint32_t index;
938 	struct dp_ast_entry *ase;
939 
940 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
941 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
942 	mac_addr = &local_mac_addr_aligned;
943 
944 	index = dp_peer_ast_hash_index(soc, mac_addr);
945 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
946 		if ((pdev_id == ase->pdev_id) &&
947 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
948 			return ase;
949 		}
950 	}
951 
952 	return NULL;
953 }
954 
955 /*
956  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
957  * @soc: SoC handle
 * @ast_mac_addr: MAC address of the AST entry
958  *
959  * It assumes caller has taken the ast lock to protect the access to
960  * AST hash table
961  *
962  * Return: AST entry
963  */
964 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
965 					       uint8_t *ast_mac_addr)
966 {
967 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
968 	unsigned index;
969 	struct dp_ast_entry *ase;
970 
971 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
972 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
973 	mac_addr = &local_mac_addr_aligned;
974 
975 	index = dp_peer_ast_hash_index(soc, mac_addr);
976 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
977 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
978 			return ase;
979 		}
980 	}
981 
982 	return NULL;
983 }
984 
985 /*
986  * dp_peer_map_ast() - Map the ast entry with HW AST Index
987  * @soc: SoC handle
988  * @peer: peer to which ast node belongs
989  * @mac_addr: MAC address of ast node
990  * @hw_peer_id: HW AST Index returned by target in peer map event
991  * @vdev_id: vdev id for VAP to which the peer belongs to
992  * @ast_hash: ast hash value in HW
993  * @is_wds: flag to indicate peer map event for WDS ast entry
994  *
995  * Return: QDF_STATUS code
996  */
997 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
998 					 struct dp_peer *peer,
999 					 uint8_t *mac_addr,
1000 					 uint16_t hw_peer_id,
1001 					 uint8_t vdev_id,
1002 					 uint16_t ast_hash,
1003 					 uint8_t is_wds)
1004 {
1005 	struct dp_ast_entry *ast_entry = NULL;
1006 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
1007 	void *cookie = NULL;
1008 	txrx_ast_free_cb cb = NULL;
1009 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1010 
1011 	if (!peer) {
1012 		return QDF_STATUS_E_INVAL;
1013 	}
1014 
1015 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
1016 		    soc, peer, hw_peer_id, vdev_id,
1017 		    QDF_MAC_ADDR_REF(mac_addr));
1018 
1019 	qdf_spin_lock_bh(&soc->ast_lock);
1020 
1021 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1022 
1023 	if (is_wds) {
1024 		/*
1025 		 * In certain cases, like an auth attack on a repeater,
1026 		 * the number of ast_entries falling in the same hash
1027 		 * bucket can exceed the max_skid length supported by
1028 		 * HW in the root AP. In these cases
1029 		 * the FW will return the hw_peer_id (ast_index) as
1030 		 * 0xffff indicating HW could not add the entry in
1031 		 * its table. Host has to delete the entry from its
1032 		 * table in these cases.
1033 		 */
1034 		if (hw_peer_id == HTT_INVALID_PEER) {
1035 			DP_STATS_INC(soc, ast.map_err, 1);
1036 			if (ast_entry) {
1037 				if (ast_entry->is_mapped) {
1038 					soc->ast_table[ast_entry->ast_idx] =
1039 						NULL;
1040 				}
1041 
1042 				cb = ast_entry->callback;
1043 				cookie = ast_entry->cookie;
1044 				peer_type = ast_entry->type;
1045 
1046 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1047 				dp_peer_free_ast_entry(soc, ast_entry);
1048 
1049 				qdf_spin_unlock_bh(&soc->ast_lock);
1050 
1051 				if (cb) {
1052 					cb(soc->ctrl_psoc,
1053 					   dp_soc_to_cdp_soc(soc),
1054 					   cookie,
1055 					   CDP_TXRX_AST_DELETED);
1056 				}
1057 			} else {
1058 				qdf_spin_unlock_bh(&soc->ast_lock);
1059 				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
1060 					      peer, peer->peer_id,
1061 					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1062 					      QDF_MAC_ADDR_REF(mac_addr),
1063 					      vdev_id, is_wds);
1064 			}
1065 			err = QDF_STATUS_E_INVAL;
1066 
1067 			dp_hmwds_ast_add_notify(peer, mac_addr,
1068 						peer_type, err, true);
1069 
1070 			return err;
1071 		}
1072 	}
1073 
1074 	if (ast_entry) {
1075 		ast_entry->ast_idx = hw_peer_id;
1076 		soc->ast_table[hw_peer_id] = ast_entry;
1077 		ast_entry->is_active = TRUE;
1078 		peer_type = ast_entry->type;
1079 		ast_entry->ast_hash_value = ast_hash;
1080 		ast_entry->is_mapped = TRUE;
1081 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
1082 
1083 		ast_entry->peer_id = peer->peer_id;
1084 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1085 				  ase_list_elem);
1086 	}
1087 
1088 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
1089 		if (soc->cdp_soc.ol_ops->peer_map_event) {
1090 			soc->cdp_soc.ol_ops->peer_map_event(
1091 			soc->ctrl_psoc, peer->peer_id,
1092 			hw_peer_id, vdev_id,
1093 			mac_addr, peer_type, ast_hash);
1094 		}
1095 	} else {
1096 		dp_peer_err("%pK: AST entry not found", soc);
1097 		err = QDF_STATUS_E_NOENT;
1098 	}
1099 
1100 	qdf_spin_unlock_bh(&soc->ast_lock);
1101 
1102 	dp_hmwds_ast_add_notify(peer, mac_addr,
1103 				peer_type, err, true);
1104 
1105 	return err;
1106 }
1107 
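/*
 * dp_peer_free_hmwds_cb() - AST free callback to re-add an HMWDS entry
 * @ctrl_psoc: control psoc handle
 * @dp_soc: datapath soc handle
 * @cookie: dp_ast_free_cb_params carrying the saved add parameters
 * @status: AST delete status from target
 *
 * On CDP_TXRX_AST_DELETED, re-adds the HMWDS AST entry for the peer using
 * the parameters saved in the cookie, then frees the cookie.
 *
 * Return: None
 */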
1108 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
1109 			   struct cdp_soc *dp_soc,
1110 			   void *cookie,
1111 			   enum cdp_ast_free_status status)
1112 {
1113 	struct dp_ast_free_cb_params *param =
1114 		(struct dp_ast_free_cb_params *)cookie;
1115 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
1116 	struct dp_peer *peer = NULL;
1117 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1118 
1119 	if (status != CDP_TXRX_AST_DELETED) {
1120 		qdf_mem_free(cookie);
1121 		return;
1122 	}
1123 
1124 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
1125 				      0, param->vdev_id, DP_MOD_ID_AST);
1126 	if (peer) {
1127 		err = dp_peer_add_ast(soc, peer,
1128 				      &param->mac_addr.raw[0],
1129 				      param->type,
1130 				      param->flags);
1131 
1132 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
1133 					param->type, err, false);
1134 
1135 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1136 	}
1137 	qdf_mem_free(cookie);
1138 }
1139 
1140 /*
1141  * dp_peer_add_ast() - Allocate and add AST entry into peer list
1142  * @soc: SoC handle
1143  * @peer: peer to which ast node belongs
1144  * @mac_addr: MAC address of ast node
1145  * @type: type of AST entry
 * @flags: AST entry flags
1146  *
1147  * This API is used by WDS source port learning function to
1148  * add a new AST entry into peer AST list
1149  *
1150  * Return: QDF_STATUS code
1151  */
1152 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1153 			   struct dp_peer *peer,
1154 			   uint8_t *mac_addr,
1155 			   enum cdp_txrx_ast_entry_type type,
1156 			   uint32_t flags)
1157 {
1158 	struct dp_ast_entry *ast_entry = NULL;
1159 	struct dp_vdev *vdev = NULL;
1160 	struct dp_pdev *pdev = NULL;
1161 	uint8_t next_node_mac[6];
1162 	txrx_ast_free_cb cb = NULL;
1163 	void *cookie = NULL;
1164 	struct dp_peer *vap_bss_peer = NULL;
1165 	bool is_peer_found = false;
1166 
1167 	vdev = peer->vdev;
1168 	if (!vdev) {
1169 		dp_peer_err("%pK: Peers vdev is NULL", soc);
1170 		QDF_ASSERT(0);
1171 		return QDF_STATUS_E_INVAL;
1172 	}
1173 
1174 	pdev = vdev->pdev;
1175 
1176 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1177 
1178 	qdf_spin_lock_bh(&soc->ast_lock);
1179 
1180 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1181 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1182 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
1183 			qdf_spin_unlock_bh(&soc->ast_lock);
1184 			return QDF_STATUS_E_BUSY;
1185 		}
1186 	}
1187 
1188 	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1189 		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1190 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1191 		      QDF_MAC_ADDR_REF(mac_addr));
1192 
1193 	/* fw supports only 2 times the max_peers ast entries */
1194 	if (soc->num_ast_entries >=
1195 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1196 		qdf_spin_unlock_bh(&soc->ast_lock);
1197 		dp_peer_err("%pK: Max ast entries reached", soc);
1198 		return QDF_STATUS_E_RESOURCES;
1199 	}
1200 
1201 	/* If the AST entry already exists, just return from here.
1202 	 * An ast entry with the same mac address can exist on different
1203 	 * radios; if ast_override support is enabled, use search by pdev
1204 	 * in this case.
1205 	 */
1206 	if (soc->ast_override_support) {
1207 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1208 							    pdev->pdev_id);
1209 		if (ast_entry) {
1210 			qdf_spin_unlock_bh(&soc->ast_lock);
1211 			return QDF_STATUS_E_ALREADY;
1212 		}
1213 
1214 		if (is_peer_found) {
1215 			/* During WDS to static roaming, the peer is added
1216 			 * to the list before the static AST entry is created.
1217 			 * So, allow an AST entry of STATIC type
1218 			 * even if the peer is present
1219 			 */
1220 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
1221 				qdf_spin_unlock_bh(&soc->ast_lock);
1222 				return QDF_STATUS_E_ALREADY;
1223 			}
1224 		}
1225 	} else {
1226 		/* For WDS_HM_SEC, entries can be added for the same mac
1227 		 * address; do not check for an existing entry
1228 		 */
1229 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1230 			goto add_ast_entry;
1231 
1232 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1233 
1234 		if (ast_entry) {
1235 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1236 			    !ast_entry->delete_in_progress) {
1237 				qdf_spin_unlock_bh(&soc->ast_lock);
1238 				return QDF_STATUS_E_ALREADY;
1239 			}
1240 
1241 			/* An add for an HMWDS entry cannot be ignored if there
1242 			 * is an AST entry with the same mac address.
1243 			 *
1244 			 * If an ast entry exists with the requested mac address,
1245 			 * send a delete command and register a callback which
1246 			 * can take care of adding the HMWDS ast entry on delete
1247 			 * confirmation from the target
1248 			 */
1249 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1250 				struct dp_ast_free_cb_params *param = NULL;
1251 
1252 				if (ast_entry->type ==
1253 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1254 					goto add_ast_entry;
1255 
1256 				/* save existing callback */
1257 				if (ast_entry->callback) {
1258 					cb = ast_entry->callback;
1259 					cookie = ast_entry->cookie;
1260 				}
1261 
1262 				param = qdf_mem_malloc(sizeof(*param));
1263 				if (!param) {
1264 					QDF_TRACE(QDF_MODULE_ID_TXRX,
1265 						  QDF_TRACE_LEVEL_ERROR,
1266 						  "Allocation failed");
1267 					qdf_spin_unlock_bh(&soc->ast_lock);
1268 					return QDF_STATUS_E_NOMEM;
1269 				}
1270 
1271 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1272 					     QDF_MAC_ADDR_SIZE);
1273 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
1274 					     &peer->mac_addr.raw[0],
1275 					     QDF_MAC_ADDR_SIZE);
1276 				param->type = type;
1277 				param->flags = flags;
1278 				param->vdev_id = vdev->vdev_id;
1279 				ast_entry->callback = dp_peer_free_hmwds_cb;
1280 				ast_entry->pdev_id = vdev->pdev->pdev_id;
1281 				ast_entry->type = type;
1282 				ast_entry->cookie = (void *)param;
1283 				if (!ast_entry->delete_in_progress)
1284 					dp_peer_del_ast(soc, ast_entry);
1285 
1286 				qdf_spin_unlock_bh(&soc->ast_lock);
1287 
1288 				/* Call the saved callback*/
1289 				if (cb) {
1290 					cb(soc->ctrl_psoc,
1291 					   dp_soc_to_cdp_soc(soc),
1292 					   cookie,
1293 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1294 				}
1295 				return QDF_STATUS_E_AGAIN;
1296 			}
1297 
1298 			qdf_spin_unlock_bh(&soc->ast_lock);
1299 			return QDF_STATUS_E_ALREADY;
1300 		}
1301 	}
1302 
1303 add_ast_entry:
1304 	ast_entry = (struct dp_ast_entry *)
1305 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1306 
1307 	if (!ast_entry) {
1308 		qdf_spin_unlock_bh(&soc->ast_lock);
1309 		dp_peer_err("%pK: fail to allocate ast_entry", soc);
1310 		QDF_ASSERT(0);
1311 		return QDF_STATUS_E_NOMEM;
1312 	}
1313 
1314 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1315 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1316 	ast_entry->is_mapped = false;
1317 	ast_entry->delete_in_progress = false;
1318 	ast_entry->peer_id = HTT_INVALID_PEER;
1319 	ast_entry->next_hop = 0;
1320 	ast_entry->vdev_id = vdev->vdev_id;
1321 
1322 	switch (type) {
1323 	case CDP_TXRX_AST_TYPE_STATIC:
1324 		peer->self_ast_entry = ast_entry;
1325 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1326 		if (peer->vdev->opmode == wlan_op_mode_sta)
1327 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1328 		break;
1329 	case CDP_TXRX_AST_TYPE_SELF:
1330 		peer->self_ast_entry = ast_entry;
1331 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1332 		break;
1333 	case CDP_TXRX_AST_TYPE_WDS:
1334 		ast_entry->next_hop = 1;
1335 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1336 		break;
1337 	case CDP_TXRX_AST_TYPE_WDS_HM:
1338 		ast_entry->next_hop = 1;
1339 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1340 		break;
1341 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1342 		ast_entry->next_hop = 1;
1343 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1344 		ast_entry->peer_id = peer->peer_id;
1345 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1346 				  ase_list_elem);
1347 		break;
1348 	case CDP_TXRX_AST_TYPE_DA:
1349 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1350 							  DP_MOD_ID_AST);
1351 		if (!vap_bss_peer) {
1352 			qdf_spin_unlock_bh(&soc->ast_lock);
1353 			qdf_mem_free(ast_entry);
1354 			return QDF_STATUS_E_FAILURE;
1355 		}
1356 		peer = vap_bss_peer;
1357 		ast_entry->next_hop = 1;
1358 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1359 		break;
1360 	default:
1361 		dp_peer_err("%pK: Incorrect AST entry type", soc);
1362 	}
1363 
1364 	ast_entry->is_active = TRUE;
1365 	DP_STATS_INC(soc, ast.added, 1);
1366 	soc->num_ast_entries++;
1367 	dp_peer_ast_hash_add(soc, ast_entry);
1368 
1369 	qdf_copy_macaddr((struct qdf_mac_addr *)next_node_mac,
1370 			 (struct qdf_mac_addr *)peer->mac_addr.raw);
1371 
1372 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1373 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1374 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1375 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
1376 		if (QDF_STATUS_SUCCESS ==
1377 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
1378 				soc->ctrl_psoc,
1379 				peer->vdev->vdev_id,
1380 				peer->mac_addr.raw,
1381 				peer->peer_id,
1382 				mac_addr,
1383 				next_node_mac,
1384 				flags,
1385 				ast_entry->type)) {
1386 			if (vap_bss_peer)
1387 				dp_peer_unref_delete(vap_bss_peer,
1388 						     DP_MOD_ID_AST);
1389 			qdf_spin_unlock_bh(&soc->ast_lock);
1390 			return QDF_STATUS_SUCCESS;
1391 		}
1392 	}
1393 
1394 	if (vap_bss_peer)
1395 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1396 
1397 	qdf_spin_unlock_bh(&soc->ast_lock);
1398 	return QDF_STATUS_E_FAILURE;
1399 }
1400 
1401 qdf_export_symbol(dp_peer_add_ast);
1402 
1403 /*
1404  * dp_peer_free_ast_entry() - Free up the ast entry memory
1405  * @soc: SoC handle
1406  * @ast_entry: Address search entry
1407  *
1408  * This API is used to free up the memory associated with
1409  * AST entry.
1410  *
1411  * Return: None
1412  */
1413 void dp_peer_free_ast_entry(struct dp_soc *soc,
1414 			    struct dp_ast_entry *ast_entry)
1415 {
1416 	/*
1417 	 * NOTE: Ensure that call to this API is done
1418 	 * after soc->ast_lock is taken
1419 	 */
1420 	dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1421 		      ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1422 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1423 
1424 	ast_entry->callback = NULL;
1425 	ast_entry->cookie = NULL;
1426 
1427 	DP_STATS_INC(soc, ast.deleted, 1);
1428 	dp_peer_ast_hash_remove(soc, ast_entry);
1429 	dp_peer_ast_cleanup(soc, ast_entry);
1430 	qdf_mem_free(ast_entry);
1431 	soc->num_ast_entries--;
1432 }
1433 
1434 /*
1435  * dp_peer_unlink_ast_entry() - Unlink the ast entry from the peer list
1436  * @soc: SoC handle
1437  * @ast_entry: Address search entry
1438  * @peer: peer
1439  *
1440  * This API is used to remove/unlink the AST entry from the peer list
1441  * and clear its mapping in the AST table.
1442  *
1443  * Return: None
1444  */
1445 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1446 			      struct dp_ast_entry *ast_entry,
1447 			      struct dp_peer *peer)
1448 {
1449 	if (!peer) {
1450 		dp_info_rl("NULL peer");
1451 		return;
1452 	}
1453 
1454 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1455 		dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1456 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1457 			  ast_entry->type);
1458 		return;
1459 	}
1460 	/*
1461 	 * NOTE: Ensure that call to this API is done
1462 	 * after soc->ast_lock is taken
1463 	 */
1464 
1465 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1466 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1467 
1468 	if (ast_entry == peer->self_ast_entry)
1469 		peer->self_ast_entry = NULL;
1470 
1471 	/*
1472 	 * release the reference only if it is mapped
1473 	 * to ast_table
1474 	 */
1475 	if (ast_entry->is_mapped)
1476 		soc->ast_table[ast_entry->ast_idx] = NULL;
1477 
1478 	ast_entry->peer_id = HTT_INVALID_PEER;
1479 }
1480 
1481 /*
1482  * dp_peer_del_ast() - Delete and free AST entry
1483  * @soc: SoC handle
1484  * @ast_entry: AST entry of the node
1485  *
1486  * This function removes the AST entry from peer and soc tables
1487  * It assumes caller has taken the ast lock to protect the access to these
1488  * tables
1489  *
1490  * Return: None
1491  */
1492 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1493 {
1494 	struct dp_peer *peer = NULL;
1495 
1496 	if (!ast_entry) {
1497 		dp_info_rl("NULL AST entry");
1498 		return;
1499 	}
1500 
1501 	if (ast_entry->delete_in_progress) {
1502 		dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1503 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1504 			  ast_entry->type);
1505 		return;
1506 	}
1507 
1508 	dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1509 		      (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
1510 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1511 
1512 	ast_entry->delete_in_progress = true;
1513 
1514 	/* In teardown, del ast is called after setting the logical delete state;
1515 	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
1516 	 * state
1517 	 */
1518 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1519 				       DP_MOD_ID_AST);
1520 
1521 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
1522 
1523 	/* Remove SELF and STATIC entries in teardown itself */
1524 	if (!ast_entry->next_hop)
1525 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1526 
1527 	if (ast_entry->is_mapped)
1528 		soc->ast_table[ast_entry->ast_idx] = NULL;
1529 
1530 	/* if peer map v2 is enabled we are not freeing ast entry
1531 	 * here and it is supposed to be freed in unmap event (after
1532 	 * we receive delete confirmation from target)
1533 	 *
1534 	 * if peer_id is invalid we did not get the peer map event
1535 	 * for the peer free ast entry from here only in this case
1536 	 * for the peer; free the ast entry from here only in this case
1537 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
1538 		goto end;
1539 
1540 	/* For a WDS secondary entry ast_entry->next_hop would be set, so
1541 	 * unlinking has to be done explicitly here.
1542 	 * As this entry is not a mapped entry, an unmap notification from
1543 	 * FW will not come. Hence unlinking is done right here.
1544 	 */
1545 
1546 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1547 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1548 
1549 	dp_peer_free_ast_entry(soc, ast_entry);
1550 
1551 end:
1552 	if (peer)
1553 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1554 }
1555 
1556 /*
1557  * dp_peer_update_ast() - Update the AST entry to the roamed peer
1558  * @soc: SoC handle
1559  * @peer: peer to which ast node belongs
1560  * @ast_entry: AST entry of the node
1561  * @flags: wds or hmwds
1562  *
1563  * This function updates the AST entry to the roamed peer and soc tables
1564  * It assumes caller has taken the ast lock to protect the access to these
1565  * tables
1566  *
1567  * Return: 0 if ast entry is updated successfully
1568  *         -1 failure
1569  */
1570 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1571 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1572 {
1573 	int ret = -1;
1574 	struct dp_peer *old_peer;
1575 
1576 	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
1577 		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
1578 		      peer->vdev->vdev_id, flags,
1579 		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1580 		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1581 
1582 	/* Do not send AST update in below cases
1583 	 *  1) Ast entry delete has already triggered
1584 	 *  2) Peer delete is already triggered
1585 	 *  3) We did not get the HTT map for create event
1586 	 */
1587 	if (ast_entry->delete_in_progress ||
1588 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
1589 	    !ast_entry->is_mapped)
1590 		return ret;
1591 
1592 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1593 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1594 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1595 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1596 		return 0;
1597 
1598 	/*
1599 	 * Avoids flood of WMI update messages sent to FW for same peer.
1600 	 */
1601 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
1602 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1603 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
1604 	    (ast_entry->is_active))
1605 		return 0;
1606 
1607 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1608 					 DP_MOD_ID_AST);
1609 	if (!old_peer)
1610 		return 0;
1611 
1612 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1613 
1614 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1615 
1616 	ast_entry->peer_id = peer->peer_id;
1617 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1618 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1619 	ast_entry->vdev_id = peer->vdev->vdev_id;
1620 	ast_entry->is_active = TRUE;
1621 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1622 
1623 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1624 				soc->ctrl_psoc,
1625 				peer->vdev->vdev_id,
1626 				ast_entry->mac_addr.raw,
1627 				peer->mac_addr.raw,
1628 				flags);
1629 
1630 	return ret;
1631 }
1632 
1633 /*
1634  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1635  * @soc: SoC handle
1636  * @ast_entry: AST entry of the node
1637  *
1638  * This function gets the pdev_id from the ast entry.
1639  *
1640  * Return: (uint8_t) pdev_id
1641  */
1642 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1643 				struct dp_ast_entry *ast_entry)
1644 {
1645 	return ast_entry->pdev_id;
1646 }
1647 
1648 /*
1649  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1650  * @soc: SoC handle
1651  * @ast_entry: AST entry of the node
1652  *
1653  * This function gets the next hop from the ast entry.
1654  *
1655  * Return: (uint8_t) next_hop
1656  */
1657 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1658 				struct dp_ast_entry *ast_entry)
1659 {
1660 	return ast_entry->next_hop;
1661 }
1662 
1663 /*
1664  * dp_peer_ast_set_type() - set type in the ast entry
1665  * @soc: SoC handle
1666  * @ast_entry: AST entry of the node
 * @type: AST entry type to set
1667  *
1668  * This function sets the type in the ast entry.
1669  *
1670  * Return: None
1671  */
1672 void dp_peer_ast_set_type(struct dp_soc *soc,
1673 				struct dp_ast_entry *ast_entry,
1674 				enum cdp_txrx_ast_entry_type type)
1675 {
1676 	ast_entry->type = type;
1677 }
1678 
1679 #else
1680 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1681 			   struct dp_peer *peer,
1682 			   uint8_t *mac_addr,
1683 			   enum cdp_txrx_ast_entry_type type,
1684 			   uint32_t flags)
1685 {
1686 	return QDF_STATUS_E_FAILURE;
1687 }
1688 
1689 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1690 {
1691 }
1692 
1693 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1694 			struct dp_ast_entry *ast_entry, uint32_t flags)
1695 {
1696 	return 1;
1697 }
1698 
1699 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1700 					       uint8_t *ast_mac_addr)
1701 {
1702 	return NULL;
1703 }
1704 
1705 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1706 						     uint8_t *ast_mac_addr,
1707 						     uint8_t pdev_id)
1708 {
1709 	return NULL;
1710 }
1711 
1712 static QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
1713 {
1714 	return QDF_STATUS_SUCCESS;
1715 }
1716 
1717 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1718 					 struct dp_peer *peer,
1719 					 uint8_t *mac_addr,
1720 					 uint16_t hw_peer_id,
1721 					 uint8_t vdev_id,
1722 					 uint16_t ast_hash,
1723 					 uint8_t is_wds)
1724 {
1725 	return QDF_STATUS_SUCCESS;
1726 }
1727 
1728 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1729 {
1730 }
1731 
1732 void dp_peer_ast_set_type(struct dp_soc *soc,
1733 				struct dp_ast_entry *ast_entry,
1734 				enum cdp_txrx_ast_entry_type type)
1735 {
1736 }
1737 
1738 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1739 				struct dp_ast_entry *ast_entry)
1740 {
1741 	return 0xff;
1742 }
1743 
1744 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1745 				struct dp_ast_entry *ast_entry)
1746 {
1747 	return 0xff;
1748 }
1749 
1750 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1751 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1752 {
1753 	return 1;
1754 }
1755 
1756 #endif
1757 
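/*
 * dp_peer_ast_send_wds_del() - Send a WDS AST entry delete to firmware
 * @soc: SoC handle
 * @ast_entry: AST entry to delete
 * @peer: peer to which the ast entry belongs
 *
 * For a next-hop (WDS) entry, requests the control path to delete it;
 * delete_in_fw is set to false when the peer is already in logical delete
 * state, since the peer delete command to firmware cleans up the wds ast
 * entries.
 *
 * Return: None
 */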
1758 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1759 			      struct dp_ast_entry *ast_entry,
1760 			      struct dp_peer *peer)
1761 {
1762 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1763 	bool delete_in_fw = false;
1764 
1765 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1766 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
1767 		  __func__, ast_entry->type, ast_entry->pdev_id,
1768 		  ast_entry->vdev_id,
1769 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1770 		  ast_entry->next_hop, ast_entry->peer_id);
1771 
1772 	/*
1773 	 * If peer state is logical delete, the peer is about to get
1774 	 * torn down with a peer delete command to firmware,
1775 	 * which will clean up all the wds ast entries.
1776 	 * So, no need to send explicit wds ast delete to firmware.
1777 	 */
1778 	if (ast_entry->next_hop) {
1779 		if (peer && dp_peer_state_cmp(peer,
1780 					      DP_PEER_STATE_LOGICAL_DELETE))
1781 			delete_in_fw = false;
1782 		else
1783 			delete_in_fw = true;
1784 
1785 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1786 						    ast_entry->vdev_id,
1787 						    ast_entry->mac_addr.raw,
1788 						    ast_entry->type,
1789 						    delete_in_fw);
1790 	}
1791 
1792 }
1793 
1794 #ifdef FEATURE_WDS
1795 /**
1796  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
1797  * @soc: soc handle
1798  * @peer: peer handle
1799  *
1800  * Free all the wds ast entries associated with peer
1801  *
1802  * Return: Number of wds ast entries freed
1803  */
1804 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
1805 					     struct dp_peer *peer)
1806 {
1807 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
1808 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1809 	uint32_t num_ast = 0;
1810 
1811 	TAILQ_INIT(&ast_local_list);
1812 	qdf_spin_lock_bh(&soc->ast_lock);
1813 
1814 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1815 		if (ast_entry->next_hop)
1816 			num_ast++;
1817 
1818 		if (ast_entry->is_mapped)
1819 			soc->ast_table[ast_entry->ast_idx] = NULL;
1820 
1821 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1822 		DP_STATS_INC(soc, ast.deleted, 1);
1823 		dp_peer_ast_hash_remove(soc, ast_entry);
1824 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
1825 				  ase_list_elem);
1826 		soc->num_ast_entries--;
1827 	}
1828 
1829 	qdf_spin_unlock_bh(&soc->ast_lock);
1830 
1831 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
1832 			   temp_ast_entry) {
1833 		if (ast_entry->callback)
1834 			ast_entry->callback(soc->ctrl_psoc,
1835 					    dp_soc_to_cdp_soc(soc),
1836 					    ast_entry->cookie,
1837 					    CDP_TXRX_AST_DELETED);
1838 
1839 		qdf_mem_free(ast_entry);
1840 	}
1841 
1842 	return num_ast;
}

/**
1845  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
1846  * @soc: soc handle
1847  * @peer: peer handle
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * Free all the wds ast entries associated with peer and compare with
 * the value received from firmware
 *
 * Return: None
1854  */
1855 static void
1856 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1857 			  uint32_t free_wds_count)
1858 {
1859 	uint32_t wds_deleted = 0;
1860 
1861 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
1862 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
1863 	    (free_wds_count != wds_deleted)) {
1864 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), free_wds_count,
1867 			 wds_deleted);
1868 	}
1869 }
1870 
1871 #else
1872 static void
1873 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1874 			  uint32_t free_wds_count)
1875 {
1876 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1877 
1878 	qdf_spin_lock_bh(&soc->ast_lock);
1879 
1880 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1881 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1882 
1883 		if (ast_entry->is_mapped)
1884 			soc->ast_table[ast_entry->ast_idx] = NULL;
1885 
1886 		dp_peer_free_ast_entry(soc, ast_entry);
1887 	}
1888 
1889 	peer->self_ast_entry = NULL;
1890 	qdf_spin_unlock_bh(&soc->ast_lock);
1891 }
1892 #endif
1893 
1894 /**
1895  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1896  * @soc: soc handle
1897  * @peer: peer handle
1898  * @vdev_id: vdev_id
 * @mac_addr: mac address of the AST entry to search and delete
 *
 * Find the ast entry in the peer list using the mac address and free
 * the entry.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1905  */
1906 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1907 					 struct dp_peer *peer,
1908 					 uint8_t vdev_id,
1909 					 uint8_t *mac_addr)
1910 {
1911 	struct dp_ast_entry *ast_entry;
1912 	void *cookie = NULL;
1913 	txrx_ast_free_cb cb = NULL;
1914 
1915 	/*
1916 	 * release the reference only if it is mapped
1917 	 * to ast_table
1918 	 */
1919 
1920 	qdf_spin_lock_bh(&soc->ast_lock);
1921 
1922 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1923 	if (!ast_entry) {
1924 		qdf_spin_unlock_bh(&soc->ast_lock);
1925 		return QDF_STATUS_E_NOENT;
1926 	} else if (ast_entry->is_mapped) {
1927 		soc->ast_table[ast_entry->ast_idx] = NULL;
1928 	}
1929 
1930 	cb = ast_entry->callback;
1931 	cookie = ast_entry->cookie;
1932 
1933 
1934 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1935 
1936 	dp_peer_free_ast_entry(soc, ast_entry);
1937 
1938 	qdf_spin_unlock_bh(&soc->ast_lock);
1939 
1940 	if (cb) {
1941 		cb(soc->ctrl_psoc,
1942 		   dp_soc_to_cdp_soc(soc),
1943 		   cookie,
1944 		   CDP_TXRX_AST_DELETED);
1945 	}
1946 
1947 	return QDF_STATUS_SUCCESS;
1948 }
1949 
1950 /*
1951  * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
1952  *                            vdev_id and mac_address
1953  * @soc: soc handle
1954  * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL on failure
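 *
 * The caller owns the reference returned and must release it with
 * dp_peer_unref_delete() using the same mod_id. Illustrative sketch of
 * a hypothetical caller (not part of this file):
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id, DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}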
1961  */
1962 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1963 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id,
1964 	enum dp_mod_id mod_id)
1965 {
1966 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1967 	unsigned index;
1968 	struct dp_peer *peer;
1969 
1970 	if (!soc->peer_hash.bins)
1971 		return NULL;
1972 
1973 	if (mac_addr_is_aligned) {
1974 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1975 	} else {
1976 		qdf_mem_copy(
1977 			&local_mac_addr_aligned.raw[0],
1978 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1979 		mac_addr = &local_mac_addr_aligned;
1980 	}
1981 	index = dp_peer_find_hash_index(soc, mac_addr);
1982 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1983 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1984 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1985 			((peer->vdev->vdev_id == vdev_id) ||
1986 			 (vdev_id == DP_VDEV_ALL))) {
1987 			/* take peer reference before returning */
1988 			if (dp_peer_get_ref(soc, peer, mod_id) !=
1989 						QDF_STATUS_SUCCESS)
1990 				peer = NULL;
1991 
1992 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
1993 			return peer;
1994 		}
1995 	}
1996 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1997 	return NULL; /* failure */
1998 }
1999 
2000 qdf_export_symbol(dp_peer_find_hash_find);
2001 
2002 /*
2003  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
2004  * @soc: soc handle
2005  * @peer: peer handle
2006  *
2007  * return: none
2008  */
2009 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
2010 {
2011 	unsigned index;
2012 	struct dp_peer *tmppeer = NULL;
2013 	int found = 0;
2014 
2015 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the hash bin is not empty before delete */
2017 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
2018 
2019 	qdf_spin_lock_bh(&soc->peer_hash_lock);
2020 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
2021 		if (tmppeer == peer) {
2022 			found = 1;
2023 			break;
2024 		}
2025 	}
2026 	QDF_ASSERT(found);
2027 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
2028 
2029 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2030 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
2031 }
2032 
2033 void dp_peer_find_hash_erase(struct dp_soc *soc)
2034 {
	int i, mod_id;
2036 
2037 	/*
2038 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2039 	 * it's known that the soc is no longer in use.
2040 	 */
2041 	for (i = 0; i <= soc->peer_hash.mask; i++) {
2042 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2043 			struct dp_peer *peer, *peer_next;
2044 
2045 			/*
2046 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2047 			 * memory access violation after peer is freed
2048 			 */
2049 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2050 				hash_list_elem, peer_next) {
2051 				/*
2052 				 * Don't remove the peer from the hash table -
2053 				 * that would modify the list we are currently
2054 				 * traversing, and it's not necessary anyway.
2055 				 */
2056 				/*
2057 				 * Artificially adjust the peer's ref count to
2058 				 * 1, so it will get deleted by
2059 				 * dp_peer_unref_delete.
2060 				 */
2061 				/* set to zero */
2062 				qdf_atomic_init(&peer->ref_cnt);
				for (mod_id = 0; mod_id < DP_MOD_ID_MAX; mod_id++)
					qdf_atomic_init(&peer->mod_refs[mod_id]);
2065 				/* incr to one */
2066 				qdf_atomic_inc(&peer->ref_cnt);
2067 				qdf_atomic_inc(&peer->mod_refs
2068 						[DP_MOD_ID_CONFIG]);
2069 				dp_peer_unref_delete(peer,
2070 						     DP_MOD_ID_CONFIG);
2071 			}
2072 		}
2073 	}
2074 }
2075 
2076 static void dp_peer_ast_table_detach(struct dp_soc *soc)
2077 {
2078 	if (soc->ast_table) {
2079 		qdf_mem_free(soc->ast_table);
2080 		soc->ast_table = NULL;
2081 	}
2082 }
2083 
2084 /*
2085  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
2086  * @soc: soc handle
2087  *
2088  * return: none
2089  */
2090 static void dp_peer_find_map_detach(struct dp_soc *soc)
2091 {
2092 	if (soc->peer_id_to_obj_map) {
2093 		qdf_mem_free(soc->peer_id_to_obj_map);
2094 		soc->peer_id_to_obj_map = NULL;
2095 		qdf_spinlock_destroy(&soc->peer_map_lock);
2096 	}
2097 }
2098 
2099 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2100 {
2101 	QDF_STATUS status;
2102 
2103 	status = dp_peer_find_map_attach(soc);
2104 	if (!QDF_IS_STATUS_SUCCESS(status))
2105 		return status;
2106 
2107 	status = dp_peer_find_hash_attach(soc);
2108 	if (!QDF_IS_STATUS_SUCCESS(status))
2109 		goto map_detach;
2110 
2111 	status = dp_peer_ast_table_attach(soc);
2112 	if (!QDF_IS_STATUS_SUCCESS(status))
2113 		goto hash_detach;
2114 
2115 	status = dp_peer_ast_hash_attach(soc);
2116 	if (!QDF_IS_STATUS_SUCCESS(status))
2117 		goto ast_table_detach;
2118 
2119 	status = dp_peer_mec_hash_attach(soc);
2120 	if (QDF_IS_STATUS_SUCCESS(status)) {
2121 		dp_soc_wds_attach(soc);
2122 		return status;
2123 	}
2124 
2125 	dp_peer_ast_hash_detach(soc);
2126 ast_table_detach:
2127 	dp_peer_ast_table_detach(soc);
2128 hash_detach:
2129 	dp_peer_find_hash_detach(soc);
2130 map_detach:
2131 	dp_peer_find_map_detach(soc);
2132 
2133 	return status;
2134 }
2135 
2136 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2137 	union hal_reo_status *reo_status)
2138 {
2139 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2140 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2141 
2142 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2143 		return;
2144 
2145 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2146 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2147 			       queue_status->header.status, rx_tid->tid);
2148 		return;
2149 	}
2150 
2151 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2152 		       "ssn: %d\n"
2153 		       "curr_idx  : %d\n"
2154 		       "pn_31_0   : %08x\n"
2155 		       "pn_63_32  : %08x\n"
2156 		       "pn_95_64  : %08x\n"
2157 		       "pn_127_96 : %08x\n"
2158 		       "last_rx_enq_tstamp : %08x\n"
2159 		       "last_rx_deq_tstamp : %08x\n"
2160 		       "rx_bitmap_31_0     : %08x\n"
2161 		       "rx_bitmap_63_32    : %08x\n"
2162 		       "rx_bitmap_95_64    : %08x\n"
2163 		       "rx_bitmap_127_96   : %08x\n"
2164 		       "rx_bitmap_159_128  : %08x\n"
2165 		       "rx_bitmap_191_160  : %08x\n"
2166 		       "rx_bitmap_223_192  : %08x\n"
2167 		       "rx_bitmap_255_224  : %08x\n",
2168 		       rx_tid->tid,
2169 		       queue_status->ssn, queue_status->curr_idx,
2170 		       queue_status->pn_31_0, queue_status->pn_63_32,
2171 		       queue_status->pn_95_64, queue_status->pn_127_96,
2172 		       queue_status->last_rx_enq_tstamp,
2173 		       queue_status->last_rx_deq_tstamp,
2174 		       queue_status->rx_bitmap_31_0,
2175 		       queue_status->rx_bitmap_63_32,
2176 		       queue_status->rx_bitmap_95_64,
2177 		       queue_status->rx_bitmap_127_96,
2178 		       queue_status->rx_bitmap_159_128,
2179 		       queue_status->rx_bitmap_191_160,
2180 		       queue_status->rx_bitmap_223_192,
2181 		       queue_status->rx_bitmap_255_224);
2182 
2183 	DP_PRINT_STATS(
2184 		       "curr_mpdu_cnt      : %d\n"
2185 		       "curr_msdu_cnt      : %d\n"
2186 		       "fwd_timeout_cnt    : %d\n"
2187 		       "fwd_bar_cnt        : %d\n"
2188 		       "dup_cnt            : %d\n"
2189 		       "frms_in_order_cnt  : %d\n"
2190 		       "bar_rcvd_cnt       : %d\n"
2191 		       "mpdu_frms_cnt      : %d\n"
2192 		       "msdu_frms_cnt      : %d\n"
2193 		       "total_byte_cnt     : %d\n"
2194 		       "late_recv_mpdu_cnt : %d\n"
2195 		       "win_jump_2k        : %d\n"
2196 		       "hole_cnt           : %d\n",
2197 		       queue_status->curr_mpdu_cnt,
2198 		       queue_status->curr_msdu_cnt,
2199 		       queue_status->fwd_timeout_cnt,
2200 		       queue_status->fwd_bar_cnt,
2201 		       queue_status->dup_cnt,
2202 		       queue_status->frms_in_order_cnt,
2203 		       queue_status->bar_rcvd_cnt,
2204 		       queue_status->mpdu_frms_cnt,
2205 		       queue_status->msdu_frms_cnt,
2206 		       queue_status->total_cnt,
2207 		       queue_status->late_recv_mpdu_cnt,
2208 		       queue_status->win_jump_2k,
2209 		       queue_status->hole_cnt);
2210 
2211 	DP_PRINT_STATS("Addba Req          : %d\n"
2212 			"Addba Resp         : %d\n"
2213 			"Addba Resp success : %d\n"
2214 			"Addba Resp failed  : %d\n"
2215 			"Delba Req received : %d\n"
2216 			"Delba Tx success   : %d\n"
2217 			"Delba Tx Fail      : %d\n"
2218 			"BA window size     : %d\n"
2219 			"Pn size            : %d\n",
2220 			rx_tid->num_of_addba_req,
2221 			rx_tid->num_of_addba_resp,
2222 			rx_tid->num_addba_rsp_success,
2223 			rx_tid->num_addba_rsp_failed,
2224 			rx_tid->num_of_delba_req,
2225 			rx_tid->delba_tx_success_cnt,
2226 			rx_tid->delba_tx_fail_cnt,
2227 			rx_tid->ba_win_size,
2228 			rx_tid->pn_size);
2229 }
2230 
2231 /*
2232  * dp_peer_find_add_id() - map peer_id with peer
2233  * @soc: soc handle
2234  * @peer_mac_addr: peer mac address
2235  * @peer_id: peer id to be mapped
2236  * @hw_peer_id: HW ast index
2237  * @vdev_id: vdev_id
2238  *
 * Return: peer on success
 *         NULL on failure
2241  */
2242 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2243 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2244 	uint8_t vdev_id)
2245 {
2246 	struct dp_peer *peer;
2247 
2248 	QDF_ASSERT(peer_id <= soc->max_peers);
2249 	/* check if there's already a peer object with this MAC address */
2250 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
2251 		0 /* is aligned */, vdev_id, DP_MOD_ID_CONFIG);
2252 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2253 		    soc, peer, peer_id, vdev_id,
2254 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2255 
2256 	if (peer) {
2257 		/* peer's ref count was already incremented by
2258 		 * peer_find_hash_find
2259 		 */
2260 		dp_peer_info("%pK: ref_cnt: %d", soc,
2261 			     qdf_atomic_read(&peer->ref_cnt));
2262 
2263 		/*
2264 		 * if peer is in logical delete CP triggered delete before map
2265 		 * is received ignore this event
2266 		 */
2267 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2268 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2269 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2270 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2271 				 vdev_id);
2272 			return NULL;
2273 		}
2274 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2275 		if (peer->peer_id == HTT_INVALID_PEER) {
2276 			peer->peer_id = peer_id;
2277 			dp_monitor_peer_tid_peer_id_update(soc, peer,
2278 							   peer->peer_id);
2279 		} else {
2280 			QDF_ASSERT(0);
2281 		}
2282 
2283 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2284 		return peer;
2285 	}
2286 
2287 	return NULL;
2288 }
2289 
2290 /**
2291  * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @hw_peer_id: ast index for this peer
 * @vdev_id: vdev ID
 * @peer_mac_addr: mac address of the peer
 * @ast_hash: ast hash value
 * @is_wds: flag to indicate peer map event for WDS ast entry
2299  *
2300  * associate the peer_id that firmware provided with peer entry
2301  * and update the ast table in the host with the hw_peer_id.
2302  *
2303  * Return: QDF_STATUS code
2304  */
2306 QDF_STATUS
2307 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2308 		       uint16_t hw_peer_id, uint8_t vdev_id,
2309 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2310 		       uint8_t is_wds)
2311 {
2312 	struct dp_peer *peer = NULL;
2313 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2314 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2315 
2316 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2317 		soc, peer_id, hw_peer_id,
2318 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2319 
2320 	/* Peer map event for WDS ast entry get the peer from
2321 	 * obj map
2322 	 */
2323 	if (is_wds) {
2324 		peer = dp_peer_get_ref_by_id(soc, peer_id,
2325 					     DP_MOD_ID_HTT);
2326 
2327 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2328 				      vdev_id, ast_hash, is_wds);
2329 		if (peer)
2330 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2331 	} else {
2332 		/*
2333 		 * It's the responsibility of the CP and FW to ensure
2334 		 * that peer is created successfully. Ideally DP should
		 * not hit the below condition for directly associated
2336 		 * peers.
2337 		 */
2338 		if ((hw_peer_id < 0) ||
2339 		    (hw_peer_id >=
2340 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2341 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2342 			qdf_assert_always(0);
2343 		}
2344 
2345 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2346 					   hw_peer_id, vdev_id);
2347 
2348 		if (peer) {
2349 			if (wlan_op_mode_sta == peer->vdev->opmode &&
2350 			    qdf_mem_cmp(peer->mac_addr.raw,
2351 					peer->vdev->mac_addr.raw,
2352 					QDF_MAC_ADDR_SIZE) != 0) {
2353 				dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2354 				peer->bss_peer = 1;
2355 			}
2356 
2357 			if (peer->vdev->opmode == wlan_op_mode_sta) {
2358 				peer->vdev->bss_ast_hash = ast_hash;
2359 				peer->vdev->bss_ast_idx = hw_peer_id;
2360 			}
2361 
			/* Add ast entry in case the self ast entry was
			 * deleted due to a DP/CP sync issue
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map, so no lock is needed
			 * before referring to it
2369 			 */
2370 			if (!peer->self_ast_entry) {
2371 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2372 					QDF_MAC_ADDR_REF(peer_mac_addr));
2373 				dp_peer_add_ast(soc, peer,
2374 						peer_mac_addr,
2375 						type, 0);
2376 			}
2377 		}
2378 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2379 				      vdev_id, ast_hash, is_wds);
2380 	}
2381 
2382 	return err;
2383 }
2384 
2385 /**
2386  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @vdev_id: vdev ID
 * @mac_addr: mac address of the peer or wds entry
 * @is_wds: flag to indicate peer unmap event for a WDS ast entry
 * @free_wds_count: number of wds entries freed by FW with peer delete
2393  *
2394  * Return: none
2395  */
2396 void
2397 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2398 			 uint8_t vdev_id, uint8_t *mac_addr,
2399 			 uint8_t is_wds, uint32_t free_wds_count)
2400 {
2401 	struct dp_peer *peer;
2402 	struct dp_vdev *vdev = NULL;
2403 
2404 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2405 
2406 	/*
2407 	 * Currently peer IDs are assigned for vdevs as well as peers.
2408 	 * If the peer ID is for a vdev, then the peer pointer stored
2409 	 * in peer_id_to_obj_map will be NULL.
2410 	 */
2411 	if (!peer) {
2412 		dp_err("Received unmap event for invalid peer_id %u",
2413 		       peer_id);
2414 		return;
2415 	}
2416 
	/* If peer map v2 messages are enabled, the AST entry has to be
	 * freed here
	 */
2419 	if (is_wds) {
2420 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
2421 						   mac_addr)) {
2422 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2423 			return;
2424 		}
2425 
2426 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
2427 			 peer, peer->peer_id,
2428 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2429 			 QDF_MAC_ADDR_REF(mac_addr), vdev_id,
2430 			 is_wds);
2431 
2432 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2433 		return;
2434 	} else {
2435 		dp_peer_clean_wds_entries(soc, peer, free_wds_count);
2436 	}
2437 
2438 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
2439 		soc, peer_id, peer);
2440 
2441 	dp_peer_find_id_to_obj_remove(soc, peer_id);
2442 	peer->peer_id = HTT_INVALID_PEER;
2443 
	/*
	 * Reset ast flow mapping table
	 */
2447 	dp_peer_reset_flowq_map(peer);
2448 
2449 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
2450 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
2451 				peer_id, vdev_id);
2452 	}
2453 
2454 	vdev = peer->vdev;
2455 	DP_UPDATE_STATS(vdev, peer);
2456 
2457 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
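	/* Release the reference taken by __dp_peer_get_ref_by_id() above */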
2458 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2459 	/*
2460 	 * Remove a reference to the peer.
2461 	 * If there are no more references, delete the peer object.
2462 	 */
2463 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2464 }
2465 
2466 void
2467 dp_peer_find_detach(struct dp_soc *soc)
2468 {
2469 	dp_soc_wds_detach(soc);
2470 	dp_peer_find_map_detach(soc);
2471 	dp_peer_find_hash_detach(soc);
2472 	dp_peer_ast_hash_detach(soc);
2473 	dp_peer_ast_table_detach(soc);
2474 	dp_peer_mec_hash_detach(soc);
2475 }
2476 
2477 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
2478 	union hal_reo_status *reo_status)
2479 {
2480 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2481 
2482 	if ((reo_status->rx_queue_status.header.status !=
2483 		HAL_REO_CMD_SUCCESS) &&
2484 		(reo_status->rx_queue_status.header.status !=
2485 		HAL_REO_CMD_DRAIN)) {
2486 		/* Should not happen normally. Just print error for now */
2487 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
2488 			    soc, reo_status->rx_queue_status.header.status,
2489 			    rx_tid->tid);
2490 	}
2491 }
2492 
2493 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
2494 {
2495 	struct ol_if_ops *ol_ops = NULL;
2496 	bool is_roaming = false;
2497 	uint8_t vdev_id = -1;
2498 	struct cdp_soc_t *soc;
2499 
2500 	if (!peer) {
2501 		dp_peer_info("Peer is NULL. No roaming possible");
2502 		return false;
2503 	}
2504 
2505 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
2506 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
2507 
2508 	if (ol_ops && ol_ops->is_roam_inprogress) {
2509 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
2510 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
2511 	}
2512 
2513 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
2514 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
2515 
2516 	return is_roaming;
2517 }
2518 
2519 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
2520 					 ba_window_size, uint32_t start_seq,
2521 					 bool bar_update)
2522 {
2523 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2524 	struct dp_soc *soc = peer->vdev->pdev->soc;
2525 	struct hal_reo_cmd_params params;
2526 
2527 	qdf_mem_zero(&params, sizeof(params));
2528 
2529 	params.std.need_status = 1;
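	/* REO commands carry the queue descriptor's physical address split
	 * into 32-bit low/high words; program both along with the updated
	 * BA window size.
	 */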
2530 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2531 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2532 	params.u.upd_queue_params.update_ba_window_size = 1;
2533 	params.u.upd_queue_params.ba_window_size = ba_window_size;
2534 
2535 	if (start_seq < IEEE80211_SEQ_MAX) {
2536 		params.u.upd_queue_params.update_ssn = 1;
2537 		params.u.upd_queue_params.ssn = start_seq;
2538 	} else {
		dp_set_ssn_valid_flag(&params, 0);
2540 	}
2541 
2542 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2543 			    dp_rx_tid_update_cb, rx_tid)) {
2544 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2545 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2546 	}
2547 
2548 	rx_tid->ba_win_size = ba_window_size;
2549 
2550 	if (dp_get_peer_vdev_roaming_in_progress(peer))
2551 		return QDF_STATUS_E_PERM;
2552 
2553 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup && !bar_update)
2554 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2555 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
2556 			peer->vdev->vdev_id, peer->mac_addr.raw,
2557 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
2558 
2559 	return QDF_STATUS_SUCCESS;
2560 }
2561 
2562 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2563 /*
2564  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
2565  *                                    the deferred list
2566  * @soc: Datapath soc handle
 * @freedesc: REO descriptor to be freed
2568  *
2569  * Return: true if enqueued, else false
2570  */
2571 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
2572 					   struct reo_desc_list_node *freedesc)
2573 {
2574 	struct reo_desc_deferred_freelist_node *desc;
2575 
2576 	if (!qdf_atomic_read(&soc->cmn_init_done))
2577 		return false;
2578 
2579 	desc = qdf_mem_malloc(sizeof(*desc));
2580 	if (!desc)
2581 		return false;
2582 
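	/* Snapshot only the fields needed to unmap and free the HW queue
	 * descriptor later; the descriptor memory itself stays allocated
	 * until dp_reo_desc_defer_free() ages it out.
	 */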
2583 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
2584 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
2585 	desc->hw_qdesc_vaddr_unaligned =
2586 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
2587 	desc->free_ts = qdf_get_system_timestamp();
2588 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
2589 
2590 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
2591 	if (!soc->reo_desc_deferred_freelist_init) {
2592 		qdf_mem_free(desc);
2593 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
2594 		return false;
2595 	}
2596 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
2597 			     (qdf_list_node_t *)desc);
2598 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
2599 
2600 	return true;
2601 }
2602 
2603 /*
2604  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
2605  *                            based on time threshold
2606  * @soc: Datapath soc handle
 *
 * Return: None
2610  */
2611 static void dp_reo_desc_defer_free(struct dp_soc *soc)
2612 {
2613 	struct reo_desc_deferred_freelist_node *desc;
2614 	unsigned long curr_ts = qdf_get_system_timestamp();
2615 
2616 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
2617 
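	/* Entries are queued in arrival order, so the list is sorted by
	 * free_ts; stop at the first entry that has not yet aged past
	 * REO_DESC_DEFERRED_FREE_MS.
	 */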
2618 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
2619 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2620 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
2621 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
2622 				      (qdf_list_node_t **)&desc);
2623 
2624 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
2625 
2626 		qdf_mem_unmap_nbytes_single(soc->osdev,
2627 					    desc->hw_qdesc_paddr,
2628 					    QDF_DMA_BIDIRECTIONAL,
2629 					    desc->hw_qdesc_alloc_size);
2630 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
2631 		qdf_mem_free(desc);
2632 
2633 		curr_ts = qdf_get_system_timestamp();
2634 	}
2635 
2636 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
2637 }
2638 #else
2639 static inline bool
2640 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
2641 			       struct reo_desc_list_node *freedesc)
2642 {
2643 	return false;
2644 }
2645 
2646 static void dp_reo_desc_defer_free(struct dp_soc *soc)
2647 {
2648 }
2649 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
2650 
2651 /*
 * dp_reo_desc_free() - Callback to free REO descriptor memory after
 * HW cache flush
2654  *
2655  * @soc: DP SOC handle
2656  * @cb_ctxt: Callback context
2657  * @reo_status: REO command status
2658  */
2659 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
2660 	union hal_reo_status *reo_status)
2661 {
2662 	struct reo_desc_list_node *freedesc =
2663 		(struct reo_desc_list_node *)cb_ctxt;
2664 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
2665 	unsigned long curr_ts = qdf_get_system_timestamp();
2666 
2667 	if ((reo_status->fl_cache_status.header.status !=
2668 		HAL_REO_CMD_SUCCESS) &&
2669 		(reo_status->fl_cache_status.header.status !=
2670 		HAL_REO_CMD_DRAIN)) {
2671 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
2672 			    soc, reo_status->rx_queue_status.header.status,
2673 			    freedesc->rx_tid.tid);
2674 	}
2675 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
2676 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
2677 		     rx_tid->tid);
2678 
2679 	/* REO desc is enqueued to be freed at a later point
2680 	 * in time, just free the freedesc alone and return
2681 	 */
2682 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
2683 		goto out;
2684 
2685 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
2686 
2687 	qdf_mem_unmap_nbytes_single(soc->osdev,
2688 		rx_tid->hw_qdesc_paddr,
2689 		QDF_DMA_BIDIRECTIONAL,
2690 		rx_tid->hw_qdesc_alloc_size);
2691 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2692 out:
2693 	qdf_mem_free(freedesc);
2694 }
2695 
2696 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
2697 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
2698 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2699 {
2700 	if (dma_addr < 0x50000000)
2701 		return QDF_STATUS_E_FAILURE;
2702 	else
2703 		return QDF_STATUS_SUCCESS;
2704 }
2705 #else
2706 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2707 {
2708 	return QDF_STATUS_SUCCESS;
2709 }
2710 #endif
2711 
2712 
2713 /*
2714  * dp_rx_tid_setup_wifi3() – Setup receive TID state
2715  * @peer: Datapath peer handle
2716  * @tid: TID
2717  * @ba_window_size: BlockAck window size
2718  * @start_seq: Starting sequence number
2719  *
2720  * Return: QDF_STATUS code
2721  */
2722 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
2723 				 uint32_t ba_window_size, uint32_t start_seq)
2724 {
2725 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2726 	struct dp_vdev *vdev = peer->vdev;
2727 	struct dp_soc *soc = vdev->pdev->soc;
2728 	uint32_t hw_qdesc_size;
2729 	uint32_t hw_qdesc_align;
2730 	int hal_pn_type;
2731 	void *hw_qdesc_vaddr;
2732 	uint32_t alloc_tries = 0;
2733 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2734 
2735 	if (!qdf_atomic_read(&peer->is_default_route_set))
2736 		return QDF_STATUS_E_FAILURE;
2737 
2738 	rx_tid->ba_win_size = ba_window_size;
2739 	if (rx_tid->hw_qdesc_vaddr_unaligned)
2740 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
2741 			start_seq, false);
2742 	rx_tid->delba_tx_status = 0;
2743 	rx_tid->ppdu_id_2k = 0;
2744 	rx_tid->num_of_addba_req = 0;
2745 	rx_tid->num_of_delba_req = 0;
2746 	rx_tid->num_of_addba_resp = 0;
2747 	rx_tid->num_addba_rsp_failed = 0;
2748 	rx_tid->num_addba_rsp_success = 0;
2749 	rx_tid->delba_tx_success_cnt = 0;
2750 	rx_tid->delba_tx_fail_cnt = 0;
2751 	rx_tid->statuscode = 0;
2752 
2753 	/* TODO: Allocating HW queue descriptors based on max BA window size
2754 	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
2756 	 * queue descriptors based on BA window size being negotiated (0 for
2757 	 * non BA cases), and reallocate when BA window size changes and also
2758 	 * send WMI message to FW to change the REO queue descriptor in Rx
2759 	 * peer entry as part of dp_rx_tid_update.
2760 	 */
2761 	if (tid != DP_NON_QOS_TID)
2762 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2763 			HAL_RX_MAX_BA_WINDOW, tid);
2764 	else
2765 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2766 			ba_window_size, tid);
2767 
2768 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
2769 	/* To avoid unnecessary extra allocation for alignment, try allocating
2770 	 * exact size and see if we already have aligned address.
2771 	 */
2772 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
2773 
2774 try_desc_alloc:
2775 	rx_tid->hw_qdesc_vaddr_unaligned =
2776 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
2777 
2778 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2779 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
2780 			    soc, tid);
2781 		return QDF_STATUS_E_NOMEM;
2782 	}
2783 
2784 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
2785 		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
2787 		 * memory for alignment
2788 		 */
2789 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2790 		rx_tid->hw_qdesc_vaddr_unaligned =
2791 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
2792 					hw_qdesc_align - 1);
2793 
2794 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2795 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
2796 				    soc, tid);
2797 			return QDF_STATUS_E_NOMEM;
2798 		}
2799 
2800 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
2801 			rx_tid->hw_qdesc_vaddr_unaligned,
2802 			hw_qdesc_align);
2803 
2804 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
2805 			      soc, rx_tid->hw_qdesc_alloc_size,
2806 			      hw_qdesc_vaddr);
2807 
2808 	} else {
2809 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
2810 	}
2811 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
2812 
2813 	/* TODO: Ensure that sec_type is set before ADDBA is received.
2814 	 * Currently this is set based on htt indication
2815 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
2816 	 */
2817 	switch (peer->security[dp_sec_ucast].sec_type) {
2818 	case cdp_sec_type_tkip_nomic:
2819 	case cdp_sec_type_aes_ccmp:
2820 	case cdp_sec_type_aes_ccmp_256:
2821 	case cdp_sec_type_aes_gcmp:
2822 	case cdp_sec_type_aes_gcmp_256:
2823 		hal_pn_type = HAL_PN_WPA;
2824 		break;
2825 	case cdp_sec_type_wapi:
2826 		if (vdev->opmode == wlan_op_mode_ap)
2827 			hal_pn_type = HAL_PN_WAPI_EVEN;
2828 		else
2829 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2830 		break;
2831 	default:
2832 		hal_pn_type = HAL_PN_NONE;
2833 		break;
2834 	}
2835 
2836 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2837 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2838 
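	/* DMA-map the queue descriptor so the REO HW can access it; the
	 * resulting physical address is recorded in rx_tid->hw_qdesc_paddr.
	 */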
2839 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2840 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2841 		&(rx_tid->hw_qdesc_paddr));
2842 
2843 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2844 			QDF_STATUS_SUCCESS) {
2845 		if (alloc_tries++ < 10) {
2846 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2847 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2848 			goto try_desc_alloc;
2849 		} else {
2850 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
2851 				    soc, tid);
2852 			err = QDF_STATUS_E_NOMEM;
2853 			goto error;
2854 		}
2855 	}
2856 
2857 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2858 		err = QDF_STATUS_E_PERM;
2859 		goto error;
2860 	}
2861 
2862 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2863 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2864 		    soc->ctrl_psoc,
2865 		    peer->vdev->pdev->pdev_id,
2866 		    peer->vdev->vdev_id,
2867 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2868 		    1, ba_window_size)) {
2869 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
2870 				    soc, tid);
2871 			err = QDF_STATUS_E_FAILURE;
2872 			goto error;
2873 		}
2874 	}
	return QDF_STATUS_SUCCESS;
2876 error:
2877 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2878 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2879 		    QDF_STATUS_SUCCESS)
2880 			qdf_mem_unmap_nbytes_single(
2881 				soc->osdev,
2882 				rx_tid->hw_qdesc_paddr,
2883 				QDF_DMA_BIDIRECTIONAL,
2884 				rx_tid->hw_qdesc_alloc_size);
2885 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2886 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2887 	}
2888 	return err;
2889 }
2890 
2891 #ifdef REO_DESC_DEFER_FREE
2892 /*
 * dp_reo_desc_clean_up() - If the command to flush the base desc fails,
 * add the desc back to the freelist and defer the deletion
2895  *
2896  * @soc: DP SOC handle
2897  * @desc: Base descriptor to be freed
2898  * @reo_status: REO command status
2899  */
2900 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2901 				 struct reo_desc_list_node *desc,
2902 				 union hal_reo_status *reo_status)
2903 {
2904 	desc->free_ts = qdf_get_system_timestamp();
2905 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2906 	qdf_list_insert_back(&soc->reo_desc_freelist,
2907 			     (qdf_list_node_t *)desc);
2908 }
2909 
2910 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2913  *
2914  * @list_size: REO desc list size to be cleaned
2915  */
2916 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2917 {
2918 	unsigned long curr_ts = qdf_get_system_timestamp();
2919 
2920 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
2921 		dp_err_log("%lu:freedesc number %d in freelist",
2922 			   curr_ts, *list_size);
2923 		/* limit the batch queue size */
2924 		*list_size = REO_DESC_FREELIST_SIZE;
2925 	}
2926 }
2927 #else
2928 /*
 * dp_reo_desc_clean_up() - If sending the REO command to flush the
 * cache fails, free the base REO desc anyway
2931  *
2932  * @soc: DP SOC handle
2933  * @desc: Base descriptor to be freed
2934  * @reo_status: REO command status
2935  */
2936 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2937 				 struct reo_desc_list_node *desc,
2938 				 union hal_reo_status *reo_status)
2939 {
2940 	if (reo_status) {
2941 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2942 		reo_status->fl_cache_status.header.status = 0;
2943 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2944 	}
2945 }
2946 
2947 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2950  *
2951  * @list_size: REO desc list size to be cleaned
2952  */
2953 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2954 {
2955 }
2956 #endif
2957 
2958 /*
2959  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2960  * cmd and re-insert desc into free list if send fails.
2961  *
2962  * @soc: DP SOC handle
2963  * @desc: desc with resend update cmd flag set
2964  * @rx_tid: Desc RX tid associated with update cmd for resetting
2965  * valid field to 0 in h/w
2966  *
2967  * Return: QDF status
2968  */
2969 static QDF_STATUS
2970 dp_resend_update_reo_cmd(struct dp_soc *soc,
2971 			 struct reo_desc_list_node *desc,
2972 			 struct dp_rx_tid *rx_tid)
2973 {
2974 	struct hal_reo_cmd_params params;
2975 
2976 	qdf_mem_zero(&params, sizeof(params));
2977 	params.std.need_status = 1;
2978 	params.std.addr_lo =
2979 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2980 	params.std.addr_hi =
2981 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2982 	params.u.upd_queue_params.update_vld = 1;
2983 	params.u.upd_queue_params.vld = 0;
2984 	desc->resend_update_reo_cmd = false;
2985 	/*
2986 	 * If the cmd send fails then set resend_update_reo_cmd flag
2987 	 * and insert the desc at the end of the free list to retry.
2988 	 */
2989 	if (dp_reo_send_cmd(soc,
2990 			    CMD_UPDATE_RX_REO_QUEUE,
2991 			    &params,
2992 			    dp_rx_tid_delete_cb,
2993 			    (void *)desc)
2994 	    != QDF_STATUS_SUCCESS) {
2995 		desc->resend_update_reo_cmd = true;
2996 		desc->free_ts = qdf_get_system_timestamp();
2997 		qdf_list_insert_back(&soc->reo_desc_freelist,
2998 				     (qdf_list_node_t *)desc);
2999 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
3000 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3001 		return QDF_STATUS_E_FAILURE;
3002 	}
3003 
3004 	return QDF_STATUS_SUCCESS;
3005 }
3006 
3007 /*
3008  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
3009  * after deleting the entries (ie., setting valid=0)
3010  *
3011  * @soc: DP SOC handle
3012  * @cb_ctxt: Callback context
3013  * @reo_status: REO command status
3014  */
3015 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
3016 			 union hal_reo_status *reo_status)
3017 {
3018 	struct reo_desc_list_node *freedesc =
3019 		(struct reo_desc_list_node *)cb_ctxt;
3020 	uint32_t list_size;
3021 	struct reo_desc_list_node *desc;
3022 	unsigned long curr_ts = qdf_get_system_timestamp();
3023 	uint32_t desc_size, tot_desc_size;
3024 	struct hal_reo_cmd_params params;
3025 	bool flush_failure = false;
3026 
3027 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
3028 
3029 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
3030 		qdf_mem_zero(reo_status, sizeof(*reo_status));
3031 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
3032 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
3033 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
3034 		return;
3035 	} else if (reo_status->rx_queue_status.header.status !=
3036 		HAL_REO_CMD_SUCCESS) {
3037 		/* Should not happen normally. Just print error for now */
3038 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
3039 			   reo_status->rx_queue_status.header.status,
3040 			   freedesc->rx_tid.tid);
3041 	}
3042 
3043 	dp_peer_info("%pK: rx_tid: %d status: %d",
3044 		     soc, freedesc->rx_tid.tid,
3045 		     reo_status->rx_queue_status.header.status);
3046 
3047 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3048 	freedesc->free_ts = curr_ts;
3049 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
3050 		(qdf_list_node_t *)freedesc, &list_size);
3051 
	/* On the MCL path the desc is added back to reo_desc_freelist when
	 * REO FLUSH fails. This can cause the number of REO queue descs
	 * pending in the free list to grow larger than the REO_CMD_RING
	 * max size, flooding the REO CMD ring and leaving the REO HW in an
	 * unexpected state. Hence the number of REO commands issued in one
	 * batch operation must be limited.
	 */
3058 	dp_reo_limit_clean_batch_sz(&list_size);
3059 
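	/* Pop descriptors for flushing when the freelist has grown too
	 * large, when the entry has aged past REO_DESC_FREE_DEFER_MS, or
	 * when an earlier UPDATE_RX_REO_QUEUE command needs to be resent.
	 */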
3060 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
3061 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
3062 		((list_size >= REO_DESC_FREELIST_SIZE) ||
3063 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
3064 		(desc->resend_update_reo_cmd && list_size))) {
3065 		struct dp_rx_tid *rx_tid;
3066 
3067 		qdf_list_remove_front(&soc->reo_desc_freelist,
3068 				(qdf_list_node_t **)&desc);
3069 		list_size--;
3070 		rx_tid = &desc->rx_tid;
3071 
3072 		/* First process descs with resend_update_reo_cmd set */
3073 		if (desc->resend_update_reo_cmd) {
3074 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
3075 			    QDF_STATUS_SUCCESS)
3076 				break;
3077 			else
3078 				continue;
3079 		}
3080 
3081 		/* Flush and invalidate REO descriptor from HW cache: Base and
3082 		 * extension descriptors should be flushed separately */
3083 		if (desc->pending_ext_desc_size)
3084 			tot_desc_size = desc->pending_ext_desc_size;
3085 		else
3086 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
3087 		/* Get base descriptor size by passing non-qos TID */
3088 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
3089 						   DP_NON_QOS_TID);
3090 
3091 		/* Flush reo extension descriptors */
3092 		while ((tot_desc_size -= desc_size) > 0) {
3093 			qdf_mem_zero(&params, sizeof(params));
3094 			params.std.addr_lo =
3095 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
3096 				tot_desc_size) & 0xffffffff;
3097 			params.std.addr_hi =
3098 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3099 
3100 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
3101 							CMD_FLUSH_CACHE,
3102 							&params,
3103 							NULL,
3104 							NULL)) {
3105 				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
3106 					   "tid %d desc %pK", rx_tid->tid,
3107 					   (void *)(rx_tid->hw_qdesc_paddr));
3108 				desc->pending_ext_desc_size = tot_desc_size +
3109 								      desc_size;
3110 				dp_reo_desc_clean_up(soc, desc, reo_status);
3111 				flush_failure = true;
3112 				break;
3113 			}
3114 		}
3115 
3116 		if (flush_failure)
3117 			break;
3118 		else
3119 			desc->pending_ext_desc_size = desc_size;
3120 
3121 		/* Flush base descriptor */
3122 		qdf_mem_zero(&params, sizeof(params));
3123 		params.std.need_status = 1;
3124 		params.std.addr_lo =
3125 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
3126 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3127 
3128 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
3129 							  CMD_FLUSH_CACHE,
3130 							  &params,
3131 							  dp_reo_desc_free,
3132 							  (void *)desc)) {
3133 			union hal_reo_status reo_status;
3134 			/*
			 * If dp_reo_send_cmd returns failure, the related TID
			 * queue desc should be unmapped. The local reo_desc,
			 * together with the TID queue desc, also needs to be
			 * freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 *
			 * On the MCL path, add the desc back to the free desc
			 * list and defer deletion.
3143 			 */
3144 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
3145 				   rx_tid->tid);
3146 			dp_reo_desc_clean_up(soc, desc, &reo_status);
3147 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3148 			break;
3149 		}
3150 	}
3151 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3152 
3153 	dp_reo_desc_defer_free(soc);
3154 }
3155 
3156 /*
3157  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
3158  * @peer: Datapath peer handle
3159  * @tid: TID
3160  *
3161  * Return: 0 on success, error code on failure
3162  */
3163 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
3164 {
3165 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
3166 	struct dp_soc *soc = peer->vdev->pdev->soc;
3167 	struct hal_reo_cmd_params params;
3168 	struct reo_desc_list_node *freedesc =
3169 		qdf_mem_malloc(sizeof(*freedesc));
3170 
3171 	if (!freedesc) {
3172 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
3173 			    soc, tid);
3174 		return -ENOMEM;
3175 	}
3176 
3177 	freedesc->rx_tid = *rx_tid;
3178 	freedesc->resend_update_reo_cmd = false;
3179 
3180 	qdf_mem_zero(&params, sizeof(params));
3181 
3182 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
3183 
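	/* Invalidate the REO queue (vld = 0) via UPDATE_RX_REO_QUEUE; the
	 * cache flush and final free are handled in dp_rx_tid_delete_cb().
	 */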
3184 	params.std.need_status = 1;
3185 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3186 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3187 	params.u.upd_queue_params.update_vld = 1;
3188 	params.u.upd_queue_params.vld = 0;
3189 
3190 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
3191 			    dp_rx_tid_delete_cb, (void *)freedesc)
3192 		!= QDF_STATUS_SUCCESS) {
3193 		/* Defer the clean up to the call back context */
3194 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3195 		freedesc->free_ts = qdf_get_system_timestamp();
3196 		freedesc->resend_update_reo_cmd = true;
3197 		qdf_list_insert_front(&soc->reo_desc_freelist,
3198 				      (qdf_list_node_t *)freedesc);
3199 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
3200 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3201 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
3202 	}
3203 
3204 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3205 	rx_tid->hw_qdesc_alloc_size = 0;
3206 	rx_tid->hw_qdesc_paddr = 0;
3207 
3208 	return 0;
3209 }
3210 
3211 #ifdef DP_LFR
3212 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
3213 {
3214 	int tid;
3215 
3216 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
3217 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
3218 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
3219 			      tid, peer, peer->local_id);
3220 	}
3221 }
3222 #else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
3224 #endif
3225 
3226 /*
3227  * dp_peer_rx_init() – Initialize receive TID state
3228  * @pdev: Datapath pdev
3229  * @peer: Datapath peer
3230  *
3231  */
3232 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3233 {
3234 	int tid;
3235 	struct dp_rx_tid *rx_tid;
3236 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3237 		rx_tid = &peer->rx_tid[tid];
3238 		rx_tid->array = &rx_tid->base;
3239 		rx_tid->base.head = rx_tid->base.tail = NULL;
3240 		rx_tid->tid = tid;
3241 		rx_tid->defrag_timeout_ms = 0;
3242 		rx_tid->ba_win_size = 0;
3243 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3244 
3245 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
3246 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
3247 	}
3248 
3249 	peer->active_ba_session_cnt = 0;
3250 	peer->hw_buffer_size = 0;
3251 	peer->kill_256_sessions = 0;
3252 
3253 	/* Setup default (non-qos) rx tid queue */
3254 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
3255 
	/* Set up the rx tid queue for TID 0.
	 * Other queues will be set up on receiving the first packet for that
	 * TID; without a pre-setup queue, that packet would hit a NULL REO
	 * queue error.
	 */
3260 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
3261 
3262 	/*
	 * Set up the rest of the TIDs to handle LFR
3264 	 */
3265 	dp_peer_setup_remaining_tids(peer);
3266 
3267 	/*
3268 	 * Set security defaults: no PN check, no security. The target may
3269 	 * send a HTT SEC_IND message to overwrite these defaults.
3270 	 */
3271 	peer->security[dp_sec_ucast].sec_type =
3272 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
3273 }
3274 
3275 /*
3276  * dp_peer_rx_cleanup() – Cleanup receive TID state
3277  * @vdev: Datapath vdev
3278  * @peer: Datapath peer
3279  *
3280  */
3281 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3282 {
3283 	int tid;
3284 	uint32_t tid_delete_mask = 0;
3285 
3286 	dp_info("Remove tids for peer: %pK", peer);
3287 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3288 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3289 
3290 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3291 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
3292 			/* Cleanup defrag related resource */
3293 			dp_rx_defrag_waitlist_remove(peer, tid);
3294 			dp_rx_reorder_flush_frag(peer, tid);
3295 		}
3296 
3297 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
3298 			dp_rx_tid_delete_wifi3(peer, tid);
3299 
3300 			tid_delete_mask |= (1 << tid);
3301 		}
3302 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3303 	}
3304 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
3305 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
3306 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
3307 			peer->vdev->pdev->pdev_id,
3308 			peer->vdev->vdev_id, peer->mac_addr.raw,
3309 			tid_delete_mask);
3310 	}
3311 #endif
3312 }
3313 
3314 /*
3315  * dp_peer_cleanup() – Cleanup peer information
3316  * @vdev: Datapath vdev
3317  * @peer: Datapath peer
3318  *
3319  */
3320 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3321 {
3322 	enum wlan_op_mode vdev_opmode;
3323 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3324 	struct dp_pdev *pdev = vdev->pdev;
3325 	struct dp_soc *soc = pdev->soc;
3326 
3327 	/* save vdev related member in case vdev freed */
3328 	vdev_opmode = vdev->opmode;
3329 
3330 	dp_monitor_peer_tx_cleanup(vdev, peer);
3331 
	/* cleanup the Rx reorder queues for this peer */
	if (vdev_opmode != wlan_op_mode_monitor)
		dp_peer_rx_cleanup(vdev, peer);
3335 
3336 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3337 		     QDF_MAC_ADDR_SIZE);
3338 
3339 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3340 		soc->cdp_soc.ol_ops->peer_unref_delete(
3341 				soc->ctrl_psoc,
3342 				vdev->pdev->pdev_id,
3343 				peer->mac_addr.raw, vdev_mac_addr,
3344 				vdev_opmode);
3345 }
3346 
/* dp_teardown_256_ba_sessions() - Teardown sessions using 256
3348  *                                window size when a request with
3349  *                                64 window size is received.
3350  *                                This is done as a WAR since HW can
3351  *                                have only one setting per peer (64 or 256).
3352  *                                For HKv2, we use per tid buffersize setting
3353  *                                for 0 to per_tid_basize_max_tid. For tid
3354  *                                more than per_tid_basize_max_tid we use HKv1
3355  *                                method.
3356  * @peer: Datapath peer
3357  *
3358  * Return: void
3359  */
3360 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
3361 {
3362 	uint8_t delba_rcode = 0;
3363 	int tid;
3364 	struct dp_rx_tid *rx_tid = NULL;
3365 
3366 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
3367 	for (; tid < DP_MAX_TIDS; tid++) {
3368 		rx_tid = &peer->rx_tid[tid];
3369 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3370 
3371 		if (rx_tid->ba_win_size <= 64) {
3372 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3373 			continue;
3374 		} else {
3375 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
3376 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3377 				/* send delba */
3378 				if (!rx_tid->delba_tx_status) {
3379 					rx_tid->delba_tx_retry++;
3380 					rx_tid->delba_tx_status = 1;
3381 					rx_tid->delba_rcode =
3382 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
3383 					delba_rcode = rx_tid->delba_rcode;
3384 
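					/* Drop the tid lock before calling
					 * into the control path to send the
					 * DELBA frame.
					 */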
3385 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3386 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3387 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3388 							peer->vdev->pdev->soc->ctrl_psoc,
3389 							peer->vdev->vdev_id,
3390 							peer->mac_addr.raw,
3391 							tid, delba_rcode,
3392 							CDP_DELBA_REASON_NONE);
3393 				} else {
3394 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3395 				}
3396 			} else {
3397 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
3398 			}
3399 		}
3400 	}
3401 }
3402 
3403 /*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid state after ADDBA
 *                                       response tx completion
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 *
 * Return: 0 on success, error code on failure
3412 */
3413 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
3414 				      uint8_t *peer_mac,
3415 				      uint16_t vdev_id,
3416 				      uint8_t tid, int status)
3417 {
3418 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3419 						       peer_mac, 0, vdev_id,
3420 						       DP_MOD_ID_CDP);
3421 	struct dp_rx_tid *rx_tid = NULL;
3422 
3423 	if (!peer) {
3424 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3425 		goto fail;
3426 	}
3427 	rx_tid = &peer->rx_tid[tid];
3428 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3429 	if (status) {
3430 		rx_tid->num_addba_rsp_failed++;
3431 		dp_rx_tid_update_wifi3(peer, tid, 1,
3432 				       IEEE80211_SEQ_MAX, false);
3433 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3434 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3435 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
3436 
3437 		goto success;
3438 	}
3439 
3440 	rx_tid->num_addba_rsp_success++;
3441 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
3442 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3443 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
3444 			    cdp_soc, tid);
3445 		goto fail;
3446 	}
3447 
3448 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
3449 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3450 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
3451 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3452 		goto fail;
3453 	}
3454 
3455 	if (dp_rx_tid_update_wifi3(peer, tid,
3456 				   rx_tid->ba_win_size,
3457 				   rx_tid->startseqnum,
3458 				   false)) {
3459 		dp_err("Failed update REO SSN");
3460 	}
3461 
3462 	dp_info("tid %u window_size %u start_seq_num %u",
3463 		tid, rx_tid->ba_win_size,
3464 		rx_tid->startseqnum);
3465 
3466 	/* First Session */
3467 	if (peer->active_ba_session_cnt == 0) {
3468 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
3469 			peer->hw_buffer_size = 256;
3470 		else
3471 			peer->hw_buffer_size = 64;
3472 	}
3473 
3474 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
3475 
3476 	peer->active_ba_session_cnt++;
3477 
3478 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3479 
3480 	/* Kill any session having 256 buffer size
3481 	 * when 64 buffer size request is received.
3482 	 * Also, latch on to 64 as new buffer size.
3483 	 */
3484 	if (peer->kill_256_sessions) {
3485 		dp_teardown_256_ba_sessions(peer);
3486 		peer->kill_256_sessions = 0;
3487 	}
3488 
3489 success:
3490 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3491 	return QDF_STATUS_SUCCESS;
3492 
3493 fail:
3494 	if (peer)
3495 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3496 
3497 	return QDF_STATUS_E_FAILURE;
3498 }
3499 
3500 /*
 * dp_addba_responsesetup_wifi3() - Set up parameters for the ADDBA response
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output statuscode
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
3511 */
3512 QDF_STATUS
3513 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3514 			     uint16_t vdev_id, uint8_t tid,
3515 			     uint8_t *dialogtoken, uint16_t *statuscode,
3516 			     uint16_t *buffersize, uint16_t *batimeout)
3517 {
3518 	struct dp_rx_tid *rx_tid = NULL;
3519 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3520 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3521 						       peer_mac, 0, vdev_id,
3522 						       DP_MOD_ID_CDP);
3523 
3524 	if (!peer) {
3525 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3526 		return QDF_STATUS_E_FAILURE;
3527 	}
3528 	rx_tid = &peer->rx_tid[tid];
3529 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3530 	rx_tid->num_of_addba_resp++;
3531 	/* setup ADDBA response parameters */
3532 	*dialogtoken = rx_tid->dialogtoken;
3533 	*statuscode = rx_tid->statuscode;
3534 	*buffersize = rx_tid->ba_win_size;
3535 	*batimeout  = 0;
3536 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3537 
3538 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3539 
3540 	return status;
3541 }
3542 
3543 /* dp_check_ba_buffersize() - Check buffer size in request
3544  *                            and latch onto this size based on
3545  *                            size used in first active session.
3546  * @peer: Datapath peer
3547  * @tid: Tid
3548  * @buffersize: Block ack window size
3549  *
3550  * Return: void
3551  */
3552 static void dp_check_ba_buffersize(struct dp_peer *peer,
3553 				   uint16_t tid,
3554 				   uint16_t buffersize)
3555 {
3556 	struct dp_rx_tid *rx_tid = NULL;
3557 
3558 	rx_tid = &peer->rx_tid[tid];
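	/* When per-TID BA window sizing is enabled and this TID falls within
	 * the configured range, honor the requested size directly and skip
	 * the 64/256 latching logic below.
	 */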
3559 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
3560 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
3561 		rx_tid->ba_win_size = buffersize;
3562 		return;
3563 	} else {
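		/* No per-TID override: the first active session latches the
		 * window size, and later sessions are clamped to the hardware
		 * buffer size (64 or 256) already in use.
		 */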
3564 		if (peer->active_ba_session_cnt == 0) {
3565 			rx_tid->ba_win_size = buffersize;
3566 		} else {
3567 			if (peer->hw_buffer_size == 64) {
3568 				if (buffersize <= 64)
3569 					rx_tid->ba_win_size = buffersize;
3570 				else
3571 					rx_tid->ba_win_size = peer->hw_buffer_size;
3572 			} else if (peer->hw_buffer_size == 256) {
3573 				if (buffersize > 64) {
3574 					rx_tid->ba_win_size = buffersize;
3575 				} else {
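					/* A request of 64 or less while the
					 * hardware is latched at 256 forces a
					 * downgrade; existing 256-sized
					 * sessions are torn down later from
					 * the ADDBA response tx completion
					 * path.
					 */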
3576 					rx_tid->ba_win_size = buffersize;
3577 					peer->hw_buffer_size = 64;
3578 					peer->kill_256_sessions = 1;
3579 				}
3580 			}
3581 		}
3582 	}
3583 }
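
/*
 * Illustrative outcome of dp_check_ba_buffersize() when no per-TID window
 * size configuration applies (a summary of the logic above, not a spec):
 *
 *   active sessions | hw_buffer_size | requested | resulting ba_win_size
 *   ----------------+----------------+-----------+----------------------------
 *         0         |       -        |    any    | requested (hw size latched
 *                   |                |           | on resp tx completion)
 *        > 0        |      64        |   <= 64   | requested
 *        > 0        |      64        |    > 64   | 64
 *        > 0        |      256       |    > 64   | requested
 *        > 0        |      256       |   <= 64   | requested, downgrade to 64
 */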
3584 
3585 #define DP_RX_BA_SESSION_DISABLE  1
3586 
3587 /*
3588  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
3589  *
3590  * @soc: Datapath soc handle
3591  * @peer_mac: Datapath peer mac address
3592  * @vdev_id: id of datapath vdev
3593  * @dialogtoken: dialogtoken from ADDBA frame
3594  * @tid: TID number
3595  * @batimeout: BA timeout
3596  * @buffersize: BA window size
3597  * @startseqnum: Start seq. number received in BA sequence control
3598  *
3599  * Return: 0 on success, error code on failure
3600  */
3601 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
3602 				  uint8_t *peer_mac,
3603 				  uint16_t vdev_id,
3604 				  uint8_t dialogtoken,
3605 				  uint16_t tid, uint16_t batimeout,
3606 				  uint16_t buffersize,
3607 				  uint16_t startseqnum)
3608 {
3609 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3610 	struct dp_rx_tid *rx_tid = NULL;
3611 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3612 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
3613 						       peer_mac, 0, vdev_id,
3614 						       DP_MOD_ID_CDP);
3615 
3616 	if (!peer) {
3617 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3618 		return QDF_STATUS_E_FAILURE;
3619 	}
3620 	rx_tid = &peer->rx_tid[tid];
3621 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3622 	rx_tid->num_of_addba_req++;
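	/* An ADDBA request on a TID that already has an active session tears
	 * down the existing hardware queue state so the session can be
	 * renegotiated below.
	 */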
3623 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
3624 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
3625 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
3626 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3627 		peer->active_ba_session_cnt--;
3628 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
3629 			      cdp_soc, tid);
3630 	}
3631 
3632 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3633 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3634 		status = QDF_STATUS_E_FAILURE;
3635 		goto fail;
3636 	}
3637 
3638 	if (wlan_cfg_is_dp_force_rx_64_ba(soc->wlan_cfg_ctx)) {
3639 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3640 			  "force use BA64 scheme");
3641 		buffersize = qdf_min((uint16_t)64, buffersize);
3642 	}
3643 
3644 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
3645 		dp_peer_info("%pK: disable BA session",
3646 			     cdp_soc);
3647 
3648 		buffersize = 1;
3649 	} else if (rx_tid->rx_ba_win_size_override) {
3650 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
3651 			     rx_tid->rx_ba_win_size_override);
3652 
3653 		buffersize = rx_tid->rx_ba_win_size_override;
3654 	} else {
3655 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
3656 			     buffersize);
3657 	}
3658 
3659 	dp_check_ba_buffersize(peer, tid, buffersize);
3660 
3661 	if (dp_rx_tid_setup_wifi3(peer, tid,
3662 	    rx_tid->ba_win_size, startseqnum)) {
3663 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3664 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3665 		status = QDF_STATUS_E_FAILURE;
3666 		goto fail;
3667 	}
3668 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
3669 
3670 	rx_tid->dialogtoken = dialogtoken;
3671 	rx_tid->startseqnum = startseqnum;
3672 
3673 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
3674 		rx_tid->statuscode = rx_tid->userstatuscode;
3675 	else
3676 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
3677 
3678 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
3679 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
3680 
3681 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3682 
3683 fail:
3684 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3685 
3686 	return status;
3687 }
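
/*
 * Illustrative ordering of the ADDBA handlers in this file, sketched as a
 * hypothetical control-path caller (the real callers invoke these through
 * the cdp ops table; variable names below are placeholders):
 *
 *	dp_addba_requestprocess_wifi3(soc, mac, vdev_id, token, tid,
 *				      batimeout, buffersize, ssn);
 *	dp_addba_responsesetup_wifi3(soc, mac, vdev_id, tid, &dialogtoken,
 *				     &statuscode, &buffersize, &batimeout);
 *	... control path transmits the ADDBA response frame ...
 *	dp_addba_resp_tx_completion_wifi3(soc, mac, vdev_id, tid, tx_status);
 */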
3688 
3689 /*
3690 * dp_set_addba_response() - Set a user defined ADDBA response status code
3691 *
3692 * @soc: Datapath soc handle
3693 * @peer_mac: Datapath peer mac address
3694 * @vdev_id: id of datapath vdev
3695 * @tid: TID number
3696 * @statuscode: response status code to be set
3697 */
3698 QDF_STATUS
3699 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3700 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
3701 {
3702 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3703 						       peer_mac, 0, vdev_id,
3704 						       DP_MOD_ID_CDP);
3705 	struct dp_rx_tid *rx_tid;
3706 
3707 	if (!peer) {
3708 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3709 		return QDF_STATUS_E_FAILURE;
3710 	}
3711 
3712 	rx_tid = &peer->rx_tid[tid];
3713 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3714 	rx_tid->userstatuscode = statuscode;
3715 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3716 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3717 
3718 	return QDF_STATUS_SUCCESS;
3719 }
3720 
3721 /*
3722 * dp_delba_process_wifi3() - Process DELBA from peer
3723 * @soc: Datapath soc handle
3724 * @peer_mac: Datapath peer mac address
3725 * @vdev_id: id of datapath vdev
3726 * @tid: TID number
3727 * @reasoncode: Reason code received in DELBA frame
3728 *
3729 * Return: 0 on success, error code on failure
3730 */
3731 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3732 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
3733 {
3734 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3735 	struct dp_rx_tid *rx_tid;
3736 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3737 						      peer_mac, 0, vdev_id,
3738 						      DP_MOD_ID_CDP);
3739 
3740 	if (!peer) {
3741 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3742 		return QDF_STATUS_E_FAILURE;
3743 	}
3744 	rx_tid = &peer->rx_tid[tid];
3745 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3746 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
3747 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3748 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3749 		status = QDF_STATUS_E_FAILURE;
3750 		goto fail;
3751 	}
3752 	/* TODO: See if we can delete the existing REO queue descriptor and
3753 	 * replace with a new one without queue extension descriptor to save
3754 	 * memory
3755 	 */
3756 	rx_tid->delba_rcode = reasoncode;
3757 	rx_tid->num_of_delba_req++;
3758 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
3759 
3760 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
3761 	peer->active_ba_session_cnt--;
3762 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3763 fail:
3764 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3765 
3766 	return status;
3767 }
3768 
3769 /*
3770  * dp_delba_tx_completion_wifi3() - Process DELBA frame tx completion
3771  *
3772  * @soc: Datapath soc handle
3773  * @peer_mac: Datapath peer mac address
3774  * @vdev_id: id of datapath vdev
3775  * @tid: TID number
3776  * @status: tx completion status
3777  * Return: 0 on success, error code on failure
3778  */
3779 
3780 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3781 				 uint16_t vdev_id,
3782 				 uint8_t tid, int status)
3783 {
3784 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
3785 	struct dp_rx_tid *rx_tid = NULL;
3786 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3787 						      peer_mac, 0, vdev_id,
3788 						      DP_MOD_ID_CDP);
3789 
3790 	if (!peer) {
3791 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
3792 		return QDF_STATUS_E_FAILURE;
3793 	}
3794 	rx_tid = &peer->rx_tid[tid];
3795 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3796 	if (status) {
3797 		rx_tid->delba_tx_fail_cnt++;
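		/* DELBA tx failed: if retries remain, ask the control path to
		 * resend and keep delba_tx_status set; otherwise give up and
		 * clear the retry state.
		 */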
3798 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
3799 			rx_tid->delba_tx_retry = 0;
3800 			rx_tid->delba_tx_status = 0;
3801 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3802 		} else {
3803 			rx_tid->delba_tx_retry++;
3804 			rx_tid->delba_tx_status = 1;
3805 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3806 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3807 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3808 					peer->vdev->pdev->soc->ctrl_psoc,
3809 					peer->vdev->vdev_id,
3810 					peer->mac_addr.raw, tid,
3811 					rx_tid->delba_rcode,
3812 					CDP_DELBA_REASON_NONE);
3813 		}
3814 		goto end;
3815 	} else {
3816 		rx_tid->delba_tx_success_cnt++;
3817 		rx_tid->delba_tx_retry = 0;
3818 		rx_tid->delba_tx_status = 0;
3819 	}
3820 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
3821 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
3822 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3823 		peer->active_ba_session_cnt--;
3824 	}
3825 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3826 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
3827 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3828 	}
3829 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3830 
3831 end:
3832 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3833 
3834 	return ret;
3835 }
3836 
3837 /**
3838  * dp_set_pn_check_wifi3() - enable PN check in REO for security
3839  * @soc: Datapath soc handle
3840  * @vdev_id: id of datapath vdev
3841  * @peer_mac: Datapath peer mac address
3842  * @sec_type: security type
3843  * @rx_pn: Receive pn starting number
3844  *
3845  * Return: QDF_STATUS_SUCCESS on success, error code on failure
3846  *
3847  */
3848 
3849 QDF_STATUS
3850 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3851 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3852 		      uint32_t *rx_pn)
3853 {
3854 	struct dp_pdev *pdev;
3855 	int i;
3856 	uint8_t pn_size;
3857 	struct hal_reo_cmd_params params;
3858 	struct dp_peer *peer = NULL;
3859 	struct dp_vdev *vdev = NULL;
3860 
3861 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3862 				      peer_mac, 0, vdev_id,
3863 				      DP_MOD_ID_CDP);
3864 
3865 	if (!peer) {
3866 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3867 		return QDF_STATUS_E_FAILURE;
3868 	}
3869 
3870 	vdev = peer->vdev;
3871 
3872 	if (!vdev) {
3873 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
3874 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3875 		return QDF_STATUS_E_FAILURE;
3876 	}
3877 
3878 	pdev = vdev->pdev;
3879 	qdf_mem_zero(&params, sizeof(params));
3880 
3881 	params.std.need_status = 1;
3882 	params.u.upd_queue_params.update_pn_valid = 1;
3883 	params.u.upd_queue_params.update_pn_size = 1;
3884 	params.u.upd_queue_params.update_pn = 1;
3885 	params.u.upd_queue_params.update_pn_check_needed = 1;
3886 	params.u.upd_queue_params.update_svld = 1;
3887 	params.u.upd_queue_params.svld = 0;
3888 
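	/* The PN size programmed into the REO queue depends on the cipher:
	 * 48-bit PN for TKIP/CCMP/GCMP variants, 128-bit PN for WAPI (with
	 * even/odd PN enforcement based on AP/STA role), and PN check
	 * disabled for the remaining security types.
	 */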
3889 	switch (sec_type) {
3890 	case cdp_sec_type_tkip_nomic:
3891 	case cdp_sec_type_aes_ccmp:
3892 	case cdp_sec_type_aes_ccmp_256:
3893 	case cdp_sec_type_aes_gcmp:
3894 	case cdp_sec_type_aes_gcmp_256:
3895 		params.u.upd_queue_params.pn_check_needed = 1;
3896 		params.u.upd_queue_params.pn_size = 48;
3897 		pn_size = 48;
3898 		break;
3899 	case cdp_sec_type_wapi:
3900 		params.u.upd_queue_params.pn_check_needed = 1;
3901 		params.u.upd_queue_params.pn_size = 128;
3902 		pn_size = 128;
3903 		if (vdev->opmode == wlan_op_mode_ap) {
3904 			params.u.upd_queue_params.pn_even = 1;
3905 			params.u.upd_queue_params.update_pn_even = 1;
3906 		} else {
3907 			params.u.upd_queue_params.pn_uneven = 1;
3908 			params.u.upd_queue_params.update_pn_uneven = 1;
3909 		}
3910 		break;
3911 	default:
3912 		params.u.upd_queue_params.pn_check_needed = 0;
3913 		pn_size = 0;
3914 		break;
3915 	}
3916 
3917 
3918 	for (i = 0; i < DP_MAX_TIDS; i++) {
3919 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3920 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3921 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3922 			params.std.addr_lo =
3923 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3924 			params.std.addr_hi =
3925 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3926 
3927 			if (pn_size) {
3928 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
3929 					     soc, i, rx_pn[3], rx_pn[2],
3930 					     rx_pn[1], rx_pn[0]);
3931 				params.u.upd_queue_params.update_pn_valid = 1;
3932 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3933 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3934 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3935 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3936 			}
3937 			rx_tid->pn_size = pn_size;
3938 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3939 					    CMD_UPDATE_RX_REO_QUEUE,
3940 					    &params, dp_rx_tid_update_cb,
3941 					    rx_tid)) {
3942 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE "
3943 					   "tid %d desc %pK", rx_tid->tid,
3944 					   (void *)(rx_tid->hw_qdesc_paddr));
3945 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3946 					     rx.err.reo_cmd_send_fail, 1);
3947 			}
3948 		} else {
3949 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
3950 		}
3951 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3952 	}
3953 
3954 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3955 
3956 	return QDF_STATUS_SUCCESS;
3957 }
3958 
3959 
3960 /**
3961  * dp_set_key_sec_type_wifi3() - set security mode of key
3962  * @soc: Datapath soc handle
3963  * @vdev_id: id of datapath vdev
3964  * @peer_mac: Datapath peer mac address
3965  * @sec_type: security type
3966  * @is_unicast: key type (unicast or group key)
3967  *
3968  * Return: QDF_STATUS_SUCCESS on success, error code on failure
3969  *
3970  */
3971 
3972 QDF_STATUS
3973 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3974 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3975 			  bool is_unicast)
3976 {
3977 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3978 						       peer_mac, 0, vdev_id,
3979 						       DP_MOD_ID_CDP);
3980 	int sec_index;
3981 
3982 	if (!peer) {
3983 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3984 		return QDF_STATUS_E_FAILURE;
3985 	}
3986 
3987 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3988 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3989 		     is_unicast ? "ucast" : "mcast", sec_type);
3990 
3991 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3992 	peer->security[sec_index].sec_type = sec_type;
3993 
3994 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3995 
3996 	return QDF_STATUS_SUCCESS;
3997 }
3998 
3999 void
4000 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
4001 		      enum cdp_sec_type sec_type, int is_unicast,
4002 		      u_int32_t *michael_key,
4003 		      u_int32_t *rx_pn)
4004 {
4005 	struct dp_peer *peer;
4006 	int sec_index;
4007 
4008 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4009 	if (!peer) {
4010 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
4011 			    peer_id);
4012 		return;
4013 	}
4014 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
4015 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
4016 			  is_unicast ? "ucast" : "mcast", sec_type);
4017 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
4018 	peer->security[sec_index].sec_type = sec_type;
4019 #ifdef notyet /* TODO: See if this is required for defrag support */
4020 	/* michael key only valid for TKIP, but for simplicity,
4021 	 * copy it anyway
4022 	 */
4023 	qdf_mem_copy(
4024 		&peer->security[sec_index].michael_key[0],
4025 		michael_key,
4026 		sizeof(peer->security[sec_index].michael_key));
4027 #ifdef BIG_ENDIAN_HOST
4028 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
4029 				 sizeof(peer->security[sec_index].michael_key));
4030 #endif /* BIG_ENDIAN_HOST */
4031 #endif
4032 
4033 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
4034 	if (sec_type != cdp_sec_type_wapi) {
4035 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
4036 	} else {
4037 		for (i = 0; i < DP_MAX_TIDS; i++) {
4038 			/*
4039 			 * Setting PN valid bit for WAPI sec_type,
4040 			 * since WAPI PN has to be started with predefined value
4041 			 */
4042 			peer->tids_last_pn_valid[i] = 1;
4043 			qdf_mem_copy(
4044 				(u_int8_t *) &peer->tids_last_pn[i],
4045 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
4046 			peer->tids_last_pn[i].pn128[1] =
4047 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
4048 			peer->tids_last_pn[i].pn128[0] =
4049 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
4050 		}
4051 	}
4052 #endif
4053 	/* TODO: Update HW TID queue with PN check parameters (pn type for
4054 	 * all security types and last pn for WAPI) once REO command API
4055 	 * is available
4056 	 */
4057 
4058 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4059 }
4060 
4061 #ifdef QCA_PEER_EXT_STATS
4062 /*
4063  * dp_peer_ext_stats_ctx_alloc() - Allocate peer ext
4064  *                                 stats context
4065  * @soc: DP SoC context
4066  * @peer: DP peer context
4067  *
4068  * Allocate the peer extended stats context
4069  *
4070  * Return: QDF_STATUS_SUCCESS if allocation is
4071  *	   successful
4072  */
4073 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
4074 				       struct dp_peer *peer)
4075 {
4076 	uint8_t tid, ctx_id;
4077 
4078 	if (!soc || !peer) {
4079 		dp_warn("Null soc%pK or peer%pK", soc, peer);
4080 		return QDF_STATUS_E_INVAL;
4081 	}
4082 
4083 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4084 		return QDF_STATUS_SUCCESS;
4085 
4086 	/*
4087 	 * Allocate memory for peer extended stats.
4088 	 */
4089 	peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
4090 	if (!peer->pext_stats) {
4091 		dp_err("Peer extended stats obj alloc failed!!");
4092 		return QDF_STATUS_E_NOMEM;
4093 	}
4094 
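	/* Initialize the per-TID, per-ring delay histograms: software enqueue
	 * delay and hardware completion delay for tx, reap-to-stack delay
	 * for rx.
	 */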
4095 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
4096 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
4097 			struct cdp_delay_tx_stats *tx_delay =
4098 			&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
4099 			struct cdp_delay_rx_stats *rx_delay =
4100 			&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
4101 
4102 			dp_hist_init(&tx_delay->tx_swq_delay,
4103 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
4104 			dp_hist_init(&tx_delay->hwtx_delay,
4105 				     CDP_HIST_TYPE_HW_COMP_DELAY);
4106 			dp_hist_init(&rx_delay->to_stack_delay,
4107 				     CDP_HIST_TYPE_REAP_STACK);
4108 		}
4109 	}
4110 
4111 	return QDF_STATUS_SUCCESS;
4112 }
4113 
4114 /*
4115  * dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer extended stats context
4116  * @peer: DP peer context
4117  *
4118  * Free the peer extended stats context
4119  *
4120  * Return: Void
4121  */
4122 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
4123 {
4124 	if (!peer) {
4125 		dp_warn("peer_ext dealloc failed due to NULL peer object");
4126 		return;
4127 	}
4128 
4129 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
4130 		return;
4131 
4132 	if (!peer->pext_stats)
4133 		return;
4134 
4135 	qdf_mem_free(peer->pext_stats);
4136 	peer->pext_stats = NULL;
4137 }
4138 #endif
4139 
4140 QDF_STATUS
4141 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
4142 			uint8_t tid, uint16_t win_sz)
4143 {
4144 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
4145 	struct dp_peer *peer;
4146 	struct dp_rx_tid *rx_tid;
4147 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4148 
4149 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
4150 
4151 	if (!peer) {
4152 		dp_peer_err("%pK: Couldn't find peer from ID %d",
4153 			    soc, peer_id);
4154 		return QDF_STATUS_E_FAILURE;
4155 	}
4156 
4157 	qdf_assert_always(tid < DP_MAX_TIDS);
4158 
4159 	rx_tid = &peer->rx_tid[tid];
4160 
4161 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
4162 		if (!rx_tid->delba_tx_status) {
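			/* Record the firmware-requested (reduced) window so
			 * the next ADDBA negotiation on this TID uses it,
			 * then trigger a DELBA to tear down the current
			 * session.
			 */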
4163 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
4164 				     soc, peer_id, tid, win_sz);
4165 
4166 			qdf_spin_lock_bh(&rx_tid->tid_lock);
4167 
4168 			rx_tid->delba_tx_status = 1;
4169 
4170 			rx_tid->rx_ba_win_size_override =
4171 			    qdf_min((uint16_t)63, win_sz);
4172 
4173 			rx_tid->delba_rcode =
4174 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
4175 
4176 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4177 
4178 			if (soc->cdp_soc.ol_ops->send_delba)
4179 				soc->cdp_soc.ol_ops->send_delba(
4180 					peer->vdev->pdev->soc->ctrl_psoc,
4181 					peer->vdev->vdev_id,
4182 					peer->mac_addr.raw,
4183 					tid,
4184 					rx_tid->delba_rcode,
4185 					CDP_DELBA_REASON_NONE);
4186 		}
4187 	} else {
4188 		dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid);
4189 		status = QDF_STATUS_E_FAILURE;
4190 	}
4191 
4192 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4193 
4194 	return status;
4195 }
4196 
4197 #ifdef DP_PEER_EXTENDED_API
4198 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4199 			    struct ol_txrx_desc_type *sta_desc)
4200 {
4201 	struct dp_peer *peer;
4202 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4203 
4204 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
4205 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4206 
4207 	if (!peer)
4208 		return QDF_STATUS_E_FAULT;
4209 
4210 	qdf_spin_lock_bh(&peer->peer_info_lock);
4211 	peer->state = OL_TXRX_PEER_STATE_CONN;
4212 	qdf_spin_unlock_bh(&peer->peer_info_lock);
4213 
4214 	dp_rx_flush_rx_cached(peer, false);
4215 
4216 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4217 
4218 	return QDF_STATUS_SUCCESS;
4219 }
4220 
4221 QDF_STATUS
4222 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4223 	      struct qdf_mac_addr peer_addr)
4224 {
4225 	struct dp_peer *peer;
4226 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4227 
4228 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
4229 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4230 	if (!peer || !peer->valid)
4231 		return QDF_STATUS_E_FAULT;
4232 
4233 	dp_clear_peer_internal(soc, peer);
4234 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4235 	return QDF_STATUS_SUCCESS;
4236 }
4237 
4238 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4239 				enum ol_txrx_peer_state state)
4240 {
4241 	struct dp_peer *peer;
4242 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4243 
4244 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4245 				       DP_MOD_ID_CDP);
4246 	if (!peer) {
4247 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
4248 			    soc, QDF_MAC_ADDR_REF(peer_mac));
4249 		return QDF_STATUS_E_FAILURE;
4250 	}
4251 	peer->state = state;
4252 
4253 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
4254 
4255 	dp_info("peer %pK state %d", peer, peer->state);
4256 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4257 	 * Decrement it here.
4258 	 */
4259 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4260 
4261 	return QDF_STATUS_SUCCESS;
4262 }
4263 
4264 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4265 			 uint8_t *vdev_id)
4266 {
4267 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4268 	struct dp_peer *peer =
4269 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4270 				       DP_MOD_ID_CDP);
4271 
4272 	if (!peer)
4273 		return QDF_STATUS_E_FAILURE;
4274 
4275 	dp_info("peer %pK vdev %pK vdev id %d",
4276 		peer, peer->vdev, peer->vdev->vdev_id);
4277 	*vdev_id = peer->vdev->vdev_id;
4278 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4279 	 * Decrement it here.
4280 	 */
4281 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4282 
4283 	return QDF_STATUS_SUCCESS;
4284 }
4285 
4286 struct cdp_vdev *
4287 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
4288 			 struct qdf_mac_addr peer_addr)
4289 {
4290 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4291 	struct dp_peer *peer = NULL;
4292 	struct cdp_vdev *vdev = NULL;
4293 
4294 	if (!pdev) {
4295 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
4296 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
4297 		return NULL;
4298 	}
4299 
4300 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
4301 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
4302 	if (!peer) {
4303 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4304 			  "Peer not found for peer_addr: "QDF_MAC_ADDR_FMT,
4305 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
4306 		return NULL;
4307 	}
4308 
4309 	vdev = (struct cdp_vdev *)peer->vdev;
4310 
4311 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4312 	return vdev;
4313 }
4314 
4315 /**
4316  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
4317  * @peer_handle: peer instance
4318  *
4319  * Get the virtual interface instance to which the peer belongs
4320  *
4321  * Return: virtual interface instance pointer,
4322  *         NULL in case it cannot be found
4323  */
4324 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
4325 {
4326 	struct dp_peer *peer = peer_handle;
4327 
4328 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
4329 	return (struct cdp_vdev *)peer->vdev;
4330 }
4331 
4332 /**
4333  * dp_peer_get_peer_mac_addr() - Get peer mac address
4334  * @peer_handle: peer instance
4335  *
4336  * Get peer mac address
4337  *
4338  * Return: peer mac address pointer,
4339  *         NULL in case it cannot be found
4340  */
4341 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
4342 {
4343 	struct dp_peer *peer = peer_handle;
4344 	uint8_t *mac;
4345 
4346 	mac = peer->mac_addr.raw;
4347 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
4348 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4349 	return peer->mac_addr.raw;
4350 }
4351 
4352 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4353 		      uint8_t *peer_mac)
4354 {
4355 	enum ol_txrx_peer_state peer_state;
4356 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4357 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
4358 						       vdev_id, DP_MOD_ID_CDP);
4359 
4360 	if (!peer)
4361 		return QDF_STATUS_E_FAILURE;
4362 
4363 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
4364 	peer_state = peer->state;
4365 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4366 
4367 	return peer_state;
4368 }
4369 
4370 /**
4371  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
4372  * @pdev - data path device instance
4373  *
4374  * local peer id pool alloc for physical device
4375  *
4376  * Return: none
4377  */
4378 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
4379 {
4380 	int i;
4381 
4382 	/* point the freelist to the first ID */
4383 	pdev->local_peer_ids.freelist = 0;
4384 
4385 	/* link each ID to the next one */
4386 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4387 		pdev->local_peer_ids.pool[i] = i + 1;
4388 		pdev->local_peer_ids.map[i] = NULL;
4389 	}
4390 
4391 	/* link the last ID to itself, to mark the end of the list */
4392 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
4393 	pdev->local_peer_ids.pool[i] = i;
4394 
4395 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
4396 	DP_TRACE(INFO, "Peer pool init");
4397 }
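
/*
 * Illustrative freelist state after dp_local_peer_id_pool_init(), assuming a
 * hypothetical OL_TXRX_NUM_LOCAL_PEER_IDS of 4:
 *
 *	freelist = 0
 *	pool[]   = { 1, 2, 3, 4, 4 }   (pool[4] == 4 marks the end of the list)
 *	map[]    = { NULL, NULL, NULL, NULL }
 *
 * dp_local_peer_id_alloc() pops the head id (the peer gets id 0 and the
 * freelist moves to 1); dp_local_peer_id_free() pushes the id back onto the
 * head.
 */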
4398 
4399 /**
4400  * dp_local_peer_id_alloc() - allocate local peer id
4401  * @pdev - data path device instance
4402  * @peer - new peer instance
4403  *
4404  * allocate local peer id
4405  *
4406  * Return: none
4407  */
4408 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
4409 {
4410 	int i;
4411 
4412 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4413 	i = pdev->local_peer_ids.freelist;
4414 	if (pdev->local_peer_ids.pool[i] == i) {
4415 		/* the list is empty, except for the list-end marker */
4416 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
4417 	} else {
4418 		/* take the head ID and advance the freelist */
4419 		peer->local_id = i;
4420 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
4421 		pdev->local_peer_ids.map[i] = peer;
4422 	}
4423 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4424 	dp_info("peer %pK, local id %d", peer, peer->local_id);
4425 }
4426 
4427 /**
4428  * dp_local_peer_id_free() - remove local peer id
4429  * @pdev - data path device instance
4430  * @peer - peer instance should be removed
4431  *
4432  * remove local peer id
4433  *
4434  * Return: none
4435  */
4436 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
4437 {
4438 	int i = peer->local_id;
4439 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
4440 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
4441 		return;
4442 	}
4443 
4444 	/* put this ID on the head of the freelist */
4445 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4446 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
4447 	pdev->local_peer_ids.freelist = i;
4448 	pdev->local_peer_ids.map[i] = NULL;
4449 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4450 }
4451 
4452 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
4453 				uint8_t vdev_id, uint8_t *peer_addr)
4454 {
4455 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4456 	struct dp_peer *peer = NULL;
4457 
4458 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
4459 				      DP_MOD_ID_CDP);
4460 	if (!peer)
4461 		return false;
4462 
4463 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4464 
4465 	return true;
4466 }
4467 
4468 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
4469 				      uint8_t vdev_id, uint8_t *peer_addr,
4470 				      uint16_t max_bssid)
4471 {
4472 	int i;
4473 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4474 	struct dp_peer *peer = NULL;
4475 
4476 	for (i = 0; i < max_bssid; i++) {
4477 		/* Need to check vdevs other than the vdev_id */
4478 		if (vdev_id == i)
4479 			continue;
4480 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
4481 					      DP_MOD_ID_CDP);
4482 		if (peer) {
4483 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
4484 			       QDF_MAC_ADDR_REF(peer_addr), i);
4485 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4486 			return true;
4487 		}
4488 	}
4489 
4490 	return false;
4491 }
4492 
4493 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4494 			uint8_t *peer_addr)
4495 {
4496 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4497 	struct dp_peer *peer = NULL;
4498 
4499 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
4500 				      DP_MOD_ID_CDP);
4501 	if (peer) {
4502 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4503 		return true;
4504 	}
4505 
4506 	return false;
4507 }
4508 #endif
4509 
4510 /**
4511  * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
4512  * @peer: DP peer handle
4513  * @dp_stats_cmd_cb: REO command callback function
4514  * @cb_ctxt: Callback context
4515  *
4516  * Return: count of tid stats cmd send succeeded
4517  */
4518 int dp_peer_rxtid_stats(struct dp_peer *peer,
4519 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
4520 			void *cb_ctxt)
4521 {
4522 	struct dp_soc *soc = peer->vdev->pdev->soc;
4523 	struct hal_reo_cmd_params params;
4524 	int i;
4525 	int stats_cmd_sent_cnt = 0;
4526 	QDF_STATUS status;
4527 
4528 	if (!dp_stats_cmd_cb)
4529 		return stats_cmd_sent_cnt;
4530 
4531 	qdf_mem_zero(&params, sizeof(params));
4532 	for (i = 0; i < DP_MAX_TIDS; i++) {
4533 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4534 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4535 			params.std.need_status = 1;
4536 			params.std.addr_lo =
4537 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4538 			params.std.addr_hi =
4539 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4540 
4541 			if (cb_ctxt) {
4542 				status = dp_reo_send_cmd(
4543 						soc, CMD_GET_QUEUE_STATS,
4544 						&params, dp_stats_cmd_cb,
4545 						cb_ctxt);
4546 			} else {
4547 				status = dp_reo_send_cmd(
4548 						soc, CMD_GET_QUEUE_STATS,
4549 						&params, dp_stats_cmd_cb,
4550 						rx_tid);
4551 			}
4552 
4553 			if (QDF_IS_STATUS_SUCCESS(status))
4554 				stats_cmd_sent_cnt++;
4555 
4556 			/* Flush REO descriptor from HW cache to update stats
4557 			 * in descriptor memory. This is to help debugging */
4558 			qdf_mem_zero(&params, sizeof(params));
4559 			params.std.need_status = 0;
4560 			params.std.addr_lo =
4561 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4562 			params.std.addr_hi =
4563 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4564 			params.u.fl_cache_params.flush_no_inval = 1;
4565 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
4566 				NULL);
4567 		}
4568 	}
4569 
4570 	return stats_cmd_sent_cnt;
4571 }
4572 
4573 QDF_STATUS
4574 dp_set_michael_key(struct cdp_soc_t *soc,
4575 		   uint8_t vdev_id,
4576 		   uint8_t *peer_mac,
4577 		   bool is_unicast, uint32_t *key)
4578 {
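	/* Index 1 is used for the unicast key and 0 for the group key, which
	 * is assumed to mirror the dp_sec_ucast/dp_sec_mcast indexing used
	 * elsewhere in this file.
	 */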
4579 	uint8_t sec_index = is_unicast ? 1 : 0;
4580 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4581 						      peer_mac, 0, vdev_id,
4582 						      DP_MOD_ID_CDP);
4583 
4584 	if (!peer) {
4585 		dp_peer_err("%pK: peer not found ", soc);
4586 		return QDF_STATUS_E_FAILURE;
4587 	}
4588 
4589 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
4590 		     key, IEEE80211_WEP_MICLEN);
4591 
4592 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4593 
4594 	return QDF_STATUS_SUCCESS;
4595 }
4596 
4597 
4598 /**
4599  * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
4600  * @soc: DP soc
4601  * @vdev: vdev
4602  * @mod_id: id of module requesting reference
4603  *
4604  * Return: VDEV BSS peer
4605  */
4606 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
4607 					   struct dp_vdev *vdev,
4608 					   enum dp_mod_id mod_id)
4609 {
4610 	struct dp_peer *peer = NULL;
4611 
4612 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4613 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4614 		if (peer->bss_peer)
4615 			break;
4616 	}
4617 
4618 	if (!peer) {
4619 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4620 		return NULL;
4621 	}
4622 
4623 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4624 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4625 		return peer;
4626 	}
4627 
4628 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4629 	return peer;
4630 }
4631 
4632 /**
4633  * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
4634  * @soc: DP soc
4635  * @vdev: vdev
4636  * @mod_id: id of module requesting reference
4637  *
4638  * Return: VDEV self peer
4639  */
4640 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
4641 						struct dp_vdev *vdev,
4642 						enum dp_mod_id mod_id)
4643 {
4644 	struct dp_peer *peer;
4645 
4646 	if (vdev->opmode != wlan_op_mode_sta)
4647 		return NULL;
4648 
4649 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4650 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4651 		if (peer->sta_self_peer)
4652 			break;
4653 	}
4654 
4655 	if (!peer) {
4656 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4657 		return NULL;
4658 	}
4659 
4660 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4661 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4662 		return peer;
4663 	}
4664 
4665 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4666 	return peer;
4667 }
4668 
4669 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
4670 void dp_dump_rx_reo_queue_info(
4671 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
4672 {
4673 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
4674 
4675 	if (!rx_tid)
4676 		return;
4677 
4678 	if (reo_status->fl_cache_status.header.status !=
4679 		HAL_REO_CMD_SUCCESS) {
4680 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
4681 			  reo_status->fl_cache_status.header.status);
4682 		return;
4683 	}
4684 	qdf_spin_lock_bh(&rx_tid->tid_lock);
4685 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
4686 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
4687 }
4688 
4689 void dp_send_cache_flush_for_rx_tid(
4690 	struct dp_soc *soc, struct dp_peer *peer)
4691 {
4692 	int i;
4693 	struct dp_rx_tid *rx_tid;
4694 	struct hal_reo_cmd_params params;
4695 
4696 	if (!peer) {
4697 		dp_err_rl("Peer is NULL");
4698 		return;
4699 	}
4700 
4701 	for (i = 0; i < DP_MAX_TIDS; i++) {
4702 		rx_tid = &peer->rx_tid[i];
4703 		if (!rx_tid)
4704 			continue;
4705 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4706 		if (rx_tid->hw_qdesc_vaddr_aligned) {
4707 			qdf_mem_zero(&params, sizeof(params));
4708 			params.std.need_status = 1;
4709 			params.std.addr_lo =
4710 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4711 			params.std.addr_hi =
4712 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4713 			params.u.fl_cache_params.flush_no_inval = 0;
4714 			if (QDF_STATUS_SUCCESS !=
4715 				dp_reo_send_cmd(
4716 					soc, CMD_FLUSH_CACHE,
4717 					&params, dp_dump_rx_reo_queue_info,
4718 					(void *)rx_tid)) {
4719 				dp_err_rl("cache flush send failed tid %d",
4720 					  rx_tid->tid);
4721 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
4722 				break;
4723 			}
4724 		}
4725 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4726 	}
4727 }
4728 
4729 void dp_get_rx_reo_queue_info(
4730 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
4731 {
4732 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4733 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4734 						     DP_MOD_ID_GENERIC_STATS);
4735 	struct dp_peer *peer = NULL;
4736 
4737 	if (!vdev) {
4738 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
4739 		goto failed;
4740 	}
4741 
4742 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
4743 
4744 	if (!peer) {
4745 		dp_err_rl("Peer is NULL");
4746 		goto failed;
4747 	}
4748 	dp_send_cache_flush_for_rx_tid(soc, peer);
4749 failed:
4750 	if (peer)
4751 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
4752 	if (vdev)
4753 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
4754 }
4755 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
4756 
4757 void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4758 			 uint8_t *peer_mac)
4759 {
4760 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4761 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
4762 						      vdev_id, DP_MOD_ID_CDP);
4763 	struct dp_rx_tid *rx_tid;
4764 	uint8_t tid;
4765 
4766 	if (!peer)
4767 		return;
4768 
4769 	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
4770 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
4771 
4772 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
4773 		rx_tid = &peer->rx_tid[tid];
4774 
4775 		qdf_spin_lock_bh(&rx_tid->tid_lock);
4776 		dp_rx_defrag_waitlist_remove(peer, tid);
4777 		dp_rx_reorder_flush_frag(peer, tid);
4778 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
4779 	}
4780 
4781 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4782 }
4783