xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision e9dba9646bfd1954b96d80bae0adc757244cbde8)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#endif

#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif

#ifdef FEATURE_WDS
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	/* If peer map v2 is enabled, the AST entry is not freed here;
	 * it is supposed to be freed in the unmap event (after we
	 * receive the delete confirmation from target).
	 *
	 * If peer_id is invalid, we did not get the peer map event for
	 * the peer; free the AST entry from here only in that case.
	 */

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
		return true;

	return false;
}
#else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	return false;
}

static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	dp_peer_debug("Setting SSN valid bit to %d",
		      valid);
}

static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * Because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
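
/*
 * Illustrative sketch (not part of the driver): how the aligned-union
 * compare above behaves. A dp_align_mac_addr overlays the 6-byte MAC as
 * one 4-byte word (align4.bytes_abcd) plus one 2-byte word
 * (align4.bytes_ef), so equality needs only two integer compares.
 * The MAC values below are hypothetical.
 */
#if 0
static void dp_mac_cmp_example(void)
{
	union dp_align_mac_addr a = {
		.raw = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
	};
	union dp_align_mac_addr b = a;

	/* 0 means "equal"; both word compares must match */
	QDF_ASSERT(dp_peer_find_mac_addr_cmp(&a, &b) == 0);

	b.raw[5] = 0x66;	/* now differs only in bytes_ef */
	QDF_ASSERT(dp_peer_find_mac_addr_cmp(&a, &b) != 0);
}
#endif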

static QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
{
	uint32_t max_ast_index;

	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/* allocate ast_table for ast entry to ast_index map */
	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
	soc->ast_table = qdf_mem_malloc(max_ast_index *
					sizeof(struct dp_ast_entry *));
	if (!soc->ast_table) {
		dp_peer_err("%pK: ast_table memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS; /* success */
}

/*
 * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		dp_peer_err("%pK: peer map memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);

	qdf_spinlock_create(&soc->peer_map_lock);
	return QDF_STATUS_SUCCESS; /* success */
}

static int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}
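
/*
 * Illustrative sketch (not part of the driver): dp_log2_ceil() returns
 * the smallest n with (1 << n) >= value. The hash attach routines below
 * use it to round a requested element count up to a power of two so a
 * bin index can be computed with a mask instead of a modulo.
 */
#if 0
static void dp_log2_ceil_example(void)
{
	QDF_ASSERT(dp_log2_ceil(1024) == 10);	/* exact power of two */
	QDF_ASSERT(dp_log2_ceil(1025) == 11);	/* rounded up */
	QDF_ASSERT(dp_log2_ceil(7) == 3);	/* 1 << 3 = 8 >= 7 */
}
#endif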

#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

#define DP_AST_HASH_LOAD_MULT  2
#define DP_AST_HASH_LOAD_SHIFT 0

/*
 * dp_peer_find_hash_attach() - allocate memory for peer_hash table
 * @soc: soc handle
 *
 * return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
 * @soc: soc handle
 *
 * return: none
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}
}

static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
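
/*
 * Illustrative sketch (not part of the driver): the hash above XOR-folds
 * the MAC address as three 16-bit words (the exact word values depend on
 * host endianness), folds the high index bits down, then masks. Because
 * the mask is (bins - 1) for a power-of-two bin count, the result is
 * always a valid bin index.
 */
#if 0
static void dp_hash_index_example(struct dp_soc *soc)
{
	/* hypothetical MAC; any address maps inside the bin array */
	union dp_align_mac_addr mac = {
		.raw = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
	};
	unsigned index = dp_peer_find_hash_index(soc, &mac);

	QDF_ASSERT(index <= soc->peer_hash.mask);
}
#endif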

/*
 * dp_peer_find_hash_add() - add peer to peer_hash_table
 * @soc: soc handle
 * @peer: peer handle
 *
 * return: none
 */
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
		return;
	}

	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index.  Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);

	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

/*
 * dp_peer_vdev_list_add() - add peer into vdev's peer list
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer: peer handle
 *
 * return: none
 */
void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return;
	}

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	vdev->num_peers++;
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
 * @soc: SoC handle
 * @vdev: VDEV handle
 * @peer: peer handle
 *
 * Return: none
 */
void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer)
{
	uint8_t found = 0;
	struct dp_peer *tmppeer = NULL;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
			     peer_list_elem);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		vdev->num_peers--;
	} else {
		/* Ignoring the remove operation as peer not found */
		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK",
			      soc, peer, vdev, &peer->vdev->peer_list);
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/*
 * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
 * @soc: SoC handle
 * @peer: peer handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peers);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
	} else {
		/* Peer map event came for peer_id which
		 * is already mapped, this is not expected
		 */
		QDF_ASSERT(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
 * @soc: SoC handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id)
{
	struct dp_peer *peer = NULL;

	QDF_ASSERT(peer_id <= soc->max_peers);

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = soc->peer_id_to_obj_map[peer_id];
	soc->peer_id_to_obj_map[peer_id] = NULL;
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

/*
 * dp_peer_exist_on_pdev - check if peer with mac address exist on pdev
 *
 * @soc: Datapath SOC handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac address aligned
 * @pdev: Datapath PDEV handle
 *
 * Return: true if peer found else return false
 */
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return found;
}

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	int log2, hash_elems, i;

	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
	hash_elems = 1 << log2;

	soc->mec_hash.mask = hash_elems - 1;
	soc->mec_hash.idx_bits = log2;

	dp_peer_info("%pK: max mec index: %d",
		     soc, DP_PEER_MAX_MEC_IDX);

	/* allocate an array of TAILQ mec object lists */
	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
					    sizeof(TAILQ_HEAD(anonymous_tail_q,
							      dp_mec_entry)));

	if (!soc->mec_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->mec_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: MEC hash
 */
static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->mec_hash.idx_bits;
	index &= soc->mec_hash.mask;
	return index;
}

struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}

/**
 * dp_peer_mec_hash_add() - Add MEC entry into hash table
 * @soc: SoC handle
 * @mecentry: MEC entry
 *
 * This function adds the MEC entry into SoC MEC hash table
 *
 * Return: None
 */
static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
					struct dp_mec_entry *mecentry)
{
	uint32_t index;

	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
	qdf_spin_lock_bh(&soc->mec_lock);
	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
	qdf_spin_unlock_bh(&soc->mec_lock);
}

QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peer's vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
					 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	dp_peer_mec_hash_add(soc, mecentry);

	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}
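
/*
 * Illustrative sketch (not part of the driver): a typical caller flow
 * for dp_peer_mec_add_entry() from the rx path. The function name and
 * parameters here are hypothetical; note QDF_STATUS_E_ALREADY just
 * means the existing entry was refreshed and is not an error.
 */
#if 0
static void dp_mec_add_example(struct dp_soc *soc, struct dp_vdev *vdev,
			       uint8_t *src_mac)
{
	QDF_STATUS status = dp_peer_mec_add_entry(soc, vdev, src_mac);

	if (status == QDF_STATUS_E_ALREADY) {
		/* existing entry was marked active again */
	} else if (QDF_IS_STATUS_ERROR(status)) {
		/* table full (E_NOMEM) or bad vdev (E_INVAL) */
	}
}
#endif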

void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}

void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
{
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
			   mecentry_next) {
		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
		qdf_mem_free(mecentry);
		qdf_atomic_dec(&soc->mec_cnt);
		DP_STATS_INC(soc, mec.deleted, 1);
	}
}

/**
 * dp_peer_mec_hash_detach() - Free MEC Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}

void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
#else
static QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
#endif

#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
		     soc, hash_elems, max_ast_idx);

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	/* Call the callback to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	if (!soc->ast_hash.bins)
		return;

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				soc->num_ast_entries--;
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
	soc->ast_hash.bins = NULL;
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 *
 * This function adds the AST entry into SoC AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 *
 * This function removes the AST entry from soc AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the bin list is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((vdev_id == ase->vdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 * @soc: SoC handle
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (!peer)
		return QDF_STATUS_E_INVAL;

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * In certain cases, such as an Auth attack on a
		 * repeater, the number of ast_entries falling in the
		 * same hash bucket can exceed the max_skid length
		 * supported by HW in the root AP. In these cases the
		 * FW returns the hw_peer_id (ast_index) as 0xffff,
		 * indicating HW could not add the entry in its table.
		 * Host has to delete the entry from its table in these
		 * cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_id,
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id, DP_MOD_ID_AST);
	if (peer) {
		err = dp_peer_add_ast(soc, peer,
				      &param->mac_addr.raw[0],
				      param->type,
				      param->flags);

		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
					param->type, err, false);

		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}
	qdf_mem_free(cookie);
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	uint8_t next_node_mac[6];
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *vap_bss_peer = NULL;
	bool is_peer_found = false;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peer's vdev is NULL", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);

	qdf_spin_lock_bh(&soc->ast_lock);

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (type != CDP_TXRX_AST_TYPE_SELF)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_BUSY;
		}
	}

	dp_peer_debug("%pK: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id, type, flags,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/* FW supports only twice the max_peers AST entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: Max ast entries reached", soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* If an AST entry already exists, just return from here.
	 * AST entries with the same mac address can exist on different
	 * radios; if ast_override support is enabled, use search by
	 * pdev in this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}

		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}
		}
	} else {
		/* HMWDS_SEC entries can be added for the same mac
		 * address; do not check for an existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return QDF_STATUS_E_ALREADY;
			}

			/* An add for an HMWDS entry cannot be ignored if
			 * there is an AST entry with the same mac address.
			 *
			 * If an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which can take care of adding the HMWDS
			 * ast entry on delete confirmation from target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return QDF_STATUS_E_NOMEM;
				}

				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				/* Call the saved callback */
				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
				}
				return QDF_STATUS_E_AGAIN;
			}

			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_E_ALREADY;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->peer_id = HTT_INVALID_PEER;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
		break;
	case CDP_TXRX_AST_TYPE_DA:
		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
							  DP_MOD_ID_AST);
		if (!vap_bss_peer) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			qdf_mem_free(ast_entry);
			return QDF_STATUS_E_FAILURE;
		}
		peer = vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		dp_peer_err("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	qdf_copy_macaddr((struct qdf_mac_addr *)next_node_mac,
			 (struct qdf_mac_addr *)peer->mac_addr.raw);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
		if (QDF_STATUS_SUCCESS ==
				soc->cdp_soc.ol_ops->peer_add_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				peer->mac_addr.raw,
				peer->peer_id,
				mac_addr,
				next_node_mac,
				flags,
				ast_entry->type)) {
			if (vap_bss_peer)
				dp_peer_unref_delete(vap_bss_peer,
						     DP_MOD_ID_AST);
			qdf_spin_unlock_bh(&soc->ast_lock);
			return QDF_STATUS_SUCCESS;
		}
	}

	if (vap_bss_peer)
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
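
/*
 * Illustrative sketch (not part of the driver): adding a WDS AST entry
 * from source-port learning. The function name and parameters are
 * hypothetical. E_ALREADY and E_AGAIN are expected, recoverable
 * outcomes (entry exists, or a delete-then-re-add was scheduled through
 * dp_peer_free_hmwds_cb); only treat other statuses as failures.
 */
#if 0
static void dp_wds_learn_example(struct dp_soc *soc, struct dp_peer *peer,
				 uint8_t *sa)
{
	QDF_STATUS status = dp_peer_add_ast(soc, peer, sa,
					    CDP_TXRX_AST_TYPE_WDS, 0);

	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_ALREADY &&
	    status != QDF_STATUS_E_AGAIN)
		dp_peer_err("%pK: WDS AST add failed", soc);
}
#endif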

/*
 * dp_peer_free_ast_entry() - Free up the ast entry memory
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to free up the memory associated with
 * AST entry.
 *
 * Return: None
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}

/*
 * dp_peer_unlink_ast_entry() - Unlink the ast entry from the peer
 * @soc: SoC handle
 * @ast_entry: Address search entry
 * @peer: peer
 *
 * This API is used to remove/unlink AST entry from the peer list
 * and hash list.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
	if (!peer) {
		dp_err_rl("NULL peer");
		return;
	}

	if (ast_entry->peer_id == HTT_INVALID_PEER) {
		dp_err_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */

	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	ast_entry->peer_id = HTT_INVALID_PEER;
}

/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = NULL;

	if (!ast_entry) {
		dp_err_rl("NULL AST entry");
		return;
	}

	if (ast_entry->delete_in_progress) {
		dp_err_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
			  ast_entry->type);
		return;
	}

	ast_entry->delete_in_progress = true;

	/* In teardown del ast is called after setting logical delete state
	 * use __dp_peer_get_ref_by_id to get the reference irrespective of
	 * state
	 */
	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				       DP_MOD_ID_AST);

	dp_peer_ast_send_wds_del(soc, ast_entry, peer);

	/* Remove SELF and STATIC entries in teardown itself */
	if (!ast_entry->next_hop)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* If peer map v2 is enabled, the AST entry is not freed here;
	 * it is supposed to be freed in the unmap event (after we
	 * receive the delete confirmation from target).
	 *
	 * If peer_id is invalid, we did not get the peer map event for
	 * the peer; free the AST entry from here only in that case.
	 */
	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
		goto end;

	/* For a WDS secondary entry, ast_entry->next_hop would be set,
	 * so unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry, an unmap notification
	 * from FW will not come. Hence unlinking is done right here.
	 */
	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

end:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}
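
/*
 * Illustrative sketch (not part of the driver): dp_peer_del_ast() only
 * marks next_hop entries delete_in_progress and sends the WDS delete to
 * FW; the memory is reclaimed later, in the unmap path, where any
 * registered callback fires with CDP_TXRX_AST_DELETED. The wrapper
 * below is hypothetical and just shows the required ast_lock usage.
 */
#if 0
static void dp_del_ast_example(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_ast_entry *ase;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find_soc(soc, mac);
	if (ase)
		dp_peer_del_ast(soc, ase);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif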

/*
 * dp_peer_update_ast() - Update AST entry for a roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
		      soc, ast_entry->type, peer->vdev->pdev->pdev_id,
		      peer->vdev->vdev_id, flags,
		      QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	/* Do not send AST update in below cases
	 *  1) Ast entry delete has already triggered
	 *  2) Peer delete is already triggered
	 *  3) We did not get the HTT map for create event
	 */
	if (ast_entry->delete_in_progress ||
	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
	    !ast_entry->is_mapped)
		return ret;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	/*
	 * Avoids flood of WMI update messages sent to FW for same peer.
	 */
	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
	    (ast_entry->is_active))
		return 0;

	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
					 DP_MOD_ID_AST);
	if (!old_peer)
		return 0;

	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);

	ast_entry->peer_id = peer->peer_id;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				ast_entry->mac_addr.raw,
				peer->mac_addr.raw,
				flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}

#else
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
			   struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags)
{
	return QDF_STATUS_E_FAILURE;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}

static QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

#endif

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer)
{
	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
	bool delete_in_fw = false;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
		  __func__, ast_entry->type, ast_entry->pdev_id,
		  ast_entry->vdev_id,
		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
		  ast_entry->next_hop, ast_entry->peer_id);

	/*
	 * If peer state is logical delete, the peer is about to be
	 * torn down with a peer delete command to firmware,
	 * which will clean up all the wds ast entries.
	 * So, no need to send an explicit wds ast delete to firmware.
	 */
	if (ast_entry->next_hop) {
		if (peer && dp_peer_state_cmp(peer,
					      DP_PEER_STATE_LOGICAL_DELETE))
			delete_in_fw = false;
		else
			delete_in_fw = true;

		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						    ast_entry->vdev_id,
						    ast_entry->mac_addr.raw,
						    ast_entry->type,
						    delete_in_fw);
	}
}

#ifdef FEATURE_WDS
/**
 * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
 * @soc: soc handle
 * @peer: peer handle
 *
 * Free all the wds ast entries associated with peer
 *
 * Return: Number of wds ast entries freed
 */
static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
					     struct dp_peer *peer)
{
	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
	struct dp_ast_entry *ast_entry, *temp_ast_entry;
	uint32_t num_ast = 0;

	TAILQ_INIT(&ast_local_list);
	qdf_spin_lock_bh(&soc->ast_lock);

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
		if (ast_entry->next_hop)
			num_ast++;

		if (ast_entry->is_mapped)
			soc->ast_table[ast_entry->ast_idx] = NULL;

		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
		DP_STATS_INC(soc, ast.deleted, 1);
		dp_peer_ast_hash_remove(soc, ast_entry);
		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
				  ase_list_elem);
		soc->num_ast_entries--;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
			   temp_ast_entry) {
		if (ast_entry->callback)
			ast_entry->callback(soc->ctrl_psoc,
					    dp_soc_to_cdp_soc(soc),
					    ast_entry->cookie,
					    CDP_TXRX_AST_DELETED);

		qdf_mem_free(ast_entry);
	}

	return num_ast;
}

/**
 * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
 * @soc: soc handle
 * @peer: peer handle
 * @free_wds_count: number of wds entries freed by FW with peer delete
 *
 * Free all the wds ast entries associated with peer and compare with
 * the value received from firmware
 *
 * Return: None
 */
static void
dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
			  uint32_t free_wds_count)
{
	uint32_t wds_deleted = 0;

	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
	    (free_wds_count != wds_deleted)) {
		DP_STATS_INC(soc, ast.ast_mismatch, 1);
		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 free_wds_count, wds_deleted);
	}
}

#else
static void
dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
			  uint32_t free_wds_count)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
		dp_peer_unlink_ast_entry(soc, ast_entry, peer);

		if (ast_entry->is_mapped)
			soc->ast_table[ast_entry->ast_idx] = NULL;

		dp_peer_free_ast_entry(soc, ast_entry);
	}

	peer->self_ast_entry = NULL;
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif

/**
 * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
 * @soc: soc handle
 * @peer: peer handle
 * @vdev_id: vdev_id
 * @mac_addr: mac address of the AST entry to search and delete
 *
 * find the ast entry from the peer list using the mac address and free
 * the entry.
 *
 * Return: SUCCESS or NOENT
 */
static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t vdev_id,
					 uint8_t *mac_addr)
{
	struct dp_ast_entry *ast_entry;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_NOENT;
	} else if (ast_entry->is_mapped) {
		soc->ast_table[ast_entry->ast_idx] = NULL;
	}

	cb = ast_entry->callback;
	cookie = ast_entry->cookie;

	dp_peer_unlink_ast_entry(soc, ast_entry, peer);

	dp_peer_free_ast_entry(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}

	return QDF_STATUS_SUCCESS;
}
1856 
1857 /*
1858  * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
1859  *                            vdev_id and mac_address
1860  * @soc: soc handle
1861  * @peer_mac_addr: peer mac address
1862  * @mac_addr_is_aligned: is mac addr aligned
1863  * @vdev_id: vdev_id
1864  * @mod_id: id of module requesting reference
1865  *
1866  * Return: peer on success
1867  *         NULL on failure
1868  */
1869 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1870 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id,
1871 	enum dp_mod_id mod_id)
1872 {
1873 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1874 	unsigned index;
1875 	struct dp_peer *peer;
1876 
1877 	if (!soc->peer_hash.bins)
1878 		return NULL;
1879 
1880 	if (mac_addr_is_aligned) {
1881 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1882 	} else {
1883 		qdf_mem_copy(
1884 			&local_mac_addr_aligned.raw[0],
1885 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1886 		mac_addr = &local_mac_addr_aligned;
1887 	}
1888 	index = dp_peer_find_hash_index(soc, mac_addr);
1889 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1890 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1891 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1892 			((peer->vdev->vdev_id == vdev_id) ||
1893 			 (vdev_id == DP_VDEV_ALL))) {
1894 			/* take peer reference before returning */
1895 			if (dp_peer_get_ref(soc, peer, mod_id) !=
1896 						QDF_STATUS_SUCCESS)
1897 				peer = NULL;
1898 
1899 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
1900 			return peer;
1901 		}
1902 	}
1903 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1904 	return NULL; /* failure */
1905 }
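
/*
 * Usage sketch (illustrative, mirrors the callers later in this file):
 * a successful lookup takes a reference tagged with the caller's mod_id,
 * which must be released with dp_peer_unref_delete() using the same id:
 *
 *	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *	... use peer ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */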
1906 
1907 /*
1908  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
1909  * @soc: soc handle
1910  * @peer: peer handle
1911  *
1912  * return: none
1913  */
1914 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1915 {
1916 	unsigned index;
1917 	struct dp_peer *tmppeer = NULL;
1918 	int found = 0;
1919 
1920 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1921 	/* Check if the hash bin list is not empty before delete */
1922 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1923 
1924 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1925 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1926 		if (tmppeer == peer) {
1927 			found = 1;
1928 			break;
1929 		}
1930 	}
1931 	QDF_ASSERT(found);
1932 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1933 
1934 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
1935 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1936 }
1937 
1938 void dp_peer_find_hash_erase(struct dp_soc *soc)
1939 {
1940 	int i, j;
1941 
1942 	/*
1943 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1944 	 * it's known that the soc is no longer in use.
1945 	 */
1946 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1947 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1948 			struct dp_peer *peer, *peer_next;
1949 
1950 			/*
1951 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1952 			 * memory access violation after peer is freed
1953 			 */
1954 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1955 				hash_list_elem, peer_next) {
1956 				/*
1957 				 * Don't remove the peer from the hash table -
1958 				 * that would modify the list we are currently
1959 				 * traversing, and it's not necessary anyway.
1960 				 */
1961 				/*
1962 				 * Artificially adjust the peer's ref count to
1963 				 * 1, so it will get deleted by
1964 				 * dp_peer_unref_delete.
1965 				 */
1966 				/* set to zero */
1967 				qdf_atomic_init(&peer->ref_cnt);
1968 				for (j = 0; j < DP_MOD_ID_MAX; j++)
1969 					qdf_atomic_init(&peer->mod_refs[j]);
1970 				/* incr to one */
1971 				qdf_atomic_inc(&peer->ref_cnt);
1972 				qdf_atomic_inc(&peer->mod_refs
1973 						[DP_MOD_ID_CONFIG]);
1974 				dp_peer_unref_delete(peer,
1975 						     DP_MOD_ID_CONFIG);
1976 			}
1977 		}
1978 	}
1979 }
1980 
1981 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1982 {
1983 	if (soc->ast_table) {
1984 		qdf_mem_free(soc->ast_table);
1985 		soc->ast_table = NULL;
1986 	}
1987 }
1988 
1989 /*
1990  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
1991  * @soc: soc handle
1992  *
1993  * return: none
1994  */
1995 static void dp_peer_find_map_detach(struct dp_soc *soc)
1996 {
1997 	if (soc->peer_id_to_obj_map) {
1998 		qdf_mem_free(soc->peer_id_to_obj_map);
1999 		soc->peer_id_to_obj_map = NULL;
2000 		qdf_spinlock_destroy(&soc->peer_map_lock);
2001 	}
2002 }
2003 
2004 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2005 {
2006 	QDF_STATUS status;
2007 
2008 	status = dp_peer_find_map_attach(soc);
2009 	if (!QDF_IS_STATUS_SUCCESS(status))
2010 		return status;
2011 
2012 	status = dp_peer_find_hash_attach(soc);
2013 	if (!QDF_IS_STATUS_SUCCESS(status))
2014 		goto map_detach;
2015 
2016 	status = dp_peer_ast_table_attach(soc);
2017 	if (!QDF_IS_STATUS_SUCCESS(status))
2018 		goto hash_detach;
2019 
2020 	status = dp_peer_ast_hash_attach(soc);
2021 	if (!QDF_IS_STATUS_SUCCESS(status))
2022 		goto ast_table_detach;
2023 
2024 	status = dp_peer_mec_hash_attach(soc);
2025 	if (QDF_IS_STATUS_SUCCESS(status)) {
2026 		dp_soc_wds_attach(soc);
2027 		return status;
2028 	}
2029 
2030 	dp_peer_ast_hash_detach(soc);
2031 ast_table_detach:
2032 	dp_peer_ast_table_detach(soc);
2033 hash_detach:
2034 	dp_peer_find_hash_detach(soc);
2035 map_detach:
2036 	dp_peer_find_map_detach(soc);
2037 
2038 	return status;
2039 }
2040 
2041 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2042 	union hal_reo_status *reo_status)
2043 {
2044 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2045 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2046 
2047 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
2048 		return;
2049 
2050 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2051 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
2052 			       queue_status->header.status, rx_tid->tid);
2053 		return;
2054 	}
2055 
2056 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
2057 		       "ssn: %d\n"
2058 		       "curr_idx  : %d\n"
2059 		       "pn_31_0   : %08x\n"
2060 		       "pn_63_32  : %08x\n"
2061 		       "pn_95_64  : %08x\n"
2062 		       "pn_127_96 : %08x\n"
2063 		       "last_rx_enq_tstamp : %08x\n"
2064 		       "last_rx_deq_tstamp : %08x\n"
2065 		       "rx_bitmap_31_0     : %08x\n"
2066 		       "rx_bitmap_63_32    : %08x\n"
2067 		       "rx_bitmap_95_64    : %08x\n"
2068 		       "rx_bitmap_127_96   : %08x\n"
2069 		       "rx_bitmap_159_128  : %08x\n"
2070 		       "rx_bitmap_191_160  : %08x\n"
2071 		       "rx_bitmap_223_192  : %08x\n"
2072 		       "rx_bitmap_255_224  : %08x\n",
2073 		       rx_tid->tid,
2074 		       queue_status->ssn, queue_status->curr_idx,
2075 		       queue_status->pn_31_0, queue_status->pn_63_32,
2076 		       queue_status->pn_95_64, queue_status->pn_127_96,
2077 		       queue_status->last_rx_enq_tstamp,
2078 		       queue_status->last_rx_deq_tstamp,
2079 		       queue_status->rx_bitmap_31_0,
2080 		       queue_status->rx_bitmap_63_32,
2081 		       queue_status->rx_bitmap_95_64,
2082 		       queue_status->rx_bitmap_127_96,
2083 		       queue_status->rx_bitmap_159_128,
2084 		       queue_status->rx_bitmap_191_160,
2085 		       queue_status->rx_bitmap_223_192,
2086 		       queue_status->rx_bitmap_255_224);
2087 
2088 	DP_PRINT_STATS(
2089 		       "curr_mpdu_cnt      : %d\n"
2090 		       "curr_msdu_cnt      : %d\n"
2091 		       "fwd_timeout_cnt    : %d\n"
2092 		       "fwd_bar_cnt        : %d\n"
2093 		       "dup_cnt            : %d\n"
2094 		       "frms_in_order_cnt  : %d\n"
2095 		       "bar_rcvd_cnt       : %d\n"
2096 		       "mpdu_frms_cnt      : %d\n"
2097 		       "msdu_frms_cnt      : %d\n"
2098 		       "total_byte_cnt     : %d\n"
2099 		       "late_recv_mpdu_cnt : %d\n"
2100 		       "win_jump_2k        : %d\n"
2101 		       "hole_cnt           : %d\n",
2102 		       queue_status->curr_mpdu_cnt,
2103 		       queue_status->curr_msdu_cnt,
2104 		       queue_status->fwd_timeout_cnt,
2105 		       queue_status->fwd_bar_cnt,
2106 		       queue_status->dup_cnt,
2107 		       queue_status->frms_in_order_cnt,
2108 		       queue_status->bar_rcvd_cnt,
2109 		       queue_status->mpdu_frms_cnt,
2110 		       queue_status->msdu_frms_cnt,
2111 		       queue_status->total_cnt,
2112 		       queue_status->late_recv_mpdu_cnt,
2113 		       queue_status->win_jump_2k,
2114 		       queue_status->hole_cnt);
2115 
2116 	DP_PRINT_STATS("Addba Req          : %d\n"
2117 			"Addba Resp         : %d\n"
2118 			"Addba Resp success : %d\n"
2119 			"Addba Resp failed  : %d\n"
2120 			"Delba Req received : %d\n"
2121 			"Delba Tx success   : %d\n"
2122 			"Delba Tx Fail      : %d\n"
2123 			"BA window size     : %d\n"
2124 			"Pn size            : %d\n",
2125 			rx_tid->num_of_addba_req,
2126 			rx_tid->num_of_addba_resp,
2127 			rx_tid->num_addba_rsp_success,
2128 			rx_tid->num_addba_rsp_failed,
2129 			rx_tid->num_of_delba_req,
2130 			rx_tid->delba_tx_success_cnt,
2131 			rx_tid->delba_tx_fail_cnt,
2132 			rx_tid->ba_win_size,
2133 			rx_tid->pn_size);
2134 }
2135 
2136 /*
2137  * dp_peer_find_add_id() - map peer_id with peer
2138  * @soc: soc handle
2139  * @peer_mac_addr: peer mac address
2140  * @peer_id: peer id to be mapped
2141  * @hw_peer_id: HW ast index
2142  * @vdev_id: vdev_id
2143  *
2144  * Return: peer on success
2145  *         NULL on failure
2146  */
2147 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2148 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2149 	uint8_t vdev_id)
2150 {
2151 	struct dp_peer *peer;
2152 
2153 	QDF_ASSERT(peer_id <= soc->max_peers);
2154 	/* check if there's already a peer object with this MAC address */
2155 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
2156 		0 /* is aligned */, vdev_id, DP_MOD_ID_CONFIG);
2157 	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2158 		    soc, peer, peer_id, vdev_id,
2159 		    QDF_MAC_ADDR_REF(peer_mac_addr));
2160 
2161 	if (peer) {
2162 		/* peer's ref count was already incremented by
2163 		 * peer_find_hash_find
2164 		 */
2165 		dp_peer_info("%pK: ref_cnt: %d", soc,
2166 			     qdf_atomic_read(&peer->ref_cnt));
2167 
2168 		/*
2169 		 * if peer is in logical delete CP triggered delete before map
2170 		 * is received ignore this event
2171 		 */
2172 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2173 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2174 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2175 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2176 				 vdev_id);
2177 			return NULL;
2178 		}
2179 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2180 		if (peer->peer_id == HTT_INVALID_PEER) {
2181 			peer->peer_id = peer_id;
2182 			dp_peer_tid_peer_id_update(peer, peer->peer_id);
2183 		} else {
2184 			QDF_ASSERT(0);
2185 		}
2186 
2187 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2188 		return peer;
2189 	}
2190 
2191 	return NULL;
2192 }
2193 
2194 /**
2195  * dp_rx_peer_map_handler() - handle peer map event from firmware
2196  * @soc: generic soc handle
2197  * @peer_id: peer_id from firmware
2198  * @hw_peer_id: ast index for this peer
2199  * @vdev_id: vdev ID
2200  * @peer_mac_addr: mac address of the peer
2201  * @ast_hash: ast hash value
2202  * @is_wds: flag to indicate peer map event for WDS ast entry
2203  *
2204  * Associate the peer_id that firmware provided with the peer entry
2205  * and update the ast table in the host with the hw_peer_id.
2206  *
2207  * Return: QDF_STATUS code
2208  */
2209 
2210 QDF_STATUS
2211 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2212 		       uint16_t hw_peer_id, uint8_t vdev_id,
2213 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2214 		       uint8_t is_wds)
2215 {
2216 	struct dp_peer *peer = NULL;
2217 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2218 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2219 
2220 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2221 		soc, peer_id, hw_peer_id,
2222 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2223 
2224 	/* For a peer map event for a WDS ast entry, get the peer
2225 	 * from the obj map
2226 	 */
2227 	if (is_wds) {
2228 		peer = dp_peer_get_ref_by_id(soc, peer_id,
2229 					     DP_MOD_ID_HTT);
2230 
2231 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2232 				      vdev_id, ast_hash, is_wds);
2233 		if (peer)
2234 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2235 	} else {
2236 		/*
2237 		 * It's the responsibility of the CP and FW to ensure
2238 		 * that peer is created successfully. Ideally DP should
2239 		 * not hit the below condition for directly associated
2240 		 * peers.
2241 		 */
2242 		if ((hw_peer_id < 0) ||
2243 		    (hw_peer_id >=
2244 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2245 			dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2246 			qdf_assert_always(0);
2247 		}
2248 
2249 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2250 					   hw_peer_id, vdev_id);
2251 
2252 		if (peer) {
2253 			if (wlan_op_mode_sta == peer->vdev->opmode &&
2254 			    qdf_mem_cmp(peer->mac_addr.raw,
2255 					peer->vdev->mac_addr.raw,
2256 					QDF_MAC_ADDR_SIZE) != 0) {
2257 				dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2258 				peer->bss_peer = 1;
2259 			}
2260 
2261 			if (peer->vdev->opmode == wlan_op_mode_sta) {
2262 				peer->vdev->bss_ast_hash = ast_hash;
2263 				peer->vdev->bss_ast_idx = hw_peer_id;
2264 			}
2265 
2266 			/* Add an ast entry in case the self ast entry was
2267 			 * deleted due to a DP/CP sync issue.
2268 			 *
2269 			 * self_ast_entry is modified in the peer create
2270 			 * and peer unmap paths, which cannot run in
2271 			 * parallel with peer map, so no lock is needed
2272 			 * before referring to it
2273 			 */
2274 			if (!peer->self_ast_entry) {
2275 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2276 					QDF_MAC_ADDR_REF(peer_mac_addr));
2277 				dp_peer_add_ast(soc, peer,
2278 						peer_mac_addr,
2279 						type, 0);
2280 			}
2281 		}
2282 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2283 				      vdev_id, ast_hash, is_wds);
2284 	}
2285 
2286 	return err;
2287 }
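
/*
 * Flow summary (editorial): for a WDS AST entry the peer already exists
 * and is looked up by peer_id, while for a directly associated peer a
 * peer_id mapping is created via dp_peer_find_add_id() and the self AST
 * entry is re-added if it was lost to a DP/CP sync issue. Both paths
 * end in dp_peer_map_ast() to update the host AST table with hw_peer_id.
 */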
2288 
2289 /**
2290  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
2291  * @soc: generic soc handle
2292  * @peer_id: peer_id from firmware
2293  * @vdev_id: vdev ID
2294  * @mac_addr: mac address of the peer or wds entry
2295  * @is_wds: flag to indicate peer unmap event for WDS ast entry
2296  * @free_wds_count: number of wds entries freed by FW with peer delete
2297  *
2298  * Return: none
2299  */
2300 void
2301 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2302 			 uint8_t vdev_id, uint8_t *mac_addr,
2303 			 uint8_t is_wds, uint32_t free_wds_count)
2304 {
2305 	struct dp_peer *peer;
2306 	struct dp_vdev *vdev = NULL;
2307 
2308 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2309 
2310 	/*
2311 	 * Currently peer IDs are assigned for vdevs as well as peers.
2312 	 * If the peer ID is for a vdev, then the peer pointer stored
2313 	 * in peer_id_to_obj_map will be NULL.
2314 	 */
2315 	if (!peer) {
2316 		dp_err("Received unmap event for invalid peer_id %u",
2317 		       peer_id);
2318 		return;
2319 	}
2320 
2321 	/* If V2 peer map messages are enabled, AST entry has to be freed here
2322 	 */
2323 	if (is_wds) {
2324 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
2325 						   mac_addr)) {
2326 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2327 			return;
2328 		}
2329 
2330 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u is_wds %u",
2331 			 peer, peer->peer_id,
2332 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2333 			 QDF_MAC_ADDR_REF(mac_addr), vdev_id,
2334 			 is_wds);
2335 
2336 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2337 		return;
2338 	} else {
2339 		dp_peer_clean_wds_entries(soc, peer, free_wds_count);
2340 	}
2341 
2342 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
2343 		soc, peer_id, peer);
2344 
2345 	dp_peer_find_id_to_obj_remove(soc, peer_id);
2346 	peer->peer_id = HTT_INVALID_PEER;
2347 
2348 	/*
2349 	 * Reset ast flow mapping table
2350 	 */
2351 	dp_peer_reset_flowq_map(peer);
2352 
2353 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
2354 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
2355 				peer_id, vdev_id);
2356 	}
2357 
2358 	vdev = peer->vdev;
2359 	DP_UPDATE_STATS(vdev, peer);
2360 
2361 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
2362 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2363 	/*
2364 	 * Remove a reference to the peer.
2365 	 * If there are no more references, delete the peer object.
2366 	 */
2367 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2368 }
2369 
2370 void
2371 dp_peer_find_detach(struct dp_soc *soc)
2372 {
2373 	dp_soc_wds_detach(soc);
2374 	dp_peer_find_map_detach(soc);
2375 	dp_peer_find_hash_detach(soc);
2376 	dp_peer_ast_hash_detach(soc);
2377 	dp_peer_ast_table_detach(soc);
2378 	dp_peer_mec_hash_detach(soc);
2379 }
2380 
2381 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
2382 	union hal_reo_status *reo_status)
2383 {
2384 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2385 
2386 	if ((reo_status->rx_queue_status.header.status !=
2387 		HAL_REO_CMD_SUCCESS) &&
2388 		(reo_status->rx_queue_status.header.status !=
2389 		HAL_REO_CMD_DRAIN)) {
2390 		/* Should not happen normally. Just print error for now */
2391 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
2392 			    soc, reo_status->rx_queue_status.header.status,
2393 			    rx_tid->tid);
2394 	}
2395 }
2396 
2397 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
2398 {
2399 	struct ol_if_ops *ol_ops = NULL;
2400 	bool is_roaming = false;
2401 	uint8_t vdev_id = -1;
2402 	struct cdp_soc_t *soc;
2403 
2404 	if (!peer) {
2405 		dp_peer_info("Peer is NULL. No roaming possible");
2406 		return false;
2407 	}
2408 
2409 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
2410 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
2411 
2412 	if (ol_ops && ol_ops->is_roam_inprogress) {
2413 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
2414 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
2415 	}
2416 
2417 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
2418 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
2419 
2420 	return is_roaming;
2421 }
2422 
2423 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
2424 					 ba_window_size, uint32_t start_seq)
2425 {
2426 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2427 	struct dp_soc *soc = peer->vdev->pdev->soc;
2428 	struct hal_reo_cmd_params params;
2429 
2430 	qdf_mem_zero(&params, sizeof(params));
2431 
2432 	params.std.need_status = 1;
2433 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2434 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2435 	params.u.upd_queue_params.update_ba_window_size = 1;
2436 	params.u.upd_queue_params.ba_window_size = ba_window_size;
2437 
2438 	if (start_seq < IEEE80211_SEQ_MAX) {
2439 		params.u.upd_queue_params.update_ssn = 1;
2440 		params.u.upd_queue_params.ssn = start_seq;
2441 	} else {
2442 		dp_set_ssn_valid_flag(&params, 0);
2443 	}
2444 
2445 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2446 			    dp_rx_tid_update_cb, rx_tid)) {
2447 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2448 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2449 	}
2450 
2451 	rx_tid->ba_win_size = ba_window_size;
2452 
2453 	if (dp_get_peer_vdev_roaming_in_progress(peer))
2454 		return QDF_STATUS_E_PERM;
2455 
2456 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
2457 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2458 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
2459 			peer->vdev->vdev_id, peer->mac_addr.raw,
2460 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
2461 
2462 	return QDF_STATUS_SUCCESS;
2463 }
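
/*
 * Usage sketch (illustrative): the DELBA and failed-ADDBA paths below
 * shrink a tid back to a 1-frame window; passing a start_seq of
 * IEEE80211_SEQ_MAX (or larger) clears the SSN-valid bit instead of
 * programming a new SSN:
 *
 *	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
 */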
2464 
2465 /*
2466  * dp_reo_desc_free() - Callback to free reo descriptor memory after
2467  * HW cache flush
2468  *
2469  * @soc: DP SOC handle
2470  * @cb_ctxt: Callback context
2471  * @reo_status: REO command status
2472  */
2473 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
2474 	union hal_reo_status *reo_status)
2475 {
2476 	struct reo_desc_list_node *freedesc =
2477 		(struct reo_desc_list_node *)cb_ctxt;
2478 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
2479 	unsigned long curr_ts = qdf_get_system_timestamp();
2480 
2481 	if ((reo_status->fl_cache_status.header.status !=
2482 		HAL_REO_CMD_SUCCESS) &&
2483 		(reo_status->fl_cache_status.header.status !=
2484 		HAL_REO_CMD_DRAIN)) {
2485 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
2486 			    soc, reo_status->fl_cache_status.header.status,
2487 			    freedesc->rx_tid.tid);
2488 	}
2489 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
2490 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
2491 		     rx_tid->tid);
2492 	qdf_mem_unmap_nbytes_single(soc->osdev,
2493 		rx_tid->hw_qdesc_paddr,
2494 		QDF_DMA_BIDIRECTIONAL,
2495 		rx_tid->hw_qdesc_alloc_size);
2496 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2497 	qdf_mem_free(freedesc);
2498 }
2499 
2500 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
2501 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
2502 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2503 {
2504 	if (dma_addr < 0x50000000)
2505 		return QDF_STATUS_E_FAILURE;
2506 	else
2507 		return QDF_STATUS_SUCCESS;
2508 }
2509 #else
2510 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2511 {
2512 	return QDF_STATUS_SUCCESS;
2513 }
2514 #endif
2515 
2516 
2517 /*
2518  * dp_rx_tid_setup_wifi3() – Setup receive TID state
2519  * @peer: Datapath peer handle
2520  * @tid: TID
2521  * @ba_window_size: BlockAck window size
2522  * @start_seq: Starting sequence number
2523  *
2524  * Return: QDF_STATUS code
2525  */
2526 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
2527 				 uint32_t ba_window_size, uint32_t start_seq)
2528 {
2529 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2530 	struct dp_vdev *vdev = peer->vdev;
2531 	struct dp_soc *soc = vdev->pdev->soc;
2532 	uint32_t hw_qdesc_size;
2533 	uint32_t hw_qdesc_align;
2534 	int hal_pn_type;
2535 	void *hw_qdesc_vaddr;
2536 	uint32_t alloc_tries = 0;
2537 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2538 
2539 	if (!qdf_atomic_read(&peer->is_default_route_set))
2540 		return QDF_STATUS_E_FAILURE;
2541 
2542 	rx_tid->ba_win_size = ba_window_size;
2543 	if (rx_tid->hw_qdesc_vaddr_unaligned)
2544 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
2545 			start_seq);
2546 	rx_tid->delba_tx_status = 0;
2547 	rx_tid->ppdu_id_2k = 0;
2548 	rx_tid->num_of_addba_req = 0;
2549 	rx_tid->num_of_delba_req = 0;
2550 	rx_tid->num_of_addba_resp = 0;
2551 	rx_tid->num_addba_rsp_failed = 0;
2552 	rx_tid->num_addba_rsp_success = 0;
2553 	rx_tid->delba_tx_success_cnt = 0;
2554 	rx_tid->delba_tx_fail_cnt = 0;
2555 	rx_tid->statuscode = 0;
2556 
2557 	/* TODO: Allocating HW queue descriptors based on max BA window size
2558 	 * for all QOS TIDs so that same descriptor can be used later when
2559 	 * ADDBA request is received. This should be changed to allocate HW
2560 	 * queue descriptors based on BA window size being negotiated (0 for
2561 	 * non BA cases), and reallocate when BA window size changes and also
2562 	 * send WMI message to FW to change the REO queue descriptor in Rx
2563 	 * peer entry as part of dp_rx_tid_update.
2564 	 */
2565 	if (tid != DP_NON_QOS_TID)
2566 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2567 			HAL_RX_MAX_BA_WINDOW, tid);
2568 	else
2569 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2570 			ba_window_size, tid);
2571 
2572 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
2573 	/* To avoid unnecessary extra allocation for alignment, try allocating
2574 	 * the exact size and see if we already have an aligned address.
2575 	 */
2576 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
2577 
2578 try_desc_alloc:
2579 	rx_tid->hw_qdesc_vaddr_unaligned =
2580 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
2581 
2582 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2583 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
2584 			    soc, tid);
2585 		return QDF_STATUS_E_NOMEM;
2586 	}
2587 
2588 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
2589 		hw_qdesc_align) {
2590 		/* Address allocated above is not aligned. Allocate extra
2591 		 * memory for alignment
2592 		 */
2593 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2594 		rx_tid->hw_qdesc_vaddr_unaligned =
2595 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
2596 					hw_qdesc_align - 1);
2597 
2598 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2599 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
2600 				    soc, tid);
2601 			return QDF_STATUS_E_NOMEM;
2602 		}
2603 
2604 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
2605 			rx_tid->hw_qdesc_vaddr_unaligned,
2606 			hw_qdesc_align);
2607 
2608 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
2609 			      soc, rx_tid->hw_qdesc_alloc_size,
2610 			      hw_qdesc_vaddr);
2611 
2612 	} else {
2613 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
2614 	}
2615 
2616 	/* TODO: Ensure that sec_type is set before ADDBA is received.
2617 	 * Currently this is set based on htt indication
2618 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
2619 	 */
2620 	switch (peer->security[dp_sec_ucast].sec_type) {
2621 	case cdp_sec_type_tkip_nomic:
2622 	case cdp_sec_type_aes_ccmp:
2623 	case cdp_sec_type_aes_ccmp_256:
2624 	case cdp_sec_type_aes_gcmp:
2625 	case cdp_sec_type_aes_gcmp_256:
2626 		hal_pn_type = HAL_PN_WPA;
2627 		break;
2628 	case cdp_sec_type_wapi:
2629 		if (vdev->opmode == wlan_op_mode_ap)
2630 			hal_pn_type = HAL_PN_WAPI_EVEN;
2631 		else
2632 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2633 		break;
2634 	default:
2635 		hal_pn_type = HAL_PN_NONE;
2636 		break;
2637 	}
2638 
2639 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2640 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2641 
2642 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2643 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2644 		&(rx_tid->hw_qdesc_paddr));
2645 
2646 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2647 			QDF_STATUS_SUCCESS) {
2648 		if (alloc_tries++ < 10) {
2649 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2650 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2651 			goto try_desc_alloc;
2652 		} else {
2653 			dp_peer_err("%pK: Rx tid HW desc alloc failed (lowmem): tid %d",
2654 				    soc, tid);
2655 			err = QDF_STATUS_E_NOMEM;
2656 			goto error;
2657 		}
2658 	}
2659 
2660 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2661 		err = QDF_STATUS_E_PERM;
2662 		goto error;
2663 	}
2664 
2665 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2666 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2667 		    soc->ctrl_psoc,
2668 		    peer->vdev->pdev->pdev_id,
2669 		    peer->vdev->vdev_id,
2670 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2671 		    1, ba_window_size)) {
2672 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
2673 				    soc, tid);
2674 			err = QDF_STATUS_E_FAILURE;
2675 			goto error;
2676 		}
2677 	}
2678 	return QDF_STATUS_SUCCESS;
2679 error:
2680 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2681 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2682 		    QDF_STATUS_SUCCESS)
2683 			qdf_mem_unmap_nbytes_single(
2684 				soc->osdev,
2685 				rx_tid->hw_qdesc_paddr,
2686 				QDF_DMA_BIDIRECTIONAL,
2687 				rx_tid->hw_qdesc_alloc_size);
2688 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2689 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2690 	}
2691 	return err;
2692 }
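
/*
 * Allocation sketch (illustrative): an exact-size allocation is tried
 * first, and only if its address fails the hal_get_reo_qdesc_align()
 * requirement is it re-allocated with (align - 1) bytes of slack and
 * rounded up, e.g. assuming a hypothetical 128-byte alignment:
 *
 *	vaddr = qdf_mem_malloc(size + 128 - 1);
 *	aligned = (void *)qdf_align((unsigned long)vaddr, 128);
 */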
2693 
2694 #ifdef REO_DESC_DEFER_FREE
2695 /*
2696  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
2697  * the desc back to the freelist and defer the deletion
2698  *
2699  * @soc: DP SOC handle
2700  * @desc: Base descriptor to be freed
2701  * @reo_status: REO command status
2702  */
2703 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2704 				 struct reo_desc_list_node *desc,
2705 				 union hal_reo_status *reo_status)
2706 {
2707 	desc->free_ts = qdf_get_system_timestamp();
2708 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2709 	qdf_list_insert_back(&soc->reo_desc_freelist,
2710 			     (qdf_list_node_t *)desc);
2711 }
2712 
2713 /*
2714  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
2715  * queued to the cmd ring to avoid a REO hang
2716  *
2717  * @list_size: REO desc list size to be cleaned
2718  */
2719 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2720 {
2721 	unsigned long curr_ts = qdf_get_system_timestamp();
2722 
2723 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
2724 		dp_err_log("%lu:freedesc number %d in freelist",
2725 			   curr_ts, *list_size);
2726 		/* limit the batch queue size */
2727 		*list_size = REO_DESC_FREELIST_SIZE;
2728 	}
2729 }
2730 #else
2731 /*
2732  * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
2733  * cache fails, free the base REO desc anyway
2734  *
2735  * @soc: DP SOC handle
2736  * @desc: Base descriptor to be freed
2737  * @reo_status: REO command status
2738  */
2739 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2740 				 struct reo_desc_list_node *desc,
2741 				 union hal_reo_status *reo_status)
2742 {
2743 	if (reo_status) {
2744 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2745 		reo_status->fl_cache_status.header.status = 0;
2746 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2747 	}
2748 }
2749 
2750 /*
2751  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands
2752  * queued to the cmd ring to avoid a REO hang
2753  *
2754  * @list_size: REO desc list size to be cleaned
2755  */
2756 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2757 {
2758 }
2759 #endif
2760 
2761 /*
2762  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2763  * cmd and re-insert desc into free list if send fails.
2764  *
2765  * @soc: DP SOC handle
2766  * @desc: desc with resend update cmd flag set
2767  * @rx_tid: Desc RX tid associated with update cmd for resetting
2768  * valid field to 0 in h/w
2769  *
2770  * Return: QDF status
2771  */
2772 static QDF_STATUS
2773 dp_resend_update_reo_cmd(struct dp_soc *soc,
2774 			 struct reo_desc_list_node *desc,
2775 			 struct dp_rx_tid *rx_tid)
2776 {
2777 	struct hal_reo_cmd_params params;
2778 
2779 	qdf_mem_zero(&params, sizeof(params));
2780 	params.std.need_status = 1;
2781 	params.std.addr_lo =
2782 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2783 	params.std.addr_hi =
2784 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2785 	params.u.upd_queue_params.update_vld = 1;
2786 	params.u.upd_queue_params.vld = 0;
2787 	desc->resend_update_reo_cmd = false;
2788 	/*
2789 	 * If the cmd send fails then set resend_update_reo_cmd flag
2790 	 * and insert the desc at the end of the free list to retry.
2791 	 */
2792 	if (dp_reo_send_cmd(soc,
2793 			    CMD_UPDATE_RX_REO_QUEUE,
2794 			    &params,
2795 			    dp_rx_tid_delete_cb,
2796 			    (void *)desc)
2797 	    != QDF_STATUS_SUCCESS) {
2798 		desc->resend_update_reo_cmd = true;
2799 		desc->free_ts = qdf_get_system_timestamp();
2800 		qdf_list_insert_back(&soc->reo_desc_freelist,
2801 				     (qdf_list_node_t *)desc);
2802 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2803 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2804 		return QDF_STATUS_E_FAILURE;
2805 	}
2806 
2807 	return QDF_STATUS_SUCCESS;
2808 }
2809 
2810 /*
2811  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2812  * after deleting the entries (i.e., setting valid=0)
2813  *
2814  * @soc: DP SOC handle
2815  * @cb_ctxt: Callback context
2816  * @reo_status: REO command status
2817  */
2818 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2819 			 union hal_reo_status *reo_status)
2820 {
2821 	struct reo_desc_list_node *freedesc =
2822 		(struct reo_desc_list_node *)cb_ctxt;
2823 	uint32_t list_size;
2824 	struct reo_desc_list_node *desc;
2825 	unsigned long curr_ts = qdf_get_system_timestamp();
2826 	uint32_t desc_size, tot_desc_size;
2827 	struct hal_reo_cmd_params params;
2828 	bool flush_failure = false;
2829 
2830 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2831 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2832 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2833 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2834 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
2835 		return;
2836 	} else if (reo_status->rx_queue_status.header.status !=
2837 		HAL_REO_CMD_SUCCESS) {
2838 		/* Should not happen normally. Just print error for now */
2839 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
2840 			   reo_status->rx_queue_status.header.status,
2841 			   freedesc->rx_tid.tid);
2842 	}
2843 
2844 	dp_peer_info("%pK: rx_tid: %d status: %d",
2845 		     soc, freedesc->rx_tid.tid,
2846 		     reo_status->rx_queue_status.header.status);
2847 
2848 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2849 	freedesc->free_ts = curr_ts;
2850 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2851 		(qdf_list_node_t *)freedesc, &list_size);
2852 
2853 	/* The MCL path adds the desc back to reo_desc_freelist when a REO
2854 	 * flush fails. This may cause the number of REO queues pending in
2855 	 * the free list to grow larger than the REO_CMD_RING max size,
2856 	 * flooding the REO cmd ring and leaving the REO HW in an unexpected
2857 	 * condition. So the number of REO cmds in a batch must be limited.
2858 	 */
2859 	dp_reo_limit_clean_batch_sz(&list_size);
2860 
2861 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2862 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2863 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2864 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2865 		(desc->resend_update_reo_cmd && list_size))) {
2866 		struct dp_rx_tid *rx_tid;
2867 
2868 		qdf_list_remove_front(&soc->reo_desc_freelist,
2869 				(qdf_list_node_t **)&desc);
2870 		list_size--;
2871 		rx_tid = &desc->rx_tid;
2872 
2873 		/* First process descs with resend_update_reo_cmd set */
2874 		if (desc->resend_update_reo_cmd) {
2875 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
2876 			    QDF_STATUS_SUCCESS)
2877 				break;
2878 			else
2879 				continue;
2880 		}
2881 
2882 		/* Flush and invalidate REO descriptor from HW cache: Base and
2883 		 * extension descriptors should be flushed separately */
2884 		if (desc->pending_ext_desc_size)
2885 			tot_desc_size = desc->pending_ext_desc_size;
2886 		else
2887 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2888 		/* Get base descriptor size by passing non-qos TID */
2889 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2890 						   DP_NON_QOS_TID);
2891 
2892 		/* Flush reo extension descriptors */
2893 		while ((tot_desc_size -= desc_size) > 0) {
2894 			qdf_mem_zero(&params, sizeof(params));
2895 			params.std.addr_lo =
2896 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2897 				tot_desc_size) & 0xffffffff;
2898 			params.std.addr_hi =
2899 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2900 
2901 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2902 							CMD_FLUSH_CACHE,
2903 							&params,
2904 							NULL,
2905 							NULL)) {
2906 				dp_info_rl("fail to send CMD_FLUSH_CACHE:"
2907 					   "tid %d desc %pK", rx_tid->tid,
2908 					   (void *)(rx_tid->hw_qdesc_paddr));
2909 				desc->pending_ext_desc_size = tot_desc_size +
2910 								      desc_size;
2911 				dp_reo_desc_clean_up(soc, desc, reo_status);
2912 				flush_failure = true;
2913 				break;
2914 			}
2915 		}
2916 
2917 		if (flush_failure)
2918 			break;
2919 		else
2920 			desc->pending_ext_desc_size = desc_size;
2921 
2922 		/* Flush base descriptor */
2923 		qdf_mem_zero(&params, sizeof(params));
2924 		params.std.need_status = 1;
2925 		params.std.addr_lo =
2926 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2927 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2928 
2929 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2930 							  CMD_FLUSH_CACHE,
2931 							  &params,
2932 							  dp_reo_desc_free,
2933 							  (void *)desc)) {
2934 			union hal_reo_status reo_status;
2935 			/*
2936 			 * If dp_reo_send_cmd returns failure, the related TID
2937 			 * queue desc should be unmapped. The local reo_desc,
2938 			 * together with the TID queue desc, also needs to be
2939 			 * freed accordingly. Here invoke the desc_free
2940 			 * function directly to do the clean up.
2941 			 *
2942 			 * In the MCL path, add the desc back to the free
2943 			 * desc list and defer deletion.
2944 			 */
2945 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
2946 				   rx_tid->tid);
2947 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2948 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2949 			break;
2950 		}
2951 	}
2952 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2953 }
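
/*
 * Descriptor lifecycle (summary): dp_rx_tid_delete_wifi3() issues
 * CMD_UPDATE_RX_REO_QUEUE with vld = 0 and hands the desc to this
 * callback as cb_ctxt; the callback parks it on reo_desc_freelist and
 * then drains aged or excess entries by issuing CMD_FLUSH_CACHE, whose
 * completion handler dp_reo_desc_free() finally unmaps and frees the
 * queue memory.
 */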
2954 
2955 /*
2956  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2957  * @peer: Datapath peer handle
2958  * @tid: TID
2959  *
2960  * Return: 0 on success, error code on failure
2961  */
2962 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2963 {
2964 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2965 	struct dp_soc *soc = peer->vdev->pdev->soc;
2966 	struct hal_reo_cmd_params params;
2967 	struct reo_desc_list_node *freedesc =
2968 		qdf_mem_malloc(sizeof(*freedesc));
2969 
2970 	if (!freedesc) {
2971 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
2972 			    soc, tid);
2973 		return -ENOMEM;
2974 	}
2975 
2976 	freedesc->rx_tid = *rx_tid;
2977 	freedesc->resend_update_reo_cmd = false;
2978 
2979 	qdf_mem_zero(&params, sizeof(params));
2980 
2981 	params.std.need_status = 1;
2982 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2983 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2984 	params.u.upd_queue_params.update_vld = 1;
2985 	params.u.upd_queue_params.vld = 0;
2986 
2987 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2988 			    dp_rx_tid_delete_cb, (void *)freedesc)
2989 		!= QDF_STATUS_SUCCESS) {
2990 		/* Defer the clean up to the call back context */
2991 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2992 		freedesc->free_ts = qdf_get_system_timestamp();
2993 		freedesc->resend_update_reo_cmd = true;
2994 		qdf_list_insert_front(&soc->reo_desc_freelist,
2995 				      (qdf_list_node_t *)freedesc);
2996 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2997 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2998 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2999 	}
3000 
3001 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
3002 	rx_tid->hw_qdesc_alloc_size = 0;
3003 	rx_tid->hw_qdesc_paddr = 0;
3004 
3005 	return 0;
3006 }
3007 
3008 #ifdef DP_LFR
3009 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
3010 {
3011 	int tid;
3012 
3013 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
3014 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
3015 		dp_peer_debug("Setting up TID %d for peer %pK peer->local_id %d",
3016 			      tid, peer, peer->local_id);
3017 	}
3018 }
3019 #else
3020 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
3021 #endif
3022 
3023 /*
3024  * dp_peer_tx_init() - Initialize transmit TID state
3025  * @pdev: Datapath pdev
3026  * @peer: Datapath peer
3027  *
3028  */
3029 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3030 {
3031 	dp_peer_tid_queue_init(peer);
3032 	dp_peer_update_80211_hdr(peer->vdev, peer);
3033 }
3034 
3035 /*
3036  * dp_peer_tx_cleanup() - Deinitialize transmit TID state
3037  * @vdev: Datapath vdev
3038  * @peer: Datapath peer
3039  *
3040  */
3041 static inline void
3042 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3043 {
3044 	dp_peer_tid_queue_cleanup(peer);
3045 }
3046 
3047 /*
3048  * dp_peer_rx_init() – Initialize receive TID state
3049  * @pdev: Datapath pdev
3050  * @peer: Datapath peer
3051  *
3052  */
3053 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3054 {
3055 	int tid;
3056 	struct dp_rx_tid *rx_tid;
3057 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3058 		rx_tid = &peer->rx_tid[tid];
3059 		rx_tid->array = &rx_tid->base;
3060 		rx_tid->base.head = rx_tid->base.tail = NULL;
3061 		rx_tid->tid = tid;
3062 		rx_tid->defrag_timeout_ms = 0;
3063 		rx_tid->ba_win_size = 0;
3064 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3065 
3066 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
3067 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
3068 	}
3069 
3070 	peer->active_ba_session_cnt = 0;
3071 	peer->hw_buffer_size = 0;
3072 	peer->kill_256_sessions = 0;
3073 
3074 	/* Setup default (non-qos) rx tid queue */
3075 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
3076 
3077 	/* Setup rx tid queue for TID 0.
3078 	 * Other queues will be set up on receiving the first packet, which
3079 	 * will cause a NULL REO queue error
3080 	 */
3081 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
3082 
3083 	/*
3084 	 * Setup the rest of TID's to handle LFR
3085 	 */
3086 	dp_peer_setup_remaining_tids(peer);
3087 
3088 	/*
3089 	 * Set security defaults: no PN check, no security. The target may
3090 	 * send a HTT SEC_IND message to overwrite these defaults.
3091 	 */
3092 	peer->security[dp_sec_ucast].sec_type =
3093 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
3094 }
3095 
3096 /*
3097  * dp_peer_rx_cleanup() – Cleanup receive TID state
3098  * @vdev: Datapath vdev
3099  * @peer: Datapath peer
3100  *
3101  */
3102 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3103 {
3104 	int tid;
3105 	uint32_t tid_delete_mask = 0;
3106 
3107 	dp_info("Remove tids for peer: %pK", peer);
3108 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
3109 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
3110 
3111 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3112 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
3113 			/* Cleanup defrag related resource */
3114 			dp_rx_defrag_waitlist_remove(peer, tid);
3115 			dp_rx_reorder_flush_frag(peer, tid);
3116 		}
3117 
3118 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
3119 			dp_rx_tid_delete_wifi3(peer, tid);
3120 
3121 			tid_delete_mask |= (1 << tid);
3122 		}
3123 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3124 	}
3125 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
3126 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
3127 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
3128 			peer->vdev->pdev->pdev_id,
3129 			peer->vdev->vdev_id, peer->mac_addr.raw,
3130 			tid_delete_mask);
3131 	}
3132 #endif
3133 }
3134 
3135 #ifdef FEATURE_PERPKT_INFO
3136 /*
3137  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
3138  * @peer: Datapath peer
3139  *
3140  * return: void
3141  */
3142 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
3143 {
3144 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
3145 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
3146 	peer->last_delayed_ba = false;
3147 	peer->last_delayed_ba_ppduid = 0;
3148 }
3149 #else
3150 /*
3151  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
3152  * @peer: Datapath peer
3153  *
3154  * return: void
3155  */
3156 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
3157 {
3158 }
3159 #endif
3160 
3161 /*
3162  * dp_peer_cleanup() – Cleanup peer information
3163  * @vdev: Datapath vdev
3164  * @peer: Datapath peer
3165  *
3166  */
3167 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3168 {
3169 	enum wlan_op_mode vdev_opmode;
3170 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3171 	struct dp_pdev *pdev = vdev->pdev;
3172 	struct dp_soc *soc = pdev->soc;
3173 
3174 	/* save vdev related member in case vdev freed */
3175 	vdev_opmode = vdev->opmode;
3176 
3177 	dp_peer_tx_cleanup(vdev, peer);
3178 
3179 	if (vdev_opmode != wlan_op_mode_monitor)
3180 	/* cleanup the Rx reorder queues for this peer */
3181 		dp_peer_rx_cleanup(vdev, peer);
3182 
3183 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3184 		     QDF_MAC_ADDR_SIZE);
3185 
3186 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3187 		soc->cdp_soc.ol_ops->peer_unref_delete(
3188 				soc->ctrl_psoc,
3189 				vdev->pdev->pdev_id,
3190 				peer->mac_addr.raw, vdev_mac_addr,
3191 				vdev_opmode);
3192 }
3193 
3194 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
3195  *                                window size when a request with
3196  *                                64 window size is received.
3197  *                                This is done as a WAR since HW can
3198  *                                have only one setting per peer (64 or 256).
3199  *                                For HKv2, we use per tid buffersize setting
3200  *                                for 0 to per_tid_basize_max_tid. For tid
3201  *                                more than per_tid_basize_max_tid we use HKv1
3202  *                                method.
3203  * @peer: Datapath peer
3204  *
3205  * Return: void
3206  */
3207 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
3208 {
3209 	uint8_t delba_rcode = 0;
3210 	int tid;
3211 	struct dp_rx_tid *rx_tid = NULL;
3212 
3213 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
3214 	for (; tid < DP_MAX_TIDS; tid++) {
3215 		rx_tid = &peer->rx_tid[tid];
3216 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3217 
3218 		if (rx_tid->ba_win_size <= 64) {
3219 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3220 			continue;
3221 		} else {
3222 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
3223 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3224 				/* send delba */
3225 				if (!rx_tid->delba_tx_status) {
3226 					rx_tid->delba_tx_retry++;
3227 					rx_tid->delba_tx_status = 1;
3228 					rx_tid->delba_rcode =
3229 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
3230 					delba_rcode = rx_tid->delba_rcode;
3231 
3232 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3233 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3234 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3235 							peer->vdev->pdev->soc->ctrl_psoc,
3236 							peer->vdev->vdev_id,
3237 							peer->mac_addr.raw,
3238 							tid, delba_rcode);
3239 				} else {
3240 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3241 				}
3242 			} else {
3243 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
3244 			}
3245 		}
3246 	}
3247 }
3248 
3249 /*
3250 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
3251 *
3252 * @soc: Datapath soc handle
3253 * @peer_mac: Datapath peer mac address
3254 * @vdev_id: id of datapath vdev
3255 * @tid: TID number
3256 * @status: tx completion status
3257 * Return: 0 on success, error code on failure
3258 */
3259 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
3260 				      uint8_t *peer_mac,
3261 				      uint16_t vdev_id,
3262 				      uint8_t tid, int status)
3263 {
3264 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3265 						       peer_mac, 0, vdev_id,
3266 						       DP_MOD_ID_CDP);
3267 	struct dp_rx_tid *rx_tid = NULL;
3268 
3269 	if (!peer) {
3270 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3271 		goto fail;
3272 	}
3273 	rx_tid = &peer->rx_tid[tid];
3274 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3275 	if (status) {
3276 		rx_tid->num_addba_rsp_failed++;
3277 		dp_rx_tid_update_wifi3(peer, tid, 1,
3278 				       IEEE80211_SEQ_MAX);
3279 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3280 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3281 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
3282 
3283 		goto success;
3284 	}
3285 
3286 	rx_tid->num_addba_rsp_success++;
3287 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
3288 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3289 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
3290 			    cdp_soc, tid);
3291 		goto fail;
3292 	}
3293 
3294 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
3295 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3296 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
3297 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3298 		goto fail;
3299 	}
3300 
3301 	if (dp_rx_tid_update_wifi3(peer, tid,
3302 				   rx_tid->ba_win_size,
3303 				   rx_tid->startseqnum)) {
3304 		dp_err("Failed to update REO SSN");
3305 	}
3306 
3307 	dp_info("tid %u window_size %u start_seq_num %u",
3308 		tid, rx_tid->ba_win_size,
3309 		rx_tid->startseqnum);
3310 
3311 	/* First Session */
3312 	if (peer->active_ba_session_cnt == 0) {
3313 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
3314 			peer->hw_buffer_size = 256;
3315 		else
3316 			peer->hw_buffer_size = 64;
3317 	}
3318 
3319 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
3320 
3321 	peer->active_ba_session_cnt++;
3322 
3323 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3324 
3325 	/* Kill any session having 256 buffer size
3326 	 * when 64 buffer size request is received.
3327 	 * Also, latch on to 64 as new buffer size.
3328 	 */
3329 	if (peer->kill_256_sessions) {
3330 		dp_teardown_256_ba_sessions(peer);
3331 		peer->kill_256_sessions = 0;
3332 	}
3333 
3334 success:
3335 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3336 	return QDF_STATUS_SUCCESS;
3337 
3338 fail:
3339 	if (peer)
3340 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3341 
3342 	return QDF_STATUS_E_FAILURE;
3343 }
3344 
3345 /*
3346 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
3347 *
3348 * @soc: Datapath soc handle
3349 * @peer_mac: Datapath peer mac address
3350 * @vdev_id: id of datapath vdev
3351 * @tid: TID number
3352 * @dialogtoken: output dialogtoken
3353 * @statuscode: output status code
3354 * @buffersize: Output BA window size
3355 * @batimeout: Output BA timeout
3356 */
3357 QDF_STATUS
3358 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3359 			     uint16_t vdev_id, uint8_t tid,
3360 			     uint8_t *dialogtoken, uint16_t *statuscode,
3361 			     uint16_t *buffersize, uint16_t *batimeout)
3362 {
3363 	struct dp_rx_tid *rx_tid = NULL;
3364 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3365 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3366 						       peer_mac, 0, vdev_id,
3367 						       DP_MOD_ID_CDP);
3368 
3369 	if (!peer) {
3370 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3371 		return QDF_STATUS_E_FAILURE;
3372 	}
3373 	rx_tid = &peer->rx_tid[tid];
3374 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3375 	rx_tid->num_of_addba_resp++;
3376 	/* setup ADDBA response parameters */
3377 	*dialogtoken = rx_tid->dialogtoken;
3378 	*statuscode = rx_tid->statuscode;
3379 	*buffersize = rx_tid->ba_win_size;
3380 	*batimeout  = 0;
3381 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3382 
3383 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3384 
3385 	return status;
3386 }
3387 
3388 /* dp_check_ba_buffersize() - Check buffer size in request
3389  *                            and latch onto this size based on
3390  *                            size used in first active session.
3391  * @peer: Datapath peer
3392  * @tid: Tid
3393  * @buffersize: Block ack window size
3394  *
3395  * Return: void
3396  */
3397 static void dp_check_ba_buffersize(struct dp_peer *peer,
3398 				   uint16_t tid,
3399 				   uint16_t buffersize)
3400 {
3401 	struct dp_rx_tid *rx_tid = NULL;
3402 
3403 	rx_tid = &peer->rx_tid[tid];
3404 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
3405 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
3406 		rx_tid->ba_win_size = buffersize;
3407 		return;
3408 	} else {
3409 		if (peer->active_ba_session_cnt == 0) {
3410 			rx_tid->ba_win_size = buffersize;
3411 		} else {
3412 			if (peer->hw_buffer_size == 64) {
3413 				if (buffersize <= 64)
3414 					rx_tid->ba_win_size = buffersize;
3415 				else
3416 					rx_tid->ba_win_size = peer->hw_buffer_size;
3417 			} else if (peer->hw_buffer_size == 256) {
3418 				if (buffersize > 64) {
3419 					rx_tid->ba_win_size = buffersize;
3420 				} else {
3421 					rx_tid->ba_win_size = buffersize;
3422 					peer->hw_buffer_size = 64;
3423 					peer->kill_256_sessions = 1;
3424 				}
3425 			}
3426 		}
3427 	}
3428 }
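
/*
 * Worked example (illustrative, assuming per_tid_basize_max_tid == 0):
 * a first ADDBA with buffersize 256 latches ba_win_size = 256 and the
 * completion path sets hw_buffer_size = 256. A later ADDBA on another
 * tid with buffersize 64 then takes the hw_buffer_size == 256 branch:
 * ba_win_size = 64, hw_buffer_size = 64 and kill_256_sessions = 1, so
 * dp_teardown_256_ba_sessions() tears down the existing 256 sessions.
 */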
3429 
3430 #define DP_RX_BA_SESSION_DISABLE  1
3431 
3432 /*
3433  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
3434  *
3435  * @soc: Datapath soc handle
3436  * @peer_mac: Datapath peer mac address
3437  * @vdev_id: id of datapath vdev
3438  * @dialogtoken: dialogtoken from ADDBA frame
3439  * @tid: TID number
3440  * @batimeout: BA timeout
3441  * @buffersize: BA window size
3442  * @startseqnum: Start seq. number received in BA sequence control
3443  *
3444  * Return: 0 on success, error code on failure
3445  */
3446 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
3447 				  uint8_t *peer_mac,
3448 				  uint16_t vdev_id,
3449 				  uint8_t dialogtoken,
3450 				  uint16_t tid, uint16_t batimeout,
3451 				  uint16_t buffersize,
3452 				  uint16_t startseqnum)
3453 {
3454 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3455 	struct dp_rx_tid *rx_tid = NULL;
3456 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3457 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
3458 						       peer_mac, 0, vdev_id,
3459 						       DP_MOD_ID_CDP);
3460 
3461 	if (!peer) {
3462 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3463 		return QDF_STATUS_E_FAILURE;
3464 	}
3465 	rx_tid = &peer->rx_tid[tid];
3466 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3467 	rx_tid->num_of_addba_req++;
3468 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
3469 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
3470 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3471 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3472 		peer->active_ba_session_cnt--;
3473 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
3474 			      cdp_soc, tid);
3475 	}
3476 
3477 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3478 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3479 		status = QDF_STATUS_E_FAILURE;
3480 		goto fail;
3481 	}
3482 
3483 	if (wlan_cfg_is_dp_force_rx_64_ba(soc->wlan_cfg_ctx)) {
3484 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3485 			  "force use BA64 scheme");
3486 		buffersize = qdf_min((uint16_t)64, buffersize);
3487 	}
3488 
3489 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
3490 		dp_peer_info("%pK: disable BA session",
3491 			     cdp_soc);
3492 
3493 		buffersize = 1;
3494 	} else if (rx_tid->rx_ba_win_size_override) {
3495 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
3496 			     rx_tid->rx_ba_win_size_override);
3497 
3498 		buffersize = rx_tid->rx_ba_win_size_override;
3499 	} else {
3500 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
3501 			     buffersize);
3502 	}
3503 
3504 	dp_check_ba_buffersize(peer, tid, buffersize);
3505 
3506 	if (dp_rx_tid_setup_wifi3(peer, tid,
3507 	    rx_tid->ba_win_size, startseqnum)) {
3508 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3509 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3510 		status = QDF_STATUS_E_FAILURE;
3511 		goto fail;
3512 	}
3513 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
3514 
3515 	rx_tid->dialogtoken = dialogtoken;
3516 	rx_tid->startseqnum = startseqnum;
3517 
3518 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
3519 		rx_tid->statuscode = rx_tid->userstatuscode;
3520 	else
3521 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
3522 
3523 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
3524 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
3525 
3526 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3527 
3528 fail:
3529 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3530 
3531 	return status;
3532 }
3533 
3534 /*
3535 * dp_set_addba_response() - Set a user defined ADDBA response status code
3536 *
3537 * @soc: Datapath soc handle
3538 * @peer_mac: Datapath peer mac address
3539 * @vdev_id: id of datapath vdev
3540 * @tid: TID number
3541 * @statuscode: response status code to be set
3542 */
3543 QDF_STATUS
3544 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3545 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
3546 {
3547 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3548 						       peer_mac, 0, vdev_id,
3549 						       DP_MOD_ID_CDP);
3550 	struct dp_rx_tid *rx_tid;
3551 
3552 	if (!peer) {
3553 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3554 		return QDF_STATUS_E_FAILURE;
3555 	}
3556 
3557 	rx_tid = &peer->rx_tid[tid];
3558 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3559 	rx_tid->userstatuscode = statuscode;
3560 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3561 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3562 
3563 	return QDF_STATUS_SUCCESS;
3564 }
3565 
3566 /*
3567  * dp_delba_process_wifi3() - Process DELBA from peer
3568  * @soc: Datapath soc handle
3569  * @peer_mac: Datapath peer mac address
3570  * @vdev_id: id of datapath vdev
3571  * @tid: TID number
3572  * @reasoncode: Reason code received in DELBA frame
3573  *
3574  * Return: 0 on success, error code on failure
3575  */
3576 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3577 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
3578 {
3579 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3580 	struct dp_rx_tid *rx_tid;
3581 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3582 						      peer_mac, 0, vdev_id,
3583 						      DP_MOD_ID_CDP);
3584 
3585 	if (!peer) {
3586 		dp_peer_debug("%pK: Peer is NULL!\n", cdp_soc);
3587 		return QDF_STATUS_E_FAILURE;
3588 	}
3589 	rx_tid = &peer->rx_tid[tid];
3590 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3591 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
3592 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3593 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3594 		status = QDF_STATUS_E_FAILURE;
3595 		goto fail;
3596 	}
3597 	/* TODO: See if we can delete the existing REO queue descriptor and
3598 	 * replace it with a new one without the queue extension descriptor
3599 	 * memory
3600 	 */
3601 	rx_tid->delba_rcode = reasoncode;
3602 	rx_tid->num_of_delba_req++;
3603 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3604 
3605 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
3606 	peer->active_ba_session_cnt--;
3607 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3608 fail:
3609 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3610 
3611 	return status;
3612 }
3613 
3614 /*
3615  * dp_delba_tx_completion_wifi3() - Handle Tx completion of a DELBA frame
3616  *
3617  * @soc: Datapath soc handle
3618  * @peer_mac: Datapath peer mac address
3619  * @vdev_id: id of datapath vdev
3620  * @tid: TID number
3621  * @status: tx completion status
3622  *
3623  * Return: 0 on success, error code on failure
3624  */
3625 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3626 				 uint16_t vdev_id,
3627 				 uint8_t tid, int status)
3628 {
3629 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
3630 	struct dp_rx_tid *rx_tid = NULL;
3631 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3632 						      peer_mac, 0, vdev_id,
3633 						      DP_MOD_ID_CDP);
3634 
3635 	if (!peer) {
3636 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
3637 		return QDF_STATUS_E_FAILURE;
3638 	}
3639 	rx_tid = &peer->rx_tid[tid];
3640 	qdf_spin_lock_bh(&rx_tid->tid_lock);
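	/* Non-zero status means the DELBA frame tx failed; retry up to
	 * DP_MAX_DELBA_RETRY times before giving up on the teardown.
	 */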
3641 	if (status) {
3642 		rx_tid->delba_tx_fail_cnt++;
3643 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
3644 			rx_tid->delba_tx_retry = 0;
3645 			rx_tid->delba_tx_status = 0;
3646 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3647 		} else {
3648 			rx_tid->delba_tx_retry++;
3649 			rx_tid->delba_tx_status = 1;
3650 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3651 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3652 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3653 					peer->vdev->pdev->soc->ctrl_psoc,
3654 					peer->vdev->vdev_id,
3655 					peer->mac_addr.raw, tid,
3656 					rx_tid->delba_rcode);
3657 		}
3658 		goto end;
3659 	} else {
3660 		rx_tid->delba_tx_success_cnt++;
3661 		rx_tid->delba_tx_retry = 0;
3662 		rx_tid->delba_tx_status = 0;
3663 	}
3664 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
3665 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3666 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3667 		peer->active_ba_session_cnt--;
3668 	}
3669 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3670 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3671 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3672 	}
3673 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3674 
3675 end:
3676 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3677 
3678 	return ret;
3679 }
3680 
3681 /**
3682  * dp_set_pn_check_wifi3() - enable PN check in REO for security
3683  * @soc: Datapath soc handle
3684  * @vdev_id: id of datapath vdev
3685  * @peer_mac: Datapath peer mac address
3686  * @sec_type: security type
3687  * @rx_pn: Receive PN starting number
3688  *
3689  * Program PN check parameters into the REO queue descriptor of every TID
3690  *
3691  * Return: QDF_STATUS_SUCCESS on success, error status on failure
3692  */
3693 QDF_STATUS
3694 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3695 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3696 		      uint32_t *rx_pn)
3697 {
3698 	struct dp_pdev *pdev;
3699 	int i;
3700 	uint8_t pn_size;
3701 	struct hal_reo_cmd_params params;
3702 	struct dp_peer *peer = NULL;
3703 	struct dp_vdev *vdev = NULL;
3704 
3705 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3706 				      peer_mac, 0, vdev_id,
3707 				      DP_MOD_ID_CDP);
3708 
3709 	if (!peer) {
3710 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3711 		return QDF_STATUS_E_FAILURE;
3712 	}
3713 
3714 	vdev = peer->vdev;
3715 
3716 	if (!vdev) {
3717 		dp_peer_debug("%pK: VDEV is NULL!\n", soc);
3718 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3719 		return QDF_STATUS_E_FAILURE;
3720 	}
3721 
3722 	pdev = vdev->pdev;
3723 	qdf_mem_zero(&params, sizeof(params));
3724 
3725 	params.std.need_status = 1;
3726 	params.u.upd_queue_params.update_pn_valid = 1;
3727 	params.u.upd_queue_params.update_pn_size = 1;
3728 	params.u.upd_queue_params.update_pn = 1;
3729 	params.u.upd_queue_params.update_pn_check_needed = 1;
3730 	params.u.upd_queue_params.update_svld = 1;
3731 	params.u.upd_queue_params.svld = 0;
3732 
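	/* Pick the PN size for the cipher: 48-bit PN for TKIP/CCMP/GCMP,
	 * 128-bit PN for WAPI (with even/odd PN rules based on AP/STA role),
	 * and no PN check for the remaining security types.
	 */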
3733 	switch (sec_type) {
3734 	case cdp_sec_type_tkip_nomic:
3735 	case cdp_sec_type_aes_ccmp:
3736 	case cdp_sec_type_aes_ccmp_256:
3737 	case cdp_sec_type_aes_gcmp:
3738 	case cdp_sec_type_aes_gcmp_256:
3739 		params.u.upd_queue_params.pn_check_needed = 1;
3740 		params.u.upd_queue_params.pn_size = 48;
3741 		pn_size = 48;
3742 		break;
3743 	case cdp_sec_type_wapi:
3744 		params.u.upd_queue_params.pn_check_needed = 1;
3745 		params.u.upd_queue_params.pn_size = 128;
3746 		pn_size = 128;
3747 		if (vdev->opmode == wlan_op_mode_ap) {
3748 			params.u.upd_queue_params.pn_even = 1;
3749 			params.u.upd_queue_params.update_pn_even = 1;
3750 		} else {
3751 			params.u.upd_queue_params.pn_uneven = 1;
3752 			params.u.upd_queue_params.update_pn_uneven = 1;
3753 		}
3754 		break;
3755 	default:
3756 		params.u.upd_queue_params.pn_check_needed = 0;
3757 		pn_size = 0;
3758 		break;
3759 	}
3761 
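	/* Push the PN check parameters into the REO queue descriptor of
	 * every TID that has already been set up for this peer.
	 */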
3762 	for (i = 0; i < DP_MAX_TIDS; i++) {
3763 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3764 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3765 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3766 			params.std.addr_lo =
3767 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3768 			params.std.addr_hi =
3769 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3770 
3771 			if (pn_size) {
3772 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
3773 					     soc, i, rx_pn[3], rx_pn[2],
3774 					     rx_pn[1], rx_pn[0]);
3775 				params.u.upd_queue_params.update_pn_valid = 1;
3776 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3777 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3778 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3779 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3780 			}
3781 			rx_tid->pn_size = pn_size;
3782 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3783 					    CMD_UPDATE_RX_REO_QUEUE,
3784 					    &params, dp_rx_tid_update_cb,
3785 					    rx_tid)) {
3786 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3787 					   " tid %d desc %pK", rx_tid->tid,
3788 					   (void *)(rx_tid->hw_qdesc_paddr));
3789 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3790 					     rx.err.reo_cmd_send_fail, 1);
3791 			}
3792 		} else {
3793 			dp_peer_info("%pK: PN check not setup for TID:%d", soc, i);
3794 		}
3795 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3796 	}
3797 
3798 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3799 
3800 	return QDF_STATUS_SUCCESS;
3801 }
3802 
3804 /**
3805  * dp_set_key_sec_type_wifi3() - set security mode of key
3806  * @soc: Datapath soc handle
3807  * @vdev_id: id of datapath vdev
3808  * @peer_mac: Datapath peer mac address
3809  * @sec_type: security type
3810  * @is_unicast: true for a unicast key, false for a multicast key
3811  *
3812  * Record the security type of the given key in the peer object
3813  *
3814  * Return: QDF_STATUS_SUCCESS on success, error status on failure
3815  */
3816 QDF_STATUS
3817 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3818 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3819 			  bool is_unicast)
3820 {
3821 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3822 						       peer_mac, 0, vdev_id,
3823 						       DP_MOD_ID_CDP);
3824 	int sec_index;
3825 
3826 	if (!peer) {
3827 		dp_peer_debug("%pK: Peer is NULL!\n", soc);
3828 		return QDF_STATUS_E_FAILURE;
3829 	}
3830 
3831 	dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3832 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3833 		     is_unicast ? "ucast" : "mcast", sec_type);
3834 
3835 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3836 	peer->security[sec_index].sec_type = sec_type;
3837 
3838 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3839 
3840 	return QDF_STATUS_SUCCESS;
3841 }
3842 
3843 void
3844 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3845 		      enum cdp_sec_type sec_type, int is_unicast,
3846 		      u_int32_t *michael_key,
3847 		      u_int32_t *rx_pn)
3848 {
3849 	struct dp_peer *peer;
3850 	int sec_index;
3851 
3852 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3853 	if (!peer) {
3854 		dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3855 			    peer_id);
3856 		return;
3857 	}
3858 	dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3859 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3860 		     is_unicast ? "ucast" : "mcast", sec_type);
3861 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3862 	peer->security[sec_index].sec_type = sec_type;
3863 #ifdef notyet /* TODO: See if this is required for defrag support */
3864 	/* michael key only valid for TKIP, but for simplicity,
3865 	 * copy it anyway
3866 	 */
3867 	qdf_mem_copy(
3868 		&peer->security[sec_index].michael_key[0],
3869 		michael_key,
3870 		sizeof(peer->security[sec_index].michael_key));
3871 #ifdef BIG_ENDIAN_HOST
3872 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3873 				 sizeof(peer->security[sec_index].michael_key));
3874 #endif /* BIG_ENDIAN_HOST */
3875 #endif
3876 
3877 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3878 	if (sec_type != cdp_sec_type_wapi) {
3879 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3880 	} else {
3881 		for (i = 0; i < DP_MAX_TIDS; i++) {
3882 			/*
3883 			 * Setting PN valid bit for WAPI sec_type,
3884 			 * since WAPI PN has to be started with predefined value
3885 			 */
3886 			peer->tids_last_pn_valid[i] = 1;
3887 			qdf_mem_copy(
3888 				(u_int8_t *) &peer->tids_last_pn[i],
3889 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3890 			peer->tids_last_pn[i].pn128[1] =
3891 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3892 			peer->tids_last_pn[i].pn128[0] =
3893 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3894 		}
3895 	}
3896 #endif
3897 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3898 	 * all security types and last pn for WAPI) once REO command API
3899 	 * is available
3900 	 */
3901 
3902 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3903 }
3904 
3905 #ifdef QCA_PEER_EXT_STATS
3906 /*
3907  * dp_peer_ext_stats_ctx_alloc() - Allocate peer extended
3908  *                                 stats context
3909  * @soc: DP SoC context
3910  * @peer: DP peer context
3911  *
3912  * Allocate the peer extended stats context
3913  *
3914  * Return: QDF_STATUS_SUCCESS if allocation is
3915  *	   successful, error status otherwise
3916  */
3917 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
3918 				       struct dp_peer *peer)
3919 {
3920 	uint8_t tid, ctx_id;
3921 
3922 	if (!soc || !peer) {
3923 		dp_warn("Null soc%pK or peer%pK", soc, peer);
3924 		return QDF_STATUS_E_INVAL;
3925 	}
3926 
3927 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3928 		return QDF_STATUS_SUCCESS;
3929 
3930 	/*
3931 	 * Allocate memory for peer extended stats.
3932 	 */
3933 	peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
3934 	if (!peer->pext_stats) {
3935 		dp_err("Peer extended stats obj alloc failed!!");
3936 		return QDF_STATUS_E_NOMEM;
3937 	}
3938 
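	/* Initialize the delay histograms (SW enqueue delay, HW completion
	 * delay and reap-to-stack delay) for every data TID and ring context.
	 */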
3939 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3940 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3941 			struct cdp_delay_tx_stats *tx_delay =
3942 			&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
3943 			struct cdp_delay_rx_stats *rx_delay =
3944 			&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
3945 
3946 			dp_hist_init(&tx_delay->tx_swq_delay,
3947 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3948 			dp_hist_init(&tx_delay->hwtx_delay,
3949 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3950 			dp_hist_init(&rx_delay->to_stack_delay,
3951 				     CDP_HIST_TYPE_REAP_STACK);
3952 		}
3953 	}
3954 
3955 	return QDF_STATUS_SUCCESS;
3956 }
3957 
3958 /*
3959  * dp_peer_ext_stats_ctx_dealloc() - Free the peer extended stats context
3960  * @soc: DP SoC context
3961  * @peer: DP peer context
3962  *
3963  * Return: Void
3964  */
3966 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
3967 {
3968 	if (!peer) {
3969 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3970 		return;
3971 	}
3972 
3973 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3974 		return;
3975 
3976 	if (!peer->pext_stats)
3977 		return;
3978 
3979 	qdf_mem_free(peer->pext_stats);
3980 	peer->pext_stats = NULL;
3981 }
3982 #endif
3983 
3984 QDF_STATUS
3985 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
3986 			uint8_t tid, uint16_t win_sz)
3987 {
3988 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3989 	struct dp_peer *peer;
3990 	struct dp_rx_tid *rx_tid;
3991 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3992 
3993 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3994 
3995 	if (!peer) {
3996 		dp_peer_err("%pK: Couldn't find peer from ID %d",
3997 			    soc, peer_id);
3998 		return QDF_STATUS_E_FAILURE;
3999 	}
4000 
4001 	qdf_assert_always(tid < DP_MAX_TIDS);
4002 
4003 	rx_tid = &peer->rx_tid[tid];
4004 
4005 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
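		/* Send a DELBA only if one is not already outstanding
		 * for this TID.
		 */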
4006 		if (!rx_tid->delba_tx_status) {
4007 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
4008 				     soc, peer_id, tid, win_sz);
4009 
4010 			qdf_spin_lock_bh(&rx_tid->tid_lock);
4011 
4012 			rx_tid->delba_tx_status = 1;
4013 
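			/* Cap the renegotiated window at 63 so the
			 * re-established BA session uses the smaller size.
			 */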
4014 			rx_tid->rx_ba_win_size_override =
4015 			    qdf_min((uint16_t)63, win_sz);
4016 
4017 			rx_tid->delba_rcode =
4018 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
4019 
4020 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
4021 
4022 			if (soc->cdp_soc.ol_ops->send_delba)
4023 				soc->cdp_soc.ol_ops->send_delba(
4024 					peer->vdev->pdev->soc->ctrl_psoc,
4025 					peer->vdev->vdev_id,
4026 					peer->mac_addr.raw,
4027 					tid,
4028 					rx_tid->delba_rcode);
4029 		}
4030 	} else {
4031 		dp_peer_err("%pK: BA session is not setup for TID:%d ", soc, tid);
4032 		status = QDF_STATUS_E_FAILURE;
4033 	}
4034 
4035 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
4036 
4037 	return status;
4038 }
4039 
4040 #ifdef DP_PEER_EXTENDED_API
4041 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4042 			    struct ol_txrx_desc_type *sta_desc)
4043 {
4044 	struct dp_peer *peer;
4045 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4046 
4047 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
4048 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4049 
4050 	if (!peer)
4051 		return QDF_STATUS_E_FAULT;
4052 
4053 	qdf_spin_lock_bh(&peer->peer_info_lock);
4054 	peer->state = OL_TXRX_PEER_STATE_CONN;
4055 	qdf_spin_unlock_bh(&peer->peer_info_lock);
4056 
4057 	dp_rx_flush_rx_cached(peer, false);
4058 
4059 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4060 
4061 	return QDF_STATUS_SUCCESS;
4062 }
4063 
4064 QDF_STATUS
4065 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4066 	      struct qdf_mac_addr peer_addr)
4067 {
4068 	struct dp_peer *peer;
4069 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4070 
4071 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
4072 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
4073 	if (!peer || !peer->valid)
4074 		return QDF_STATUS_E_FAULT;
4075 
4076 	dp_clear_peer_internal(soc, peer);
4077 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4078 	return QDF_STATUS_SUCCESS;
4079 }
4080 
4081 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4082 				enum ol_txrx_peer_state state)
4083 {
4084 	struct dp_peer *peer;
4085 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4086 
4087 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4088 				       DP_MOD_ID_CDP);
4089 	if (!peer) {
4090 		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
4091 			    soc, QDF_MAC_ADDR_REF(peer_mac));
4092 		return QDF_STATUS_E_FAILURE;
4093 	}
4094 	peer->state = state;
4095 
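	/* Only peers that have reached AUTH state are authorized to
	 * exchange data frames.
	 */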
4096 	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
4097 
4098 	dp_info("peer %pK state %d", peer, peer->state);
4099 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4100 	 * Decrement it here.
4101 	 */
4102 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4103 
4104 	return QDF_STATUS_SUCCESS;
4105 }
4106 
4107 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
4108 			 uint8_t *vdev_id)
4109 {
4110 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4111 	struct dp_peer *peer =
4112 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
4113 				       DP_MOD_ID_CDP);
4114 
4115 	if (!peer)
4116 		return QDF_STATUS_E_FAILURE;
4117 
4118 	dp_info("peer %pK vdev %pK vdev id %d",
4119 		peer, peer->vdev, peer->vdev->vdev_id);
4120 	*vdev_id = peer->vdev->vdev_id;
4121 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
4122 	 * Decrement it here.
4123 	 */
4124 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4125 
4126 	return QDF_STATUS_SUCCESS;
4127 }
4128 
4129 struct cdp_vdev *
4130 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
4131 			 struct qdf_mac_addr peer_addr)
4132 {
4133 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4134 	struct dp_peer *peer = NULL;
4135 	struct cdp_vdev *vdev = NULL;
4136 
4137 	if (!pdev) {
4138 		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
4139 			     QDF_MAC_ADDR_REF(peer_addr.bytes));
4140 		return NULL;
4141 	}
4142 
4143 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
4144 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
4145 	if (!peer) {
4146 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4147 			  "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT,
4148 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
4149 		return NULL;
4150 	}
4151 
4152 	vdev = (struct cdp_vdev *)peer->vdev;
4153 
4154 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4155 	return vdev;
4156 }
4157 
4158 /**
4159  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
4160  * @peer: peer instance
4161  *
4162  * Get the virtual interface instance to which the peer belongs
4163  *
4164  * Return: virtual interface instance pointer,
4165  *         NULL if it cannot be found
4166  */
4167 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
4168 {
4169 	struct dp_peer *peer = peer_handle;
4170 
4171 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
4172 	return (struct cdp_vdev *)peer->vdev;
4173 }
4174 
4175 /**
4176  * dp_peer_get_peer_mac_addr() - Get peer mac address
4177  * @peer: peer instance
4178  *
4179  * Get the peer mac address
4180  *
4181  * Return: peer mac address pointer,
4182  *         NULL if it cannot be found
4183  */
4184 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
4185 {
4186 	struct dp_peer *peer = peer_handle;
4187 	uint8_t *mac;
4188 
4189 	mac = peer->mac_addr.raw;
4190 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
4191 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4192 	return peer->mac_addr.raw;
4193 }
4194 
4195 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4196 		      uint8_t *peer_mac)
4197 {
4198 	enum ol_txrx_peer_state peer_state;
4199 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4200 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
4201 						       vdev_id, DP_MOD_ID_CDP);
4202 
4203 	if (!peer)
4204 		return QDF_STATUS_E_FAILURE;
4205 
4206 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
4207 	peer_state = peer->state;
4208 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4209 
4210 	return peer_state;
4211 }
4212 
4213 /**
4214  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
4215  * @pdev: data path device instance
4216  *
4217  * Local peer id pool alloc for physical device
4218  *
4219  * Return: none
4220  */
4221 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
4222 {
4223 	int i;
4224 
4225 	/* point the freelist to the first ID */
4226 	pdev->local_peer_ids.freelist = 0;
4227 
4228 	/* link each ID to the next one */
4229 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4230 		pdev->local_peer_ids.pool[i] = i + 1;
4231 		pdev->local_peer_ids.map[i] = NULL;
4232 	}
4233 
4234 	/* link the last ID to itself, to mark the end of the list */
4235 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
4236 	pdev->local_peer_ids.pool[i] = i;
4237 
4238 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
4239 	DP_TRACE(INFO, "Peer pool init");
4240 }
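
/*
 * Illustrative sketch (hypothetical values, not driver code): with
 * OL_TXRX_NUM_LOCAL_PEER_IDS == 4, the pool after init is
 * pool[] = {1, 2, 3, 4, 4} with freelist == 0, i.e. a singly linked
 * list 0 -> 1 -> 2 -> 3 -> 4 whose last entry points to itself as the
 * end-of-list marker checked by dp_local_peer_id_alloc().
 */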
4241 
4242 /**
4243  * dp_local_peer_id_alloc() - allocate local peer id
4244  * @pdev: data path device instance
4245  * @peer: new peer instance
4246  *
4247  * Allocate local peer id
4248  *
4249  * Return: none
4250  */
4251 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
4252 {
4253 	int i;
4254 
4255 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4256 	i = pdev->local_peer_ids.freelist;
4257 	if (pdev->local_peer_ids.pool[i] == i) {
4258 		/* the list is empty, except for the list-end marker */
4259 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
4260 	} else {
4261 		/* take the head ID and advance the freelist */
4262 		peer->local_id = i;
4263 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
4264 		pdev->local_peer_ids.map[i] = peer;
4265 	}
4266 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4267 	dp_info("peer %pK, local id %d", peer, peer->local_id);
4268 }
4269 
4270 /**
4271  * dp_local_peer_id_free() - remove local peer id
4272  * @pdev: data path device instance
4273  * @peer: peer instance whose local id should be removed
4274  *
4275  * Remove local peer id
4276  *
4277  * Return: none
4278  */
4279 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
4280 {
4281 	int i = peer->local_id;
4282 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
4283 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
4284 		return;
4285 	}
4286 
4287 	/* put this ID on the head of the freelist */
4288 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4289 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
4290 	pdev->local_peer_ids.freelist = i;
4291 	pdev->local_peer_ids.map[i] = NULL;
4292 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4293 }
4294 
4295 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
4296 				uint8_t vdev_id, uint8_t *peer_addr)
4297 {
4298 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4299 	struct dp_peer *peer = NULL;
4300 
4301 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
4302 				      DP_MOD_ID_CDP);
4303 	if (!peer)
4304 		return false;
4305 
4306 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4307 
4308 	return true;
4309 }
4310 
4311 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
4312 				      uint8_t vdev_id, uint8_t *peer_addr,
4313 				      uint16_t max_bssid)
4314 {
4315 	int i;
4316 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4317 	struct dp_peer *peer = NULL;
4318 
4319 	for (i = 0; i < max_bssid; i++) {
4320 		/* Need to check vdevs other than the vdev_id */
4321 		if (vdev_id == i)
4322 			continue;
4323 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
4324 					      DP_MOD_ID_CDP);
4325 		if (peer) {
4326 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
4327 			       QDF_MAC_ADDR_REF(peer_addr), i);
4328 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4329 			return true;
4330 		}
4331 	}
4332 
4333 	return false;
4334 }
4335 
4336 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4337 			uint8_t *peer_addr)
4338 {
4339 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4340 	struct dp_peer *peer = NULL;
4341 
4342 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
4343 				      DP_MOD_ID_CDP);
4344 	if (peer) {
4345 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4346 		return true;
4347 	}
4348 
4349 	return false;
4350 }
4351 #endif
4352 
4353 /**
4354  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
4355  * @peer: DP peer handle
4356  * @dp_stats_cmd_cb: REO command callback function
4357  * @cb_ctxt: Callback context
4358  *
4359  * Return: count of tid stats cmds successfully sent
4360  */
4361 int dp_peer_rxtid_stats(struct dp_peer *peer,
4362 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
4363 			void *cb_ctxt)
4364 {
4365 	struct dp_soc *soc = peer->vdev->pdev->soc;
4366 	struct hal_reo_cmd_params params;
4367 	int i;
4368 	int stats_cmd_sent_cnt = 0;
4369 	QDF_STATUS status;
4370 
4371 	if (!dp_stats_cmd_cb)
4372 		return stats_cmd_sent_cnt;
4373 
4374 	qdf_mem_zero(&params, sizeof(params));
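	/* Issue CMD_GET_QUEUE_STATS for every TID that has an HW REO
	 * queue descriptor, then flush the descriptor from the HW cache.
	 */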
4375 	for (i = 0; i < DP_MAX_TIDS; i++) {
4376 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4377 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4378 			params.std.need_status = 1;
4379 			params.std.addr_lo =
4380 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4381 			params.std.addr_hi =
4382 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4383 
4384 			if (cb_ctxt) {
4385 				status = dp_reo_send_cmd(
4386 						soc, CMD_GET_QUEUE_STATS,
4387 						&params, dp_stats_cmd_cb,
4388 						cb_ctxt);
4389 			} else {
4390 				status = dp_reo_send_cmd(
4391 						soc, CMD_GET_QUEUE_STATS,
4392 						&params, dp_stats_cmd_cb,
4393 						rx_tid);
4394 			}
4395 
4396 			if (QDF_IS_STATUS_SUCCESS(status))
4397 				stats_cmd_sent_cnt++;
4398 
4399 			/* Flush REO descriptor from HW cache to update stats
4400 			 * in descriptor memory. This is to help debugging */
4401 			qdf_mem_zero(&params, sizeof(params));
4402 			params.std.need_status = 0;
4403 			params.std.addr_lo =
4404 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4405 			params.std.addr_hi =
4406 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4407 			params.u.fl_cache_params.flush_no_inval = 1;
4408 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
4409 				NULL);
4410 		}
4411 	}
4412 
4413 	return stats_cmd_sent_cnt;
4414 }
4415 
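/**
 * dp_set_michael_key() - set michael key for a peer
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @is_unicast: true for a unicast key, false for a multicast key
 * @key: michael key to be installed
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */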
4416 QDF_STATUS
4417 dp_set_michael_key(struct cdp_soc_t *soc,
4418 		   uint8_t vdev_id,
4419 		   uint8_t *peer_mac,
4420 		   bool is_unicast, uint32_t *key)
4421 {
4422 	uint8_t sec_index = is_unicast ? 1 : 0;
4423 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4424 						      peer_mac, 0, vdev_id,
4425 						      DP_MOD_ID_CDP);
4426 
4427 	if (!peer) {
4428 		dp_peer_err("%pK: peer not found ", soc);
4429 		return QDF_STATUS_E_FAILURE;
4430 	}
4431 
4432 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
4433 		     key, IEEE80211_WEP_MICLEN);
4434 
4435 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4436 
4437 	return QDF_STATUS_SUCCESS;
4438 }
4439 
4441 /**
4442  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
4443  * @soc: DP soc
4444  * @vdev: vdev
4445  * @mod_id: id of module requesting reference
4446  *
4447  * Return: VDEV BSS peer
4448  */
4449 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
4450 					   struct dp_vdev *vdev,
4451 					   enum dp_mod_id mod_id)
4452 {
4453 	struct dp_peer *peer = NULL;
4454 
4455 	qdf_spin_lock_bh(&vdev->peer_list_lock);
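	/* Walk the vdev's peer list; the bss peer is identified by the
	 * peer->bss_peer flag.
	 */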
4456 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4457 		if (peer->bss_peer)
4458 			break;
4459 	}
4460 
4461 	if (!peer) {
4462 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4463 		return NULL;
4464 	}
4465 
4466 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4467 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4468 		return peer;
4469 	}
4470 
4471 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4472 	return NULL;
4473 }
4474 
4475 /**
4476  * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
4477  * @soc: DP soc
4478  * @vdev: vdev
4479  * @mod_id: id of module requesting reference
4480  *
4481  * Return: VDEV self peer
4482  */
4483 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
4484 						struct dp_vdev *vdev,
4485 						enum dp_mod_id mod_id)
4486 {
4487 	struct dp_peer *peer;
4488 
4489 	if (vdev->opmode != wlan_op_mode_sta)
4490 		return NULL;
4491 
4492 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4493 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4494 		if (peer->sta_self_peer)
4495 			break;
4496 	}
4497 
4498 	if (!peer) {
4499 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4500 		return NULL;
4501 	}
4502 
4503 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4504 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4505 		return peer;
4506 	}
4507 
4508 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4509 	return NULL;
4510 }
4511