xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef WLAN_TX_PKT_CAPTURE_ENH
37 #include "dp_tx_capture.h"
38 #endif
39 
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 
44 #ifdef FEATURE_WDS
45 static inline bool
46 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
47 				    struct dp_ast_entry *ast_entry)
48 {
49 	/* If peer map v2 is enabled, we do not free the ast entry here;
50 	 * it is supposed to be freed in the unmap event (after we
51 	 * receive delete confirmation from the target).
52 	 *
53 	 * If peer_id is invalid, we did not get the peer map event for
54 	 * the peer; only in that case is the ast entry freed from here.
55 	 */
56 
57 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
58 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
59 		return true;
60 
61 	return false;
62 }
63 #else
64 static inline bool
65 dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
66 				    struct dp_ast_entry *ast_entry)
67 {
68 	return false;
69 }
70 #endif
71 
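/*
 * dp_set_ssn_valid_flag() - set the SSN valid (svld) field in a REO
 *			     queue update command
 * @params: REO command parameters to update
 * @valid: value to program into the svld field
 *
 * Return: None
 */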
72 static inline void
73 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
74 					uint8_t valid)
75 {
76 	params->u.upd_queue_params.update_svld = 1;
77 	params->u.upd_queue_params.svld = valid;
78 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
79 		  "%s: Setting SSN valid bit to %d",
80 		  __func__, valid);
81 }
82 
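/*
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the two addresses match, non-zero otherwise (memcmp-like)
 */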
83 static inline int dp_peer_find_mac_addr_cmp(
84 	union dp_align_mac_addr *mac_addr1,
85 	union dp_align_mac_addr *mac_addr2)
86 {
87 		/*
88 		 * Intentionally use & rather than &&.
89 		 * Because the operands are binary rather than generic boolean,
90 		 * the functionality is equivalent.
91 		 * Using && has the advantage of short-circuited evaluation,
92 		 * but using & has the advantage of no conditional branching,
93 		 * which is a more significant benefit.
94 		 */
95 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
96 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
97 }
98 
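/*
 * dp_peer_ast_table_attach() - allocate the AST index to AST entry map
 * @soc: soc handle
 *
 * Allocates soc->ast_table, sized by the configured maximum AST index,
 * which maps a HW AST index to its dp_ast_entry pointer.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */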
99 static int dp_peer_ast_table_attach(struct dp_soc *soc)
100 {
101 	uint32_t max_ast_index;
102 
103 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
104 	/* allocate ast_table for ast entry to ast_index map */
105 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
106 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
107 	soc->ast_table = qdf_mem_malloc(max_ast_index *
108 					sizeof(struct dp_ast_entry *));
109 	if (!soc->ast_table) {
110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
111 			  "%s: ast_table memory allocation failed", __func__);
112 		return QDF_STATUS_E_NOMEM;
113 	}
114 	return 0; /* success */
115 }
116 
117 /*
118  * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
119  * @soc: soc handle
120  *
121  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
122  */
123 static int dp_peer_find_map_attach(struct dp_soc *soc)
124 {
125 	uint32_t max_peers, peer_map_size;
126 
127 	max_peers = soc->max_peers;
128 	/* allocate the peer ID -> peer object map */
129 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
130 		  "\n<=== cfg max peer id %d ====>", max_peers);
131 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
132 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
133 	if (!soc->peer_id_to_obj_map) {
134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
135 			  "%s: peer map memory allocation failed", __func__);
136 		return QDF_STATUS_E_NOMEM;
137 	}
138 
139 	/*
140 	 * The peer_id_to_obj_map doesn't really need to be initialized,
141 	 * since elements are only used after they have been individually
142 	 * initialized.
143 	 * However, it is convenient for debugging to have all elements
144 	 * that are not in use set to 0.
145 	 */
146 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
147 
148 	qdf_spinlock_create(&soc->peer_map_lock);
149 	return 0; /* success */
150 }
151 
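/*
 * dp_log2_ceil() - compute the ceiling of log2(value)
 * @value: input value
 *
 * For example, dp_log2_ceil(1) = 0, dp_log2_ceil(4) = 2 and
 * dp_log2_ceil(5) = 3.
 *
 * Return: smallest n such that (1 << n) >= value
 */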
152 static int dp_log2_ceil(unsigned int value)
153 {
154 	unsigned int tmp = value;
155 	int log2 = -1;
156 
157 	while (tmp) {
158 		log2++;
159 		tmp >>= 1;
160 	}
161 	if (1 << log2 != value)
162 		log2++;
163 	return log2;
164 }
165 
166 #define DP_PEER_HASH_LOAD_MULT  2
167 #define DP_PEER_HASH_LOAD_SHIFT 0
168 
169 #define DP_AST_HASH_LOAD_MULT  2
170 #define DP_AST_HASH_LOAD_SHIFT 0
171 
172 /*
173  * dp_peer_find_hash_attach() - allocate memory for peer_hash table
174  * @soc: soc handle
175  *
176  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
177  */
178 static int dp_peer_find_hash_attach(struct dp_soc *soc)
179 {
180 	int i, hash_elems, log2;
181 
182 	/* allocate the peer MAC address -> peer object hash table */
183 	hash_elems = soc->max_peers;
184 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
185 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
186 	log2 = dp_log2_ceil(hash_elems);
187 	hash_elems = 1 << log2;
188 
189 	soc->peer_hash.mask = hash_elems - 1;
190 	soc->peer_hash.idx_bits = log2;
191 	/* allocate an array of TAILQ peer object lists */
192 	soc->peer_hash.bins = qdf_mem_malloc(
193 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
194 	if (!soc->peer_hash.bins)
195 		return QDF_STATUS_E_NOMEM;
196 
197 	for (i = 0; i < hash_elems; i++)
198 		TAILQ_INIT(&soc->peer_hash.bins[i]);
199 
200 	qdf_spinlock_create(&soc->peer_hash_lock);
201 	return 0;
202 }
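
/*
 * Worked sizing example (illustrative, assuming soc->max_peers = 1024):
 * hash_elems = 1024 * DP_PEER_HASH_LOAD_MULT >> DP_PEER_HASH_LOAD_SHIFT
 *            = 2048, which is already a power of two, so
 * dp_log2_ceil(2048) = 11, mask = 0x7ff and idx_bits = 11.
 */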
203 
204 /*
205  * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
206  * @soc: soc handle
207  *
208  * return: none
209  */
210 static void dp_peer_find_hash_detach(struct dp_soc *soc)
211 {
212 	if (soc->peer_hash.bins) {
213 		qdf_mem_free(soc->peer_hash.bins);
214 		soc->peer_hash.bins = NULL;
215 		qdf_spinlock_destroy(&soc->peer_hash_lock);
216 	}
217 }
218 
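/*
 * dp_peer_find_hash_index() - compute the peer hash bin index for a MAC
 * @soc: soc handle
 * @mac_addr: aligned MAC address
 *
 * XORs the three 16-bit halves of the MAC address, folds the result by
 * idx_bits and masks it to the hash table size.
 *
 * Return: hash bin index
 */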
219 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
220 	union dp_align_mac_addr *mac_addr)
221 {
222 	unsigned index;
223 
224 	index =
225 		mac_addr->align2.bytes_ab ^
226 		mac_addr->align2.bytes_cd ^
227 		mac_addr->align2.bytes_ef;
228 	index ^= index >> soc->peer_hash.idx_bits;
229 	index &= soc->peer_hash.mask;
230 	return index;
231 }
232 
233 /*
234  * dp_peer_find_hash_add() - add peer to peer_hash_table
235  * @soc: soc handle
236  * @peer: peer handle
237  *
238  * return: none
239  */
240 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
241 {
242 	unsigned index;
243 
244 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
245 	qdf_spin_lock_bh(&soc->peer_hash_lock);
246 
247 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
248 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
249 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
250 		qdf_spin_unlock_bh(&soc->peer_hash_lock);
251 		return;
252 	}
253 
254 	/*
255 	 * It is important to add the new peer at the tail of the peer list
256 	 * with the bin index.  Together with having the hash_find function
257 	 * search from head to tail, this ensures that if two entries with
258 	 * the same MAC address are stored, the one added first will be
259 	 * found first.
260 	 */
261 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
262 
263 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
264 }
265 
266 /*
267  * dp_peer_vdev_list_add() - add peer into vdev's peer list
268  * @soc: soc handle
269  * @vdev: vdev handle
270  * @peer: peer handle
271  *
272  * return: none
273  */
274 void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
275 			   struct dp_peer *peer)
276 {
277 	qdf_spin_lock_bh(&vdev->peer_list_lock);
278 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
279 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
280 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
281 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
282 		return;
283 	}
284 
285 	/* add this peer into the vdev's list */
286 	if (wlan_op_mode_sta == vdev->opmode)
287 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
288 	else
289 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
290 
291 	vdev->num_peers++;
292 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
293 }
294 
295 /*
296  * dp_peer_vdev_list_remove() - remove peer from vdev's peer list
297  * @soc: SoC handle
298  * @vdev: VDEV handle
299  * @peer: peer handle
300  *
301  * Return: none
302  */
303 void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
304 			      struct dp_peer *peer)
305 {
306 	uint8_t found = 0;
307 	struct dp_peer *tmppeer = NULL;
308 
309 	qdf_spin_lock_bh(&vdev->peer_list_lock);
310 	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
311 		if (tmppeer == peer) {
312 			found = 1;
313 			break;
314 		}
315 	}
316 
317 	if (found) {
318 		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
319 			     peer_list_elem);
320 		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
321 		vdev->num_peers--;
322 	} else {
323 		/*Ignoring the remove operation as peer not found*/
324 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
325 			  "peer:%pK not found in vdev:%pK peerlist:%pK",
326 			  peer, vdev, &peer->vdev->peer_list);
327 	}
328 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
329 }
330 
331 /*
332  * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
333  * @soc: SoC handle
334  * @peer: peer handle
335  * @peer_id: peer_id
336  *
337  * Return: None
338  */
339 void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
340 				struct dp_peer *peer,
341 				uint16_t peer_id)
342 {
343 	QDF_ASSERT(peer_id <= soc->max_peers);
344 
345 	qdf_spin_lock_bh(&soc->peer_map_lock);
346 
347 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
348 		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
349 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
350 		qdf_spin_unlock_bh(&soc->peer_map_lock);
351 		return;
352 	}
353 
354 	if (!soc->peer_id_to_obj_map[peer_id]) {
355 		soc->peer_id_to_obj_map[peer_id] = peer;
356 	} else {
357 		/* Peer map event came for peer_id which
358 		 * is already mapped, this is not expected
359 		 */
360 		QDF_ASSERT(0);
361 	}
362 	qdf_spin_unlock_bh(&soc->peer_map_lock);
363 }
364 
365 /*
366  * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
367  * @soc: SoC handle
368  * @peer_id: peer_id
369  *
370  * Return: None
371  */
372 void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
373 				   uint16_t peer_id)
374 {
375 	struct dp_peer *peer = NULL;
376 	QDF_ASSERT(peer_id <= soc->max_peers);
377 
378 	qdf_spin_lock_bh(&soc->peer_map_lock);
379 	peer = soc->peer_id_to_obj_map[peer_id];
380 	soc->peer_id_to_obj_map[peer_id] = NULL;
381 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
382 	qdf_spin_unlock_bh(&soc->peer_map_lock);
383 }
384 
385 /*
386  * dp_peer_exist_on_pdev - check if a peer with the given mac address exists on pdev
387  *
388  * @soc: Datapath SOC handle
389  * @peer_mac_addr: peer mac address
390  * @mac_addr_is_aligned: is mac address aligned
391  * @pdev: Datapath PDEV handle
392  *
393  * Return: true if the peer is found, else false
394  */
395 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
396 				  uint8_t *peer_mac_addr,
397 				  int mac_addr_is_aligned,
398 				  struct dp_pdev *pdev)
399 {
400 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
401 	unsigned int index;
402 	struct dp_peer *peer;
403 	bool found = false;
404 
405 	if (mac_addr_is_aligned) {
406 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
407 	} else {
408 		qdf_mem_copy(
409 			&local_mac_addr_aligned.raw[0],
410 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
411 		mac_addr = &local_mac_addr_aligned;
412 	}
413 	index = dp_peer_find_hash_index(soc, mac_addr);
414 	qdf_spin_lock_bh(&soc->peer_hash_lock);
415 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
416 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
417 		    (peer->vdev->pdev == pdev)) {
418 			found = true;
419 			break;
420 		}
421 	}
422 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
423 	return found;
424 }
425 
426 #ifdef FEATURE_AST
427 /*
428  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
429  * @soc: SoC handle
430  *
431  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
432  */
433 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
434 {
435 	int i, hash_elems, log2;
436 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
437 
438 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
439 		DP_AST_HASH_LOAD_SHIFT);
440 
441 	log2 = dp_log2_ceil(hash_elems);
442 	hash_elems = 1 << log2;
443 
444 	soc->ast_hash.mask = hash_elems - 1;
445 	soc->ast_hash.idx_bits = log2;
446 
447 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
448 		  "ast hash_elems: %d, max_ast_idx: %d",
449 		  hash_elems, max_ast_idx);
450 
451 	/* allocate an array of TAILQ peer object lists */
452 	soc->ast_hash.bins = qdf_mem_malloc(
453 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
454 				dp_ast_entry)));
455 
456 	if (!soc->ast_hash.bins)
457 		return QDF_STATUS_E_NOMEM;
458 
459 	for (i = 0; i < hash_elems; i++)
460 		TAILQ_INIT(&soc->ast_hash.bins[i]);
461 
462 	return 0;
463 }
464 
465 /*
466  * dp_peer_ast_cleanup() - cleanup the references
467  * @soc: SoC handle
468  * @ast: ast entry
469  *
470  * Return: None
471  */
472 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
473 				       struct dp_ast_entry *ast)
474 {
475 	txrx_ast_free_cb cb = ast->callback;
476 	void *cookie = ast->cookie;
477 
478 	/* Call the callbacks to free up the cookie */
479 	if (cb) {
480 		ast->callback = NULL;
481 		ast->cookie = NULL;
482 		cb(soc->ctrl_psoc,
483 		   dp_soc_to_cdp_soc(soc),
484 		   cookie,
485 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
486 	}
487 }
488 
489 /*
490  * dp_peer_ast_hash_detach() - Free AST Hash table
491  * @soc: SoC handle
492  *
493  * Return: None
494  */
495 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
496 {
497 	unsigned int index;
498 	struct dp_ast_entry *ast, *ast_next;
499 
500 	if (!soc->ast_hash.mask)
501 		return;
502 
503 	if (!soc->ast_hash.bins)
504 		return;
505 
506 	qdf_spin_lock_bh(&soc->ast_lock);
507 	for (index = 0; index <= soc->ast_hash.mask; index++) {
508 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
509 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
510 					   hash_list_elem, ast_next) {
511 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
512 					     hash_list_elem);
513 				dp_peer_ast_cleanup(soc, ast);
514 				soc->num_ast_entries--;
515 				qdf_mem_free(ast);
516 			}
517 		}
518 	}
519 	qdf_spin_unlock_bh(&soc->ast_lock);
520 
521 	qdf_mem_free(soc->ast_hash.bins);
522 	soc->ast_hash.bins = NULL;
523 }
524 
525 /*
526  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
527  * @soc: SoC handle
528  *
529  * Return: AST hash
530  */
531 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
532 	union dp_align_mac_addr *mac_addr)
533 {
534 	uint32_t index;
535 
536 	index =
537 		mac_addr->align2.bytes_ab ^
538 		mac_addr->align2.bytes_cd ^
539 		mac_addr->align2.bytes_ef;
540 	index ^= index >> soc->ast_hash.idx_bits;
541 	index &= soc->ast_hash.mask;
542 	return index;
543 }
544 
545 /*
546  * dp_peer_ast_hash_add() - Add AST entry into hash table
547  * @soc: SoC handle
548  *
549  * This function adds the AST entry into SoC AST hash table
550  * It assumes caller has taken the ast lock to protect the access to this table
551  *
552  * Return: None
553  */
554 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
555 		struct dp_ast_entry *ase)
556 {
557 	uint32_t index;
558 
559 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
560 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
561 }
562 
563 /*
564  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
565  * @soc: SoC handle
566  *
567  * This function removes the AST entry from soc AST hash table
568  * It assumes caller has taken the ast lock to protect the access to this table
569  *
570  * Return: None
571  */
572 void dp_peer_ast_hash_remove(struct dp_soc *soc,
573 			     struct dp_ast_entry *ase)
574 {
575 	unsigned index;
576 	struct dp_ast_entry *tmpase;
577 	int found = 0;
578 
579 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
580 	/* Check that the hash list is not empty before delete */
581 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
582 
583 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
584 		if (tmpase == ase) {
585 			found = 1;
586 			break;
587 		}
588 	}
589 
590 	QDF_ASSERT(found);
591 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
592 }
593 
594 /*
595  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
596  * @soc: SoC handle
597  *
598  * It assumes caller has taken the ast lock to protect the access to
599  * AST hash table
600  *
601  * Return: AST entry
602  */
603 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
604 						     uint8_t *ast_mac_addr,
605 						     uint8_t vdev_id)
606 {
607 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
608 	uint32_t index;
609 	struct dp_ast_entry *ase;
610 
611 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
612 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
613 	mac_addr = &local_mac_addr_aligned;
614 
615 	index = dp_peer_ast_hash_index(soc, mac_addr);
616 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
617 		if ((vdev_id == ase->vdev_id) &&
618 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
619 			return ase;
620 		}
621 	}
622 
623 	return NULL;
624 }
625 
626 /*
627  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
628  * @soc: SoC handle
629  *
630  * It assumes caller has taken the ast lock to protect the access to
631  * AST hash table
632  *
633  * Return: AST entry
634  */
635 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
636 						     uint8_t *ast_mac_addr,
637 						     uint8_t pdev_id)
638 {
639 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
640 	uint32_t index;
641 	struct dp_ast_entry *ase;
642 
643 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
644 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
645 	mac_addr = &local_mac_addr_aligned;
646 
647 	index = dp_peer_ast_hash_index(soc, mac_addr);
648 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
649 		if ((pdev_id == ase->pdev_id) &&
650 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
651 			return ase;
652 		}
653 	}
654 
655 	return NULL;
656 }
657 
658 /*
659  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
660  * @soc: SoC handle
661  *
662  * It assumes caller has taken the ast lock to protect the access to
663  * AST hash table
664  *
665  * Return: AST entry
666  */
667 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
668 					       uint8_t *ast_mac_addr)
669 {
670 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
671 	unsigned index;
672 	struct dp_ast_entry *ase;
673 
674 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
675 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
676 	mac_addr = &local_mac_addr_aligned;
677 
678 	index = dp_peer_ast_hash_index(soc, mac_addr);
679 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
680 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
681 			return ase;
682 		}
683 	}
684 
685 	return NULL;
686 }
687 
688 /*
689  * dp_peer_map_ast() - Map the ast entry with HW AST Index
690  * @soc: SoC handle
691  * @peer: peer to which ast node belongs
692  * @mac_addr: MAC address of ast node
693  * @hw_peer_id: HW AST Index returned by target in peer map event
694  * @vdev_id: vdev id of the VAP to which the peer belongs
695  * @ast_hash: ast hash value in HW
696  * @is_wds: flag to indicate peer map event for WDS ast entry
697  *
698  * Return: QDF_STATUS code
699  */
700 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
701 					 struct dp_peer *peer,
702 					 uint8_t *mac_addr,
703 					 uint16_t hw_peer_id,
704 					 uint8_t vdev_id,
705 					 uint16_t ast_hash,
706 					 uint8_t is_wds)
707 {
708 	struct dp_ast_entry *ast_entry = NULL;
709 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
710 	void *cookie = NULL;
711 	txrx_ast_free_cb cb = NULL;
712 	QDF_STATUS err = QDF_STATUS_SUCCESS;
713 
714 	if (!peer) {
715 		return QDF_STATUS_E_INVAL;
716 	}
717 
718 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
719 		  "%s: peer %pK ID %d vid %d mac "QDF_MAC_ADDR_FMT,
720 		  __func__, peer, hw_peer_id, vdev_id,
721 		   QDF_MAC_ADDR_REF(mac_addr));
722 
723 	qdf_spin_lock_bh(&soc->ast_lock);
724 
725 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
726 
727 	if (is_wds) {
728 		/*
729 		 * In certain cases like Auth attack on a repeater
730 		 * In certain cases, such as an auth attack on a repeater,
731 		 * the number of ast_entries falling in the same hash
732 		 * bucket can exceed the max_skid length supported by
733 		 * HW in the root AP. In these cases
734 		 * 0xffff indicating HW could not add the entry in
735 		 * its table. Host has to delete the entry from its
736 		 * table in these cases.
737 		 */
738 		if (hw_peer_id == HTT_INVALID_PEER) {
739 			DP_STATS_INC(soc, ast.map_err, 1);
740 			if (ast_entry) {
741 				if (ast_entry->is_mapped) {
742 					soc->ast_table[ast_entry->ast_idx] =
743 						NULL;
744 				}
745 
746 				cb = ast_entry->callback;
747 				cookie = ast_entry->cookie;
748 				peer_type = ast_entry->type;
749 
750 				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
751 				dp_peer_free_ast_entry(soc, ast_entry);
752 
753 				qdf_spin_unlock_bh(&soc->ast_lock);
754 
755 				if (cb) {
756 					cb(soc->ctrl_psoc,
757 					   dp_soc_to_cdp_soc(soc),
758 					   cookie,
759 					   CDP_TXRX_AST_DELETED);
760 				}
761 			} else {
762 				qdf_spin_unlock_bh(&soc->ast_lock);
763 				dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
764 					 peer, peer->peer_id,
765 					 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
766 					 QDF_MAC_ADDR_REF(mac_addr),
767 					 vdev_id, is_wds);
768 			}
769 			err = QDF_STATUS_E_INVAL;
770 
771 			dp_hmwds_ast_add_notify(peer, mac_addr,
772 						peer_type, err, true);
773 
774 			return err;
775 		}
776 	}
777 
778 	if (ast_entry) {
779 		ast_entry->ast_idx = hw_peer_id;
780 		soc->ast_table[hw_peer_id] = ast_entry;
781 		ast_entry->is_active = TRUE;
782 		peer_type = ast_entry->type;
783 		ast_entry->ast_hash_value = ast_hash;
784 		ast_entry->is_mapped = TRUE;
785 		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);
786 
787 		ast_entry->peer_id = peer->peer_id;
788 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
789 				  ase_list_elem);
790 	}
791 
792 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
793 		if (soc->cdp_soc.ol_ops->peer_map_event) {
794 			soc->cdp_soc.ol_ops->peer_map_event(
795 			soc->ctrl_psoc, peer->peer_id,
796 			hw_peer_id, vdev_id,
797 			mac_addr, peer_type, ast_hash);
798 		}
799 	} else {
800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
801 			  "AST entry not found");
802 		err = QDF_STATUS_E_NOENT;
803 	}
804 
805 	qdf_spin_unlock_bh(&soc->ast_lock);
806 
807 	dp_hmwds_ast_add_notify(peer, mac_addr,
808 				peer_type, err, true);
809 
810 	return err;
811 }
812 
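/*
 * dp_peer_free_hmwds_cb() - AST free callback used to re-add an HMWDS entry
 * @ctrl_psoc: control path psoc handle
 * @dp_soc: CDP soc handle
 * @cookie: dp_ast_free_cb_params cookie registered by dp_peer_add_ast()
 * @status: AST delete status reported for the old entry
 *
 * Once the old AST entry is confirmed deleted (CDP_TXRX_AST_DELETED),
 * look up the peer again and add the pending HMWDS AST entry.
 *
 * Return: None
 */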
813 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
814 			   struct cdp_soc *dp_soc,
815 			   void *cookie,
816 			   enum cdp_ast_free_status status)
817 {
818 	struct dp_ast_free_cb_params *param =
819 		(struct dp_ast_free_cb_params *)cookie;
820 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
821 	struct dp_peer *peer = NULL;
822 	QDF_STATUS err = QDF_STATUS_SUCCESS;
823 
824 	if (status != CDP_TXRX_AST_DELETED) {
825 		qdf_mem_free(cookie);
826 		return;
827 	}
828 
829 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
830 				      0, param->vdev_id, DP_MOD_ID_AST);
831 	if (peer) {
832 		err = dp_peer_add_ast(soc, peer,
833 				      &param->mac_addr.raw[0],
834 				      param->type,
835 				      param->flags);
836 
837 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
838 					param->type, err, false);
839 
840 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
841 	}
842 	qdf_mem_free(cookie);
843 }
844 
845 /*
846  * dp_peer_add_ast() - Allocate and add AST entry into peer list
847  * @soc: SoC handle
848  * @peer: peer to which ast node belongs
849  * @mac_addr: MAC address of ast node
850  * @type: AST entry type
 * @flags: AST configuration flags
851  *
852  * This API is used by WDS source port learning function to
853  * add a new AST entry into peer AST list
854  *
855  * Return: QDF_STATUS code
856  */
857 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
858 			   struct dp_peer *peer,
859 			   uint8_t *mac_addr,
860 			   enum cdp_txrx_ast_entry_type type,
861 			   uint32_t flags)
862 {
863 	struct dp_ast_entry *ast_entry = NULL;
864 	struct dp_vdev *vdev = NULL;
865 	struct dp_pdev *pdev = NULL;
866 	uint8_t next_node_mac[6];
867 	txrx_ast_free_cb cb = NULL;
868 	void *cookie = NULL;
869 	struct dp_peer *vap_bss_peer = NULL;
870 	bool is_peer_found = false;
871 
872 	vdev = peer->vdev;
873 	if (!vdev) {
874 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
875 			  FL("Peer's vdev is NULL"));
876 		QDF_ASSERT(0);
877 		return QDF_STATUS_E_INVAL;
878 	}
879 
880 	pdev = vdev->pdev;
881 
882 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
883 
884 	qdf_spin_lock_bh(&soc->ast_lock);
885 
886 	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
887 		if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
888 		    (type != CDP_TXRX_AST_TYPE_SELF)) {
889 			qdf_spin_unlock_bh(&soc->ast_lock);
890 			return QDF_STATUS_E_BUSY;
891 		}
892 	}
893 
894 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
895 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: "QDF_MAC_ADDR_FMT" peer: %pK mac "QDF_MAC_ADDR_FMT,
896 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
897 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
898 		  QDF_MAC_ADDR_REF(mac_addr));
899 
900 
901 	/* fw supports only 2 times the max_peers ast entries */
902 	if (soc->num_ast_entries >=
903 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
904 		qdf_spin_unlock_bh(&soc->ast_lock);
905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
906 			  FL("Max ast entries reached"));
907 		return QDF_STATUS_E_RESOURCES;
908 	}
909 
910 	/* If the AST entry already exists, just return from here.
911 	 * An ast entry with the same mac address can exist on different
912 	 * radios; if ast_override support is enabled, use search by pdev
913 	 * in this case.
914 	 */
915 	if (soc->ast_override_support) {
916 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
917 							    pdev->pdev_id);
918 		if (ast_entry) {
919 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
920 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
921 				ast_entry->is_active = TRUE;
922 
923 			qdf_spin_unlock_bh(&soc->ast_lock);
924 			return QDF_STATUS_E_ALREADY;
925 		}
926 		if (is_peer_found) {
927 			/* During WDS to static roaming, the peer is added
928 			 * to the list before the static AST entry is created.
929 			 * So, allow an AST entry of STATIC type
930 			 * even if the peer is present.
931 			 */
932 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
933 				qdf_spin_unlock_bh(&soc->ast_lock);
934 				return QDF_STATUS_E_ALREADY;
935 			}
936 		}
937 	} else {
938 		/* For HM WDS secondary (WDS_HM_SEC) entries, multiple entries can be
939 		 * added for the same mac address; do not check for an existing entry.
940 		 */
941 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
942 			goto add_ast_entry;
943 
944 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
945 
946 		if (ast_entry) {
947 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
948 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
949 				ast_entry->is_active = TRUE;
950 
951 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
952 			    !ast_entry->delete_in_progress) {
953 				qdf_spin_unlock_bh(&soc->ast_lock);
954 				return QDF_STATUS_E_ALREADY;
955 			}
956 
957 			/* An add for an HMWDS entry cannot be ignored if there
958 			 * is already an AST entry with the same mac address.
959 			 *
960 			 * If an ast entry exists with the requested mac address,
961 			 * send a delete command and register a callback which
962 			 * can take care of adding the HMWDS ast entry on delete
963 			 * confirmation from the target.
964 			 */
965 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
966 				struct dp_ast_free_cb_params *param = NULL;
967 
968 				if (ast_entry->type ==
969 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
970 					goto add_ast_entry;
971 
972 				/* save existing callback */
973 				if (ast_entry->callback) {
974 					cb = ast_entry->callback;
975 					cookie = ast_entry->cookie;
976 				}
977 
978 				param = qdf_mem_malloc(sizeof(*param));
979 				if (!param) {
980 					QDF_TRACE(QDF_MODULE_ID_TXRX,
981 						  QDF_TRACE_LEVEL_ERROR,
982 						  "Allocation failed");
983 					qdf_spin_unlock_bh(&soc->ast_lock);
984 					return QDF_STATUS_E_NOMEM;
985 				}
986 
987 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
988 					     QDF_MAC_ADDR_SIZE);
989 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
990 					     &peer->mac_addr.raw[0],
991 					     QDF_MAC_ADDR_SIZE);
992 				param->type = type;
993 				param->flags = flags;
994 				param->vdev_id = vdev->vdev_id;
995 				ast_entry->callback = dp_peer_free_hmwds_cb;
996 				ast_entry->pdev_id = vdev->pdev->pdev_id;
997 				ast_entry->type = type;
998 				ast_entry->cookie = (void *)param;
999 				if (!ast_entry->delete_in_progress)
1000 					dp_peer_del_ast(soc, ast_entry);
1001 
1002 				qdf_spin_unlock_bh(&soc->ast_lock);
1003 
1004 				/* Call the saved callback*/
1005 				if (cb) {
1006 					cb(soc->ctrl_psoc,
1007 					   dp_soc_to_cdp_soc(soc),
1008 					   cookie,
1009 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1010 				}
1011 				return QDF_STATUS_E_AGAIN;
1012 			}
1013 
1014 			/* Modify an already existing AST entry from type
1015 			 * WDS to MEC on promotion. This serves as a fix when the
1016 			 * backbone of interfaces is interchanged, wherein the
1017 			 * wds entry becomes its own MEC. The entry should be
1018 			 * replaced only when the ast_entry peer matches the
1019 			 * peer received in the mec event. This additional check
1020 			 * is needed in wds repeater cases where a multicast
1021 			 * packet from a station to the root via the repeater
1022 			 * should not remove the wds entry.
1023 			 */
1024 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1025 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
1026 			    (ast_entry->peer_id == peer->peer_id)) {
1027 				ast_entry->is_active = FALSE;
1028 				dp_peer_del_ast(soc, ast_entry);
1029 			}
1030 			qdf_spin_unlock_bh(&soc->ast_lock);
1031 			return QDF_STATUS_E_ALREADY;
1032 		}
1033 	}
1034 
1035 add_ast_entry:
1036 	ast_entry = (struct dp_ast_entry *)
1037 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
1038 
1039 	if (!ast_entry) {
1040 		qdf_spin_unlock_bh(&soc->ast_lock);
1041 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1042 			  FL("fail to allocate ast_entry"));
1043 		QDF_ASSERT(0);
1044 		return QDF_STATUS_E_NOMEM;
1045 	}
1046 
1047 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1048 	ast_entry->pdev_id = vdev->pdev->pdev_id;
1049 	ast_entry->is_mapped = false;
1050 	ast_entry->delete_in_progress = false;
1051 	ast_entry->peer_id = HTT_INVALID_PEER;
1052 	ast_entry->next_hop = 0;
1053 	ast_entry->vdev_id = vdev->vdev_id;
1054 
1055 	switch (type) {
1056 	case CDP_TXRX_AST_TYPE_STATIC:
1057 		peer->self_ast_entry = ast_entry;
1058 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1059 		if (peer->vdev->opmode == wlan_op_mode_sta)
1060 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1061 		break;
1062 	case CDP_TXRX_AST_TYPE_SELF:
1063 		peer->self_ast_entry = ast_entry;
1064 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1065 		break;
1066 	case CDP_TXRX_AST_TYPE_WDS:
1067 		ast_entry->next_hop = 1;
1068 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1069 		break;
1070 	case CDP_TXRX_AST_TYPE_WDS_HM:
1071 		ast_entry->next_hop = 1;
1072 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1073 		break;
1074 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1075 		ast_entry->next_hop = 1;
1076 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1077 		ast_entry->peer_id = peer->peer_id;
1078 		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1079 				  ase_list_elem);
1080 		break;
1081 	case CDP_TXRX_AST_TYPE_MEC:
1082 		ast_entry->next_hop = 1;
1083 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
1084 		break;
1085 	case CDP_TXRX_AST_TYPE_DA:
1086 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1087 							  DP_MOD_ID_AST);
1088 		if (!vap_bss_peer) {
1089 			qdf_spin_unlock_bh(&soc->ast_lock);
1090 			qdf_mem_free(ast_entry);
1091 			return QDF_STATUS_E_FAILURE;
1092 		}
1093 		peer = vap_bss_peer;
1094 		ast_entry->next_hop = 1;
1095 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1096 		break;
1097 	default:
1098 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1099 			FL("Incorrect AST entry type"));
1100 	}
1101 
1102 	ast_entry->is_active = TRUE;
1103 	DP_STATS_INC(soc, ast.added, 1);
1104 	soc->num_ast_entries++;
1105 	dp_peer_ast_hash_add(soc, ast_entry);
1106 
1107 	if (type == CDP_TXRX_AST_TYPE_MEC)
1108 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
1109 	else
1110 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
1111 
1112 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1113 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1114 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1115 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
1116 		if (QDF_STATUS_SUCCESS ==
1117 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
1118 				soc->ctrl_psoc,
1119 				peer->vdev->vdev_id,
1120 				peer->mac_addr.raw,
1121 				peer->peer_id,
1122 				mac_addr,
1123 				next_node_mac,
1124 				flags,
1125 				ast_entry->type)) {
1126 			if (vap_bss_peer)
1127 				dp_peer_unref_delete(vap_bss_peer,
1128 						     DP_MOD_ID_AST);
1129 			qdf_spin_unlock_bh(&soc->ast_lock);
1130 			return QDF_STATUS_SUCCESS;
1131 		}
1132 	}
1133 
1134 	if (vap_bss_peer)
1135 		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1136 
1137 	qdf_spin_unlock_bh(&soc->ast_lock);
1138 	return QDF_STATUS_E_FAILURE;
1139 }
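
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a WDS source-port-learning path that already holds a reference on the
 * transmitting peer could add a WDS AST entry roughly as below; ta_peer,
 * wds_src_mac and ast_flags are placeholder names.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
 *				 CDP_TXRX_AST_TYPE_WDS, ast_flags);
 *	if (QDF_IS_STATUS_ERROR(status) && status != QDF_STATUS_E_ALREADY)
 *		... handle the failure (entry was not added) ...
 */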
1140 
1141 /*
1142  * dp_peer_free_ast_entry() - Free up the ast entry memory
1143  * @soc: SoC handle
1144  * @ast_entry: Address search entry
1145  *
1146  * This API is used to free up the memory associated with
1147  * AST entry.
1148  *
1149  * Return: None
1150  */
1151 void dp_peer_free_ast_entry(struct dp_soc *soc,
1152 			    struct dp_ast_entry *ast_entry)
1153 {
1154 	/*
1155 	 * NOTE: Ensure that call to this API is done
1156 	 * after soc->ast_lock is taken
1157 	 */
1158 	ast_entry->callback = NULL;
1159 	ast_entry->cookie = NULL;
1160 
1161 	DP_STATS_INC(soc, ast.deleted, 1);
1162 	dp_peer_ast_hash_remove(soc, ast_entry);
1163 	dp_peer_ast_cleanup(soc, ast_entry);
1164 	qdf_mem_free(ast_entry);
1165 	soc->num_ast_entries--;
1166 }
1167 
1168 /*
1169  * dp_peer_unlink_ast_entry() - Unlink the ast entry from the peer list
1170  * @soc: SoC handle
1171  * @ast_entry: Address search entry
1172  * @peer: peer
1173  *
1174  * This API is used to remove/unlink the AST entry from the peer's
1175  * AST list and clear its mapping in the soc AST table.
1176  *
1177  * Return: None
1178  */
1179 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1180 			      struct dp_ast_entry *ast_entry,
1181 			      struct dp_peer *peer)
1182 {
1183 	if (!peer) {
1184 		dp_err_rl("NULL peer");
1185 		return;
1186 	}
1187 
1188 	if (ast_entry->peer_id == HTT_INVALID_PEER) {
1189 		dp_err_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1190 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1191 			  ast_entry->type);
1192 		return;
1193 	}
1194 	/*
1195 	 * NOTE: Ensure that call to this API is done
1196 	 * after soc->ast_lock is taken
1197 	 */
1198 
1199 	qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1200 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1201 
1202 	if (ast_entry == peer->self_ast_entry)
1203 		peer->self_ast_entry = NULL;
1204 
1205 	/*
1206 	 * release the reference only if it is mapped
1207 	 * to ast_table
1208 	 */
1209 	if (ast_entry->is_mapped)
1210 		soc->ast_table[ast_entry->ast_idx] = NULL;
1211 
1212 	ast_entry->peer_id = HTT_INVALID_PEER;
1213 }
1214 
1215 /*
1216  * dp_peer_del_ast() - Delete and free AST entry
1217  * @soc: SoC handle
1218  * @ast_entry: AST entry of the node
1219  *
1220  * This function removes the AST entry from peer and soc tables
1221  * It assumes caller has taken the ast lock to protect the access to these
1222  * tables
1223  *
1224  * Return: None
1225  */
1226 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1227 {
1228 	struct dp_peer *peer = NULL;
1229 
1230 	if (!ast_entry) {
1231 		dp_err_rl("NULL AST entry");
1232 		return;
1233 	}
1234 
1235 	if (ast_entry->delete_in_progress) {
1236 		dp_err_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1237 			  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1238 			  ast_entry->type);
1239 		return;
1240 	}
1241 
1242 	ast_entry->delete_in_progress = true;
1243 
1244 	/* In teardown, del ast is called after setting the logical delete
1245 	 * state; use __dp_peer_get_ref_by_id to get the reference
1246 	 * irrespective of the peer state.
1247 	 */
1248 	peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1249 				       DP_MOD_ID_AST);
1250 
1251 	dp_peer_ast_send_wds_del(soc, ast_entry, peer);
1252 
1253 	/* Remove SELF and STATIC entries in teardown itself */
1254 	if (!ast_entry->next_hop)
1255 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1256 
1257 	if (ast_entry->is_mapped)
1258 		soc->ast_table[ast_entry->ast_idx] = NULL;
1259 
1260 	/* If peer map v2 is enabled, we do not free the ast entry here;
1261 	 * it is supposed to be freed in the unmap event (after we
1262 	 * receive delete confirmation from the target).
1263 	 *
1264 	 * If peer_id is invalid, we did not get the peer map event for
1265 	 * the peer; only in that case is the ast entry freed from here.
1266 	 */
1267 	if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
1268 		goto end;
1269 
1270 	/* For a WDS secondary entry, ast_entry->next_hop would be set, so
1271 	 * unlinking has to be done explicitly here.
1272 	 * As this entry is not a mapped entry, an unmap notification from
1273 	 * FW will not come. Hence unlinking is done right here.
1274 	 */
1275 
1276 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1277 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1278 
1279 	dp_peer_free_ast_entry(soc, ast_entry);
1280 
1281 end:
1282 	if (peer)
1283 		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1284 }
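
/*
 * Illustrative locking sketch (hypothetical caller): dp_peer_del_ast()
 * expects soc->ast_lock to be held, typically around the lookup as well:
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
 *	if (ast_entry)
 *		dp_peer_del_ast(soc, ast_entry);
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */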
1285 
1286 /*
1287  * dp_peer_update_ast() - Update the AST entry to the roamed peer
1288  * @soc: SoC handle
1289  * @peer: peer to which ast node belongs
1290  * @ast_entry: AST entry of the node
1291  * @flags: wds or hmwds
1292  *
1293  * This function updates the AST entry to the roamed peer and soc tables
1294  * It assumes caller has taken the ast lock to protect the access to these
1295  * tables
1296  *
1297  * Return: 0 if ast entry is updated successfully
1298  *         -1 failure
1299  */
1300 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1301 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1302 {
1303 	int ret = -1;
1304 	struct dp_peer *old_peer;
1305 
1306 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1307 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: "QDF_MAC_ADDR_FMT" peer_mac: "QDF_MAC_ADDR_FMT"\n",
1308 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1309 		  peer->vdev->vdev_id, flags,
1310 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1311 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1312 
1313 	/* Do not send AST update in below cases
1314 	 *  1) Ast entry delete has already triggered
1315 	 *  2) Peer delete is already triggered
1316 	 *  3) We did not get the HTT map for create event
1317 	 */
1318 	if (ast_entry->delete_in_progress ||
1319 	    !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
1320 	    !ast_entry->is_mapped)
1321 		return ret;
1322 
1323 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1324 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1325 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1326 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1327 		return 0;
1328 
1329 	/*
1330 	 * Avoids flood of WMI update messages sent to FW for same peer.
1331 	 */
1332 	if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
1333 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1334 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
1335 	    (ast_entry->is_active))
1336 		return 0;
1337 
1338 	old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1339 					 DP_MOD_ID_AST);
1340 	if (!old_peer)
1341 		return 0;
1342 
1343 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1344 
1345 	dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
1346 
1347 	ast_entry->peer_id = peer->peer_id;
1348 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1349 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1350 	ast_entry->is_active = TRUE;
1351 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1352 
1353 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1354 				soc->ctrl_psoc,
1355 				peer->vdev->vdev_id,
1356 				ast_entry->mac_addr.raw,
1357 				peer->mac_addr.raw,
1358 				flags);
1359 
1360 	return ret;
1361 }
1362 
1363 /*
1364  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1365  * @soc: SoC handle
1366  * @ast_entry: AST entry of the node
1367  *
1368  * This function gets the pdev_id from the ast entry.
1369  *
1370  * Return: (uint8_t) pdev_id
1371  */
1372 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1373 				struct dp_ast_entry *ast_entry)
1374 {
1375 	return ast_entry->pdev_id;
1376 }
1377 
1378 /*
1379  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1380  * @soc: SoC handle
1381  * @ast_entry: AST entry of the node
1382  *
1383  * This function gets the next hop from the ast entry.
1384  *
1385  * Return: (uint8_t) next_hop
1386  */
1387 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1388 				struct dp_ast_entry *ast_entry)
1389 {
1390 	return ast_entry->next_hop;
1391 }
1392 
1393 /*
1394  * dp_peer_ast_set_type() - set type in the ast entry
1395  * @soc: SoC handle
1396  * @ast_entry: AST entry of the node
1397  *
1398  * This function sets the type in the ast entry.
1399  *
1400  * Return: None
1401  */
1402 void dp_peer_ast_set_type(struct dp_soc *soc,
1403 				struct dp_ast_entry *ast_entry,
1404 				enum cdp_txrx_ast_entry_type type)
1405 {
1406 	ast_entry->type = type;
1407 }
1408 
1409 #else
1410 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1411 			   struct dp_peer *peer,
1412 			   uint8_t *mac_addr,
1413 			   enum cdp_txrx_ast_entry_type type,
1414 			   uint32_t flags)
1415 {
1416 	return QDF_STATUS_E_FAILURE;
1417 }
1418 
1419 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1420 {
1421 }
1422 
1423 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1424 			struct dp_ast_entry *ast_entry, uint32_t flags)
1425 {
1426 	return 1;
1427 }
1428 
1429 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1430 					       uint8_t *ast_mac_addr)
1431 {
1432 	return NULL;
1433 }
1434 
1435 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1436 						     uint8_t *ast_mac_addr,
1437 						     uint8_t pdev_id)
1438 {
1439 	return NULL;
1440 }
1441 
1442 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1443 {
1444 	return 0;
1445 }
1446 
1447 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1448 					 struct dp_peer *peer,
1449 					 uint8_t *mac_addr,
1450 					 uint16_t hw_peer_id,
1451 					 uint8_t vdev_id,
1452 					 uint16_t ast_hash,
1453 					 uint8_t is_wds)
1454 {
1455 	return QDF_STATUS_SUCCESS;
1456 }
1457 
1458 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1459 {
1460 }
1461 
1462 void dp_peer_ast_set_type(struct dp_soc *soc,
1463 				struct dp_ast_entry *ast_entry,
1464 				enum cdp_txrx_ast_entry_type type)
1465 {
1466 }
1467 
1468 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1469 				struct dp_ast_entry *ast_entry)
1470 {
1471 	return 0xff;
1472 }
1473 
1474 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1475 				struct dp_ast_entry *ast_entry)
1476 {
1477 	return 0xff;
1478 }
1479 
1480 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1481 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1482 {
1483 	return 1;
1484 }
1485 
1486 #endif
1487 
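/*
 * dp_peer_ast_send_wds_del() - send a wds AST delete to the target if needed
 * @soc: SoC handle
 * @ast_entry: AST entry being deleted
 * @peer: peer the AST entry belongs to (may be NULL)
 *
 * For next_hop (wds) entries, ask the control path to delete the entry,
 * requesting deletion in FW only when the peer is not already being torn
 * down by a peer delete command.
 *
 * Return: None
 */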
1488 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1489 			      struct dp_ast_entry *ast_entry,
1490 			      struct dp_peer *peer)
1491 {
1492 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1493 	bool delete_in_fw = false;
1494 
1495 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1496 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %u\n",
1497 		  __func__, ast_entry->type, ast_entry->pdev_id,
1498 		  ast_entry->vdev_id,
1499 		  QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1500 		  ast_entry->next_hop, ast_entry->peer_id);
1501 
1502 	/*
1503 	 * If the peer state is logical delete, the peer is about to be
1504 	 * torn down with a peer delete command to firmware,
1505 	 * which will clean up all the wds ast entries.
1506 	 * So, no need to send explicit wds ast delete to firmware.
1507 	 */
1508 	if (ast_entry->next_hop) {
1509 		if (peer && dp_peer_state_cmp(peer,
1510 					      DP_PEER_STATE_LOGICAL_DELETE))
1511 			delete_in_fw = false;
1512 		else
1513 			delete_in_fw = true;
1514 
1515 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1516 						    ast_entry->vdev_id,
1517 						    ast_entry->mac_addr.raw,
1518 						    ast_entry->type,
1519 						    delete_in_fw);
1520 	}
1521 
1522 }
1523 
1524 #ifdef FEATURE_WDS
1525 /**
1526  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
1527  * @soc: soc handle
1528  * @peer: peer handle
1529  *
1530  * Free all the wds ast entries associated with peer
1531  *
1532  * Return: Number of wds ast entries freed
1533  */
1534 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
1535 					     struct dp_peer *peer)
1536 {
1537 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
1538 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1539 	uint32_t num_ast = 0;
1540 
1541 	TAILQ_INIT(&ast_local_list);
1542 	qdf_spin_lock_bh(&soc->ast_lock);
1543 
1544 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1545 		if (ast_entry->next_hop)
1546 			num_ast++;
1547 
1548 		if (ast_entry->is_mapped)
1549 			soc->ast_table[ast_entry->ast_idx] = NULL;
1550 
1551 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1552 		DP_STATS_INC(soc, ast.deleted, 1);
1553 		dp_peer_ast_hash_remove(soc, ast_entry);
1554 		TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
1555 				  ase_list_elem);
1556 		soc->num_ast_entries--;
1557 	}
1558 
1559 	qdf_spin_unlock_bh(&soc->ast_lock);
1560 
1561 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
1562 			   temp_ast_entry) {
1563 		if (ast_entry->callback)
1564 			ast_entry->callback(soc->ctrl_psoc,
1565 					    dp_soc_to_cdp_soc(soc),
1566 					    ast_entry->cookie,
1567 					    CDP_TXRX_AST_DELETED);
1568 
1569 		qdf_mem_free(ast_entry);
1570 	}
1571 
1572 	return num_ast;
1573 }
1574 /**
1575  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
1576  * @soc: soc handle
1577  * @peer: peer handle
1578  * @free_wds_count: number of wds entries freed by FW with peer delete
1579  *
1580  * Free all the wds ast entries associated with peer and compare with
1581  * the value received from firmware
1582  *
1583  * Return: None
1584  */
1585 static void
1586 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1587 			  uint32_t free_wds_count)
1588 {
1589 	uint32_t wds_deleted = 0;
1590 
1591 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
1592 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
1593 	    (free_wds_count != wds_deleted)) {
1594 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
1595 		dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not the same as the number deleted by host = %d",
1596 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1597 			 free_wds_count, wds_deleted);
1598 	}
1599 }
1600 
1601 #else
1602 static void
1603 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1604 			  uint32_t free_wds_count)
1605 {
1606 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1607 
1608 	qdf_spin_lock_bh(&soc->ast_lock);
1609 
1610 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1611 		dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1612 
1613 		if (ast_entry->is_mapped)
1614 			soc->ast_table[ast_entry->ast_idx] = NULL;
1615 
1616 		dp_peer_free_ast_entry(soc, ast_entry);
1617 	}
1618 
1619 	peer->self_ast_entry = NULL;
1620 	qdf_spin_unlock_bh(&soc->ast_lock);
1621 }
1622 #endif
1623 
1624 /**
1625  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1626  * @soc: soc handle
1627  * @peer: peer handle
1628  * @vdev_id: vdev_id
1629  * @mac_addr: mac address of the AST entry to search and delete
1630  *
1631  * Find the ast entry from the peer list using the mac address and free
1632  * the entry.
1633  *
1634  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1635  */
1636 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1637 					 struct dp_peer *peer,
1638 					 uint8_t vdev_id,
1639 					 uint8_t *mac_addr)
1640 {
1641 	struct dp_ast_entry *ast_entry;
1642 	void *cookie = NULL;
1643 	txrx_ast_free_cb cb = NULL;
1644 
1645 	/*
1646 	 * release the reference only if it is mapped
1647 	 * to ast_table
1648 	 */
1649 
1650 	qdf_spin_lock_bh(&soc->ast_lock);
1651 
1652 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
1653 	if (!ast_entry) {
1654 		qdf_spin_unlock_bh(&soc->ast_lock);
1655 		return QDF_STATUS_E_NOENT;
1656 	} else if (ast_entry->is_mapped) {
1657 		soc->ast_table[ast_entry->ast_idx] = NULL;
1658 	}
1659 
1660 	cb = ast_entry->callback;
1661 	cookie = ast_entry->cookie;
1662 
1663 
1664 	dp_peer_unlink_ast_entry(soc, ast_entry, peer);
1665 
1666 	dp_peer_free_ast_entry(soc, ast_entry);
1667 
1668 	qdf_spin_unlock_bh(&soc->ast_lock);
1669 
1670 	if (cb) {
1671 		cb(soc->ctrl_psoc,
1672 		   dp_soc_to_cdp_soc(soc),
1673 		   cookie,
1674 		   CDP_TXRX_AST_DELETED);
1675 	}
1676 
1677 	return QDF_STATUS_SUCCESS;
1678 }
1679 
1680 /*
1681  * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
1682  *                            vdev_id and mac_address
1683  * @soc: soc handle
1684  * @peer_mac_addr: peer mac address
1685  * @mac_addr_is_aligned: is mac addr aligned
1686  * @vdev_id: vdev_id
1687  * @mod_id: id of module requesting reference
1688  *
1689  * Return: peer on success
1690  *         NULL on failure
1691  */
1692 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1693 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id,
1694 	enum dp_mod_id mod_id)
1695 {
1696 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1697 	unsigned index;
1698 	struct dp_peer *peer;
1699 
1700 	if (!soc->peer_hash.bins)
1701 		return NULL;
1702 
1703 	if (mac_addr_is_aligned) {
1704 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1705 	} else {
1706 		qdf_mem_copy(
1707 			&local_mac_addr_aligned.raw[0],
1708 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1709 		mac_addr = &local_mac_addr_aligned;
1710 	}
1711 	index = dp_peer_find_hash_index(soc, mac_addr);
1712 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1713 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1714 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1715 			((peer->vdev->vdev_id == vdev_id) ||
1716 			 (vdev_id == DP_VDEV_ALL))) {
1717 			/* take peer reference before returning */
1718 			if (dp_peer_get_ref(soc, peer, mod_id) !=
1719 						QDF_STATUS_SUCCESS)
1720 				peer = NULL;
1721 
1722 			qdf_spin_unlock_bh(&soc->peer_hash_lock);
1723 			return peer;
1724 		}
1725 	}
1726 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1727 	return NULL; /* failure */
1728 }
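
/*
 * Illustrative usage sketch (hypothetical caller): a peer returned by
 * dp_peer_find_hash_find() carries a reference taken for @mod_id and must
 * be released with dp_peer_unref_delete() using the same module id:
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id, DP_MOD_ID_AST);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
 *	}
 */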
1729 
1730 /*
1731  * dp_peer_find_hash_remove() - remove peer from peer_hash_table
1732  * @soc: soc handle
1733  * @peer: peer handle
1734  *
1735  * return: none
1736  */
1737 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1738 {
1739 	unsigned index;
1740 	struct dp_peer *tmppeer = NULL;
1741 	int found = 0;
1742 
1743 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1744 	/* Check that the hash list is not empty before delete */
1745 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1746 
1747 	qdf_spin_lock_bh(&soc->peer_hash_lock);
1748 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1749 		if (tmppeer == peer) {
1750 			found = 1;
1751 			break;
1752 		}
1753 	}
1754 	QDF_ASSERT(found);
1755 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1756 
1757 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
1758 	qdf_spin_unlock_bh(&soc->peer_hash_lock);
1759 }
1760 
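/*
 * dp_peer_find_hash_erase() - free all peers left in the peer hash table
 * @soc: soc handle
 *
 * Used during soc teardown: forces each remaining peer's reference count
 * back to one and drops it, so the peer is freed by dp_peer_unref_delete().
 *
 * Return: None
 */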
1761 void dp_peer_find_hash_erase(struct dp_soc *soc)
1762 {
1763 	int i, mod_id;
1764 
1765 	/*
1766 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1767 	 * it's known that the soc is no longer in use.
1768 	 */
1769 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1770 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1771 			struct dp_peer *peer, *peer_next;
1772 
1773 			/*
1774 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1775 			 * memory access violation after peer is freed
1776 			 */
1777 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1778 				hash_list_elem, peer_next) {
1779 				/*
1780 				 * Don't remove the peer from the hash table -
1781 				 * that would modify the list we are currently
1782 				 * traversing, and it's not necessary anyway.
1783 				 */
1784 				/*
1785 				 * Artificially adjust the peer's ref count to
1786 				 * 1, so it will get deleted by
1787 				 * dp_peer_unref_delete.
1788 				 */
1789 				/* set to zero */
1790 				qdf_atomic_init(&peer->ref_cnt);
1791 				for (mod_id = 0; mod_id < DP_MOD_ID_MAX; mod_id++)
1792 					qdf_atomic_init(&peer->mod_refs[mod_id]);
1793 				/* incr to one */
1794 				qdf_atomic_inc(&peer->ref_cnt);
1795 				qdf_atomic_inc(&peer->mod_refs
1796 						[DP_MOD_ID_CONFIG]);
1797 				dp_peer_unref_delete(peer,
1798 						     DP_MOD_ID_CONFIG);
1799 			}
1800 		}
1801 	}
1802 }
1803 
1804 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1805 {
1806 	if (soc->ast_table) {
1807 		qdf_mem_free(soc->ast_table);
1808 		soc->ast_table = NULL;
1809 	}
1810 }
1811 
1812 /*
1813  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
1814  * @soc: soc handle
1815  *
1816  * return: none
1817  */
1818 static void dp_peer_find_map_detach(struct dp_soc *soc)
1819 {
1820 	if (soc->peer_id_to_obj_map) {
1821 		qdf_mem_free(soc->peer_id_to_obj_map);
1822 		soc->peer_id_to_obj_map = NULL;
1823 		qdf_spinlock_destroy(&soc->peer_map_lock);
1824 	}
1825 }
1826 
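/*
 * dp_peer_find_attach() - allocate the peer and AST lookup tables
 * @soc: soc handle
 *
 * Allocates the peer_id map, peer hash table, AST table and AST hash
 * table. On any failure the tables allocated so far are freed again.
 *
 * Return: 0 on success, 1 on failure
 */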
1827 int dp_peer_find_attach(struct dp_soc *soc)
1828 {
1829 	if (dp_peer_find_map_attach(soc))
1830 		return 1;
1831 
1832 	if (dp_peer_find_hash_attach(soc)) {
1833 		dp_peer_find_map_detach(soc);
1834 		return 1;
1835 	}
1836 
1837 	if (dp_peer_ast_table_attach(soc)) {
1838 		dp_peer_find_hash_detach(soc);
1839 		dp_peer_find_map_detach(soc);
1840 		return 1;
1841 	}
1842 
1843 	if (dp_peer_ast_hash_attach(soc)) {
1844 		dp_peer_ast_table_detach(soc);
1845 		dp_peer_find_hash_detach(soc);
1846 		dp_peer_find_map_detach(soc);
1847 		return 1;
1848 	}
1849 
1850 	return 0; /* success */
1851 }
1852 
1853 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1854 	union hal_reo_status *reo_status)
1855 {
1856 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1857 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1858 
1859 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1860 		return;
1861 
1862 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1863 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1864 			       queue_status->header.status, rx_tid->tid);
1865 		return;
1866 	}
1867 
1868 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1869 		       "ssn: %d\n"
1870 		       "curr_idx  : %d\n"
1871 		       "pn_31_0   : %08x\n"
1872 		       "pn_63_32  : %08x\n"
1873 		       "pn_95_64  : %08x\n"
1874 		       "pn_127_96 : %08x\n"
1875 		       "last_rx_enq_tstamp : %08x\n"
1876 		       "last_rx_deq_tstamp : %08x\n"
1877 		       "rx_bitmap_31_0     : %08x\n"
1878 		       "rx_bitmap_63_32    : %08x\n"
1879 		       "rx_bitmap_95_64    : %08x\n"
1880 		       "rx_bitmap_127_96   : %08x\n"
1881 		       "rx_bitmap_159_128  : %08x\n"
1882 		       "rx_bitmap_191_160  : %08x\n"
1883 		       "rx_bitmap_223_192  : %08x\n"
1884 		       "rx_bitmap_255_224  : %08x\n",
1885 		       rx_tid->tid,
1886 		       queue_status->ssn, queue_status->curr_idx,
1887 		       queue_status->pn_31_0, queue_status->pn_63_32,
1888 		       queue_status->pn_95_64, queue_status->pn_127_96,
1889 		       queue_status->last_rx_enq_tstamp,
1890 		       queue_status->last_rx_deq_tstamp,
1891 		       queue_status->rx_bitmap_31_0,
1892 		       queue_status->rx_bitmap_63_32,
1893 		       queue_status->rx_bitmap_95_64,
1894 		       queue_status->rx_bitmap_127_96,
1895 		       queue_status->rx_bitmap_159_128,
1896 		       queue_status->rx_bitmap_191_160,
1897 		       queue_status->rx_bitmap_223_192,
1898 		       queue_status->rx_bitmap_255_224);
1899 
1900 	DP_PRINT_STATS(
1901 		       "curr_mpdu_cnt      : %d\n"
1902 		       "curr_msdu_cnt      : %d\n"
1903 		       "fwd_timeout_cnt    : %d\n"
1904 		       "fwd_bar_cnt        : %d\n"
1905 		       "dup_cnt            : %d\n"
1906 		       "frms_in_order_cnt  : %d\n"
1907 		       "bar_rcvd_cnt       : %d\n"
1908 		       "mpdu_frms_cnt      : %d\n"
1909 		       "msdu_frms_cnt      : %d\n"
1910 		       "total_byte_cnt     : %d\n"
1911 		       "late_recv_mpdu_cnt : %d\n"
1912 		       "win_jump_2k        : %d\n"
1913 		       "hole_cnt           : %d\n",
1914 		       queue_status->curr_mpdu_cnt,
1915 		       queue_status->curr_msdu_cnt,
1916 		       queue_status->fwd_timeout_cnt,
1917 		       queue_status->fwd_bar_cnt,
1918 		       queue_status->dup_cnt,
1919 		       queue_status->frms_in_order_cnt,
1920 		       queue_status->bar_rcvd_cnt,
1921 		       queue_status->mpdu_frms_cnt,
1922 		       queue_status->msdu_frms_cnt,
1923 		       queue_status->total_cnt,
1924 		       queue_status->late_recv_mpdu_cnt,
1925 		       queue_status->win_jump_2k,
1926 		       queue_status->hole_cnt);
1927 
1928 	DP_PRINT_STATS("Addba Req          : %d\n"
1929 			"Addba Resp         : %d\n"
1930 			"Addba Resp success : %d\n"
1931 			"Addba Resp failed  : %d\n"
1932 			"Delba Req received : %d\n"
1933 			"Delba Tx success   : %d\n"
1934 			"Delba Tx Fail      : %d\n"
1935 			"BA window size     : %d\n"
1936 			"Pn size            : %d\n",
1937 			rx_tid->num_of_addba_req,
1938 			rx_tid->num_of_addba_resp,
1939 			rx_tid->num_addba_rsp_success,
1940 			rx_tid->num_addba_rsp_failed,
1941 			rx_tid->num_of_delba_req,
1942 			rx_tid->delba_tx_success_cnt,
1943 			rx_tid->delba_tx_fail_cnt,
1944 			rx_tid->ba_win_size,
1945 			rx_tid->pn_size);
1946 }
1947 
1948 /*
1949  * dp_peer_find_add_id() - map peer_id with peer
1950  * @soc: soc handle
1951  * @peer_mac_addr: peer mac address
1952  * @peer_id: peer id to be mapped
1953  * @hw_peer_id: HW ast index
1954  * @vdev_id: vdev_id
1955  *
 * return: peer on success
 *         NULL on failure
1958  */
1959 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1960 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1961 	uint8_t vdev_id)
1962 {
1963 	struct dp_peer *peer;
1964 
1965 	QDF_ASSERT(peer_id <= soc->max_peers);
1966 	/* check if there's already a peer object with this MAC address */
1967 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1968 		0 /* is aligned */, vdev_id, DP_MOD_ID_CONFIG);
1969 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1970 		  "%s: peer %pK ID %d vid %d mac "QDF_MAC_ADDR_FMT,
1971 		  __func__, peer, peer_id, vdev_id,
1972 		  QDF_MAC_ADDR_REF(peer_mac_addr));
1973 
1974 	if (peer) {
1975 		/* peer's ref count was already incremented by
1976 		 * peer_find_hash_find
1977 		 */
1978 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1979 			  "%s: ref_cnt: %d", __func__,
1980 			   qdf_atomic_read(&peer->ref_cnt));
1981 
1982 		/*
		 * if peer is in logical delete state, CP triggered the delete
		 * before the map was received; ignore this event
1985 		 */
1986 		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
1987 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
1988 			dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
1989 				 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
1990 				 vdev_id);
1991 			return NULL;
1992 		}
1993 		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
1994 		if (peer->peer_id == HTT_INVALID_PEER) {
1995 			peer->peer_id = peer_id;
1996 			dp_peer_tid_peer_id_update(peer, peer->peer_id);
1997 		} else {
1998 			QDF_ASSERT(0);
1999 		}
2000 
2001 		dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2002 		return peer;
2003 	}
2004 
2005 	return NULL;
2006 }
2007 
2008 /**
2009  * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
2012  * @hw_peer_id - ast index for this peer
2013  * @vdev_id - vdev ID
2014  * @peer_mac_addr - mac address of the peer
2015  * @ast_hash - ast hash value
2016  * @is_wds - flag to indicate peer map event for WDS ast entry
2017  *
2018  * associate the peer_id that firmware provided with peer entry
2019  * and update the ast table in the host with the hw_peer_id.
2020  *
2021  * Return: QDF_STATUS code
2022  */
2023 
2024 QDF_STATUS
2025 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2026 		       uint16_t hw_peer_id, uint8_t vdev_id,
2027 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
2028 		       uint8_t is_wds)
2029 {
2030 	struct dp_peer *peer = NULL;
2031 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2032 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2033 
2034 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2035 		soc, peer_id, hw_peer_id,
2036 		QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2037 
	/* For a peer map event for a WDS ast entry, get the peer from
	 * the obj map
2040 	 */
2041 	if (is_wds) {
2042 		peer = dp_peer_get_ref_by_id(soc, peer_id,
2043 					     DP_MOD_ID_HTT);
2044 
2045 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2046 				      vdev_id, ast_hash, is_wds);
2047 		if (peer)
2048 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2049 	} else {
2050 		/*
2051 		 * It's the responsibility of the CP and FW to ensure
2052 		 * that peer is created successfully. Ideally DP should
		 * not hit the below condition for directly associated
2054 		 * peers.
2055 		 */
2056 		if ((hw_peer_id < 0) ||
2057 		    (hw_peer_id >=
2058 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2059 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2060 				  "invalid hw_peer_id: %d", hw_peer_id);
2061 			qdf_assert_always(0);
2062 		}
2063 
2064 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2065 					   hw_peer_id, vdev_id);
2066 
2067 		if (peer) {
2068 			if (wlan_op_mode_sta == peer->vdev->opmode &&
2069 			    qdf_mem_cmp(peer->mac_addr.raw,
2070 					peer->vdev->mac_addr.raw,
2071 					QDF_MAC_ADDR_SIZE) != 0) {
2072 				dp_info("STA vdev bss_peer!!!!");
2073 				peer->bss_peer = 1;
2074 			}
2075 
2076 			if (peer->vdev->opmode == wlan_op_mode_sta) {
2077 				peer->vdev->bss_ast_hash = ast_hash;
2078 				peer->vdev->bss_ast_idx = hw_peer_id;
2079 			}
2080 
			/* Add ast entry in case the self ast entry was
			 * deleted due to a DP/CP sync issue
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map, so no lock is needed
			 * before referring to it
			 */
2089 			if (!peer->self_ast_entry) {
2090 				dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2091 					QDF_MAC_ADDR_REF(peer_mac_addr));
2092 				dp_peer_add_ast(soc, peer,
2093 						peer_mac_addr,
2094 						type, 0);
2095 			}
2096 		}
2097 		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2098 				      vdev_id, ast_hash, is_wds);
2099 	}
2100 
2101 	return err;
2102 }
2103 
2104 /**
2105  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer unmap event for WDS ast entry
2111  * @free_wds_count - number of wds entries freed by FW with peer delete
2112  *
2113  * Return: none
2114  */
2115 void
2116 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
2117 			 uint8_t vdev_id, uint8_t *mac_addr,
2118 			 uint8_t is_wds, uint32_t free_wds_count)
2119 {
2120 	struct dp_peer *peer;
2121 	struct dp_vdev *vdev = NULL;
2122 
2123 	peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2124 
2125 	/*
2126 	 * Currently peer IDs are assigned for vdevs as well as peers.
2127 	 * If the peer ID is for a vdev, then the peer pointer stored
2128 	 * in peer_id_to_obj_map will be NULL.
2129 	 */
2130 	if (!peer) {
2131 		dp_err("Received unmap event for invalid peer_id %u",
2132 		       peer_id);
2133 		return;
2134 	}
2135 
	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here
	 */
2138 	if (is_wds) {
2139 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
2140 						   mac_addr)) {
2141 			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2142 			return;
2143 		}
2144 
2145 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
2146 			 peer, peer->peer_id,
2147 			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
2148 			 QDF_MAC_ADDR_REF(mac_addr), vdev_id,
2149 			 is_wds);
2150 
2151 		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2152 		return;
2153 	} else {
2154 		dp_peer_clean_wds_entries(soc, peer, free_wds_count);
2155 	}
2156 
2157 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
2158 		soc, peer_id, peer);
2159 
2160 	dp_peer_find_id_to_obj_remove(soc, peer_id);
2161 	peer->peer_id = HTT_INVALID_PEER;
2162 
2163 	/*
2164 	 *	 Reset ast flow mapping table
2165 	 */
2166 	dp_peer_reset_flowq_map(peer);
2167 
2168 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
2169 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
2170 				peer_id, vdev_id);
2171 	}
2172 
2173 	vdev = peer->vdev;
2174 	DP_UPDATE_STATS(vdev, peer);
2175 
2176 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
2177 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2178 	/*
2179 	 * Remove a reference to the peer.
2180 	 * If there are no more references, delete the peer object.
2181 	 */
2182 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2183 }
2184 
2185 void
2186 dp_peer_find_detach(struct dp_soc *soc)
2187 {
2188 	dp_peer_find_map_detach(soc);
2189 	dp_peer_find_hash_detach(soc);
2190 	dp_peer_ast_hash_detach(soc);
2191 	dp_peer_ast_table_detach(soc);
2192 }
2193 
2194 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
2195 	union hal_reo_status *reo_status)
2196 {
2197 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2198 
2199 	if ((reo_status->rx_queue_status.header.status !=
2200 		HAL_REO_CMD_SUCCESS) &&
2201 		(reo_status->rx_queue_status.header.status !=
2202 		HAL_REO_CMD_DRAIN)) {
2203 		/* Should not happen normally. Just print error for now */
2204 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2205 			  "%s: Rx tid HW desc update failed(%d): tid %d",
2206 			  __func__,
2207 			  reo_status->rx_queue_status.header.status,
2208 			  rx_tid->tid);
2209 	}
2210 }
2211 
2212 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
2213 {
2214 	struct ol_if_ops *ol_ops = NULL;
2215 	bool is_roaming = false;
2216 	uint8_t vdev_id = -1;
2217 	struct cdp_soc_t *soc;
2218 
2219 	if (!peer) {
2220 		dp_info("Peer is NULL. No roaming possible");
2221 		return false;
2222 	}
2223 
2224 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
2225 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
2226 
2227 	if (ol_ops && ol_ops->is_roam_inprogress) {
2228 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
2229 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
2230 	}
2231 
2232 	dp_info("peer: "QDF_MAC_ADDR_FMT", vdev_id: %d, is_roaming: %d",
2233 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
2234 
2235 	return is_roaming;
2236 }
2237 
2238 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
2239 					 ba_window_size, uint32_t start_seq)
2240 {
2241 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2242 	struct dp_soc *soc = peer->vdev->pdev->soc;
2243 	struct hal_reo_cmd_params params;
2244 
2245 	qdf_mem_zero(&params, sizeof(params));
2246 
2247 	params.std.need_status = 1;
2248 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2249 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2250 	params.u.upd_queue_params.update_ba_window_size = 1;
2251 	params.u.upd_queue_params.ba_window_size = ba_window_size;
2252 
2253 	if (start_seq < IEEE80211_SEQ_MAX) {
2254 		params.u.upd_queue_params.update_ssn = 1;
2255 		params.u.upd_queue_params.ssn = start_seq;
2256 	} else {
		dp_set_ssn_valid_flag(&params, 0);
2258 	}
2259 
2260 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2261 			    dp_rx_tid_update_cb, rx_tid)) {
2262 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2263 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2264 	}
2265 
2266 	rx_tid->ba_win_size = ba_window_size;
2267 
2268 	if (dp_get_peer_vdev_roaming_in_progress(peer))
2269 		return QDF_STATUS_E_PERM;
2270 
2271 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
2272 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2273 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
2274 			peer->vdev->vdev_id, peer->mac_addr.raw,
2275 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
2276 
2277 	return QDF_STATUS_SUCCESS;
2278 }
2279 
2280 /*
 * dp_reo_desc_free() - Callback to free reo descriptor memory after
 * HW cache flush
2283  *
2284  * @soc: DP SOC handle
2285  * @cb_ctxt: Callback context
2286  * @reo_status: REO command status
2287  */
2288 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
2289 	union hal_reo_status *reo_status)
2290 {
2291 	struct reo_desc_list_node *freedesc =
2292 		(struct reo_desc_list_node *)cb_ctxt;
2293 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
2294 	unsigned long curr_ts = qdf_get_system_timestamp();
2295 
2296 	if ((reo_status->fl_cache_status.header.status !=
2297 		HAL_REO_CMD_SUCCESS) &&
2298 		(reo_status->fl_cache_status.header.status !=
2299 		HAL_REO_CMD_DRAIN)) {
2300 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2301 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
2302 			  __func__,
			  reo_status->fl_cache_status.header.status,
2304 			  freedesc->rx_tid.tid);
2305 	}
2306 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2307 		  "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
2308 		  curr_ts,
2309 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
2310 	qdf_mem_unmap_nbytes_single(soc->osdev,
2311 		rx_tid->hw_qdesc_paddr,
2312 		QDF_DMA_BIDIRECTIONAL,
2313 		rx_tid->hw_qdesc_alloc_size);
2314 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2315 	qdf_mem_free(freedesc);
2316 }
2317 
2318 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
2319 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
2320 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2321 {
2322 	if (dma_addr < 0x50000000)
2323 		return QDF_STATUS_E_FAILURE;
2324 	else
2325 		return QDF_STATUS_SUCCESS;
2326 }
2327 #else
2328 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2329 {
2330 	return QDF_STATUS_SUCCESS;
2331 }
2332 #endif
2333 
2334 
2335 /*
 * dp_rx_tid_setup_wifi3() - Setup receive TID state
2337  * @peer: Datapath peer handle
2338  * @tid: TID
2339  * @ba_window_size: BlockAck window size
2340  * @start_seq: Starting sequence number
2341  *
2342  * Return: QDF_STATUS code
2343  */
2344 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
2345 				 uint32_t ba_window_size, uint32_t start_seq)
2346 {
2347 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2348 	struct dp_vdev *vdev = peer->vdev;
2349 	struct dp_soc *soc = vdev->pdev->soc;
2350 	uint32_t hw_qdesc_size;
2351 	uint32_t hw_qdesc_align;
2352 	int hal_pn_type;
2353 	void *hw_qdesc_vaddr;
2354 	uint32_t alloc_tries = 0;
2355 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2356 
2357 	if (!qdf_atomic_read(&peer->is_default_route_set))
2358 		return QDF_STATUS_E_FAILURE;
2359 
2360 	rx_tid->ba_win_size = ba_window_size;
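	/*
	 * If the HW queue descriptor already exists, only the queue
	 * parameters (BA window size / SSN) need to be updated.
	 */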
2361 	if (rx_tid->hw_qdesc_vaddr_unaligned)
2362 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
2363 			start_seq);
2364 	rx_tid->delba_tx_status = 0;
2365 	rx_tid->ppdu_id_2k = 0;
2366 	rx_tid->num_of_addba_req = 0;
2367 	rx_tid->num_of_delba_req = 0;
2368 	rx_tid->num_of_addba_resp = 0;
2369 	rx_tid->num_addba_rsp_failed = 0;
2370 	rx_tid->num_addba_rsp_success = 0;
2371 	rx_tid->delba_tx_success_cnt = 0;
2372 	rx_tid->delba_tx_fail_cnt = 0;
2373 	rx_tid->statuscode = 0;
2374 
2375 	/* TODO: Allocating HW queue descriptors based on max BA window size
2376 	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
2378 	 * queue descriptors based on BA window size being negotiated (0 for
2379 	 * non BA cases), and reallocate when BA window size changes and also
2380 	 * send WMI message to FW to change the REO queue descriptor in Rx
2381 	 * peer entry as part of dp_rx_tid_update.
2382 	 */
2383 	if (tid != DP_NON_QOS_TID)
2384 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2385 			HAL_RX_MAX_BA_WINDOW, tid);
2386 	else
2387 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2388 			ba_window_size, tid);
2389 
2390 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
2391 	/* To avoid unnecessary extra allocation for alignment, try allocating
2392 	 * exact size and see if we already have aligned address.
2393 	 */
2394 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
2395 
2396 try_desc_alloc:
2397 	rx_tid->hw_qdesc_vaddr_unaligned =
2398 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
2399 
2400 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2402 			  "%s: Rx tid HW desc alloc failed: tid %d",
2403 			  __func__, tid);
2404 		return QDF_STATUS_E_NOMEM;
2405 	}
2406 
2407 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
2408 		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
2410 		 * memory for alignment
2411 		 */
2412 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2413 		rx_tid->hw_qdesc_vaddr_unaligned =
2414 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
2415 					hw_qdesc_align - 1);
2416 
2417 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2418 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2419 				  "%s: Rx tid HW desc alloc failed: tid %d",
2420 				  __func__, tid);
2421 			return QDF_STATUS_E_NOMEM;
2422 		}
2423 
2424 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
2425 			rx_tid->hw_qdesc_vaddr_unaligned,
2426 			hw_qdesc_align);
2427 
2428 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2429 			  "%s: Total Size %d Aligned Addr %pK",
2430 			  __func__, rx_tid->hw_qdesc_alloc_size,
2431 			  hw_qdesc_vaddr);
2432 
2433 	} else {
2434 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
2435 	}
2436 
2437 	/* TODO: Ensure that sec_type is set before ADDBA is received.
2438 	 * Currently this is set based on htt indication
2439 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
2440 	 */
2441 	switch (peer->security[dp_sec_ucast].sec_type) {
2442 	case cdp_sec_type_tkip_nomic:
2443 	case cdp_sec_type_aes_ccmp:
2444 	case cdp_sec_type_aes_ccmp_256:
2445 	case cdp_sec_type_aes_gcmp:
2446 	case cdp_sec_type_aes_gcmp_256:
2447 		hal_pn_type = HAL_PN_WPA;
2448 		break;
2449 	case cdp_sec_type_wapi:
2450 		if (vdev->opmode == wlan_op_mode_ap)
2451 			hal_pn_type = HAL_PN_WAPI_EVEN;
2452 		else
2453 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2454 		break;
2455 	default:
2456 		hal_pn_type = HAL_PN_NONE;
2457 		break;
2458 	}
2459 
2460 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2461 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2462 
2463 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2464 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2465 		&(rx_tid->hw_qdesc_paddr));
2466 
2467 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2468 			QDF_STATUS_SUCCESS) {
2469 		if (alloc_tries++ < 10) {
2470 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2471 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2472 			goto try_desc_alloc;
2473 		} else {
2474 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2475 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
2476 				  __func__, tid);
2477 			err = QDF_STATUS_E_NOMEM;
2478 			goto error;
2479 		}
2480 	}
2481 
2482 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2483 		err = QDF_STATUS_E_PERM;
2484 		goto error;
2485 	}
2486 
2487 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2488 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2489 		    soc->ctrl_psoc,
2490 		    peer->vdev->pdev->pdev_id,
2491 		    peer->vdev->vdev_id,
2492 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2493 		    1, ba_window_size)) {
2494 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2495 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
2496 				  __func__, tid);
2497 			err = QDF_STATUS_E_FAILURE;
2498 			goto error;
2499 		}
2500 	}
	return QDF_STATUS_SUCCESS;
2502 error:
2503 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2504 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2505 		    QDF_STATUS_SUCCESS)
2506 			qdf_mem_unmap_nbytes_single(
2507 				soc->osdev,
2508 				rx_tid->hw_qdesc_paddr,
2509 				QDF_DMA_BIDIRECTIONAL,
2510 				rx_tid->hw_qdesc_alloc_size);
2511 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2512 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2513 	}
2514 	return err;
2515 }
2516 
2517 #ifdef REO_DESC_DEFER_FREE
2518 /*
2519  * dp_reo_desc_clean_up() - If cmd to flush base desc fails add
2520  * desc back to freelist and defer the deletion
2521  *
2522  * @soc: DP SOC handle
2523  * @desc: Base descriptor to be freed
2524  * @reo_status: REO command status
2525  */
2526 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2527 				 struct reo_desc_list_node *desc,
2528 				 union hal_reo_status *reo_status)
2529 {
2530 	desc->free_ts = qdf_get_system_timestamp();
2531 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2532 	qdf_list_insert_back(&soc->reo_desc_freelist,
2533 			     (qdf_list_node_t *)desc);
2534 }
2535 
2536 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2539  *
2540  * @list_size: REO desc list size to be cleaned
2541  */
2542 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2543 {
2544 	unsigned long curr_ts = qdf_get_system_timestamp();
2545 
2546 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
2547 		dp_err_log("%lu:freedesc number %d in freelist",
2548 			   curr_ts, *list_size);
2549 		/* limit the batch queue size */
2550 		*list_size = REO_DESC_FREELIST_SIZE;
2551 	}
2552 }
2553 #else
2554 /*
 * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
 * cache fails, free the base REO desc anyway
2557  *
2558  * @soc: DP SOC handle
2559  * @desc: Base descriptor to be freed
2560  * @reo_status: REO command status
2561  */
2562 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2563 				 struct reo_desc_list_node *desc,
2564 				 union hal_reo_status *reo_status)
2565 {
2566 	if (reo_status) {
2567 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2568 		reo_status->fl_cache_status.header.status = 0;
2569 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2570 	}
2571 }
2572 
2573 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2576  *
2577  * @list_size: REO desc list size to be cleaned
2578  */
2579 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2580 {
2581 }
2582 #endif
2583 
2584 /*
2585  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2586  * cmd and re-insert desc into free list if send fails.
2587  *
2588  * @soc: DP SOC handle
2589  * @desc: desc with resend update cmd flag set
2590  * @rx_tid: Desc RX tid associated with update cmd for resetting
2591  * valid field to 0 in h/w
2592  *
2593  * Return: QDF status
2594  */
2595 static QDF_STATUS
2596 dp_resend_update_reo_cmd(struct dp_soc *soc,
2597 			 struct reo_desc_list_node *desc,
2598 			 struct dp_rx_tid *rx_tid)
2599 {
2600 	struct hal_reo_cmd_params params;
2601 
2602 	qdf_mem_zero(&params, sizeof(params));
2603 	params.std.need_status = 1;
2604 	params.std.addr_lo =
2605 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2606 	params.std.addr_hi =
2607 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
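	/*
	 * update_vld = 1 with vld = 0 marks the REO queue descriptor
	 * invalid in HW, so it can subsequently be flushed and freed.
	 */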
2608 	params.u.upd_queue_params.update_vld = 1;
2609 	params.u.upd_queue_params.vld = 0;
2610 	desc->resend_update_reo_cmd = false;
2611 	/*
2612 	 * If the cmd send fails then set resend_update_reo_cmd flag
2613 	 * and insert the desc at the end of the free list to retry.
2614 	 */
2615 	if (dp_reo_send_cmd(soc,
2616 			    CMD_UPDATE_RX_REO_QUEUE,
2617 			    &params,
2618 			    dp_rx_tid_delete_cb,
2619 			    (void *)desc)
2620 	    != QDF_STATUS_SUCCESS) {
2621 		desc->resend_update_reo_cmd = true;
2622 		desc->free_ts = qdf_get_system_timestamp();
2623 		qdf_list_insert_back(&soc->reo_desc_freelist,
2624 				     (qdf_list_node_t *)desc);
2625 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2626 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2627 		return QDF_STATUS_E_FAILURE;
2628 	}
2629 
2630 	return QDF_STATUS_SUCCESS;
2631 }
2632 
2633 /*
2634  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
2636  *
2637  * @soc: DP SOC handle
2638  * @cb_ctxt: Callback context
2639  * @reo_status: REO command status
2640  */
2641 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2642 			 union hal_reo_status *reo_status)
2643 {
2644 	struct reo_desc_list_node *freedesc =
2645 		(struct reo_desc_list_node *)cb_ctxt;
2646 	uint32_t list_size;
2647 	struct reo_desc_list_node *desc;
2648 	unsigned long curr_ts = qdf_get_system_timestamp();
2649 	uint32_t desc_size, tot_desc_size;
2650 	struct hal_reo_cmd_params params;
2651 	bool flush_failure = false;
2652 
2653 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2654 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2655 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2656 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2657 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
2658 		return;
2659 	} else if (reo_status->rx_queue_status.header.status !=
2660 		HAL_REO_CMD_SUCCESS) {
2661 		/* Should not happen normally. Just print error for now */
2662 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
2663 			   reo_status->rx_queue_status.header.status,
2664 			   freedesc->rx_tid.tid);
2665 	}
2666 
2667 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2668 		"%s: rx_tid: %d status: %d", __func__,
2669 		freedesc->rx_tid.tid,
2670 		reo_status->rx_queue_status.header.status);
2671 
2672 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2673 	freedesc->free_ts = curr_ts;
2674 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2675 		(qdf_list_node_t *)freedesc, &list_size);
2676 
	/* The MCL path adds the desc back to reo_desc_freelist when a REO
	 * FLUSH fails. This can make the number of pending REO queues in
	 * the free list grow larger than the REO_CMD_RING max size, which
	 * floods the REO cmd ring and leaves the REO HW in an unexpected
	 * condition. So the number of REO cmds issued in a batch operation
	 * has to be limited.
	 */
2683 	dp_reo_limit_clean_batch_sz(&list_size);
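	/*
	 * Drain the freelist while any of the following holds for the head
	 * entry: the list has grown to REO_DESC_FREELIST_SIZE or beyond,
	 * the entry has aged past REO_DESC_FREE_DEFER_MS, or the entry has
	 * a pending UPDATE_RX_REO_QUEUE command to resend.
	 */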
2684 
2685 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2686 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2687 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2688 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2689 		(desc->resend_update_reo_cmd && list_size))) {
2690 		struct dp_rx_tid *rx_tid;
2691 
2692 		qdf_list_remove_front(&soc->reo_desc_freelist,
2693 				(qdf_list_node_t **)&desc);
2694 		list_size--;
2695 		rx_tid = &desc->rx_tid;
2696 
2697 		/* First process descs with resend_update_reo_cmd set */
2698 		if (desc->resend_update_reo_cmd) {
2699 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
2700 			    QDF_STATUS_SUCCESS)
2701 				break;
2702 			else
2703 				continue;
2704 		}
2705 
2706 		/* Flush and invalidate REO descriptor from HW cache: Base and
2707 		 * extension descriptors should be flushed separately */
2708 		if (desc->pending_ext_desc_size)
2709 			tot_desc_size = desc->pending_ext_desc_size;
2710 		else
2711 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2712 		/* Get base descriptor size by passing non-qos TID */
2713 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2714 						   DP_NON_QOS_TID);
2715 
2716 		/* Flush reo extension descriptors */
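		/*
		 * Each chunk at (hw_qdesc_paddr + tot_desc_size) is flushed,
		 * working backwards in desc_size steps until only the base
		 * descriptor (offset 0) remains; the base is flushed last,
		 * below, with a status callback.
		 */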
2717 		while ((tot_desc_size -= desc_size) > 0) {
2718 			qdf_mem_zero(&params, sizeof(params));
2719 			params.std.addr_lo =
2720 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2721 				tot_desc_size) & 0xffffffff;
2722 			params.std.addr_hi =
2723 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2724 
2725 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2726 							CMD_FLUSH_CACHE,
2727 							&params,
2728 							NULL,
2729 							NULL)) {
2730 				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
2731 					   "tid %d desc %pK", rx_tid->tid,
2732 					   (void *)(rx_tid->hw_qdesc_paddr));
2733 				desc->pending_ext_desc_size = tot_desc_size +
2734 								      desc_size;
2735 				dp_reo_desc_clean_up(soc, desc, reo_status);
2736 				flush_failure = true;
2737 				break;
2738 			}
2739 		}
2740 
2741 		if (flush_failure)
2742 			break;
2743 		else
2744 			desc->pending_ext_desc_size = desc_size;
2745 
2746 		/* Flush base descriptor */
2747 		qdf_mem_zero(&params, sizeof(params));
2748 		params.std.need_status = 1;
2749 		params.std.addr_lo =
2750 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2751 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2752 
2753 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2754 							  CMD_FLUSH_CACHE,
2755 							  &params,
2756 							  dp_reo_desc_free,
2757 							  (void *)desc)) {
2758 			union hal_reo_status reo_status;
2759 			/*
			 * If dp_reo_send_cmd returns failure, the related TID
			 * queue desc should be unmapped. The local reo_desc,
			 * together with the TID queue desc, also needs to be
			 * freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 *
			 * In the MCL path, add the desc back to the free desc
			 * list and defer deletion.
2768 			 */
2769 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
2770 				   rx_tid->tid);
2771 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2772 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2773 			break;
2774 		}
2775 	}
2776 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2777 }
2778 
2779 /*
 * dp_rx_tid_delete_wifi3() - Delete receive TID queue
2781  * @peer: Datapath peer handle
2782  * @tid: TID
2783  *
2784  * Return: 0 on success, error code on failure
2785  */
2786 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2787 {
2788 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2789 	struct dp_soc *soc = peer->vdev->pdev->soc;
2790 	struct hal_reo_cmd_params params;
2791 	struct reo_desc_list_node *freedesc =
2792 		qdf_mem_malloc(sizeof(*freedesc));
2793 
2794 	if (!freedesc) {
2795 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2796 			  "%s: malloc failed for freedesc: tid %d",
2797 			  __func__, tid);
2798 		return -ENOMEM;
2799 	}
2800 
2801 	freedesc->rx_tid = *rx_tid;
2802 	freedesc->resend_update_reo_cmd = false;
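	/*
	 * Note: freedesc carries a copy of the rx_tid state so that the
	 * peer's rx_tid can be reset immediately below, while the HW
	 * invalidation and cache flush complete asynchronously through
	 * dp_rx_tid_delete_cb.
	 */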
2803 
2804 	qdf_mem_zero(&params, sizeof(params));
2805 
2806 	params.std.need_status = 1;
2807 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2808 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2809 	params.u.upd_queue_params.update_vld = 1;
2810 	params.u.upd_queue_params.vld = 0;
2811 
2812 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2813 			    dp_rx_tid_delete_cb, (void *)freedesc)
2814 		!= QDF_STATUS_SUCCESS) {
2815 		/* Defer the clean up to the call back context */
2816 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2817 		freedesc->free_ts = qdf_get_system_timestamp();
2818 		freedesc->resend_update_reo_cmd = true;
2819 		qdf_list_insert_front(&soc->reo_desc_freelist,
2820 				      (qdf_list_node_t *)freedesc);
2821 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2822 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2823 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2824 	}
2825 
2826 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2827 	rx_tid->hw_qdesc_alloc_size = 0;
2828 	rx_tid->hw_qdesc_paddr = 0;
2829 
2830 	return 0;
2831 }
2832 
2833 #ifdef DP_LFR
2834 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2835 {
2836 	int tid;
2837 
2838 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2839 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2840 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2841 			  "Setting up TID %d for peer %pK peer->local_id %d",
2842 			  tid, peer, peer->local_id);
2843 	}
2844 }
2845 #else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2847 #endif
2848 
2849 /*
 * dp_peer_tx_init() - Initialize transmit TID state
2851  * @pdev: Datapath pdev
2852  * @peer: Datapath peer
2853  *
2854  */
2855 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2856 {
2857 	dp_peer_tid_queue_init(peer);
2858 	dp_peer_update_80211_hdr(peer->vdev, peer);
2859 }
2860 
2861 /*
 * dp_peer_tx_cleanup() - Deinitialize transmit TID state
2863  * @vdev: Datapath vdev
2864  * @peer: Datapath peer
2865  *
2866  */
2867 static inline void
2868 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2869 {
2870 	dp_peer_tid_queue_cleanup(peer);
2871 }
2872 
2873 /*
 * dp_peer_rx_init() - Initialize receive TID state
2875  * @pdev: Datapath pdev
2876  * @peer: Datapath peer
2877  *
2878  */
2879 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2880 {
2881 	int tid;
	struct dp_rx_tid *rx_tid;

2883 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2884 		rx_tid = &peer->rx_tid[tid];
2885 		rx_tid->array = &rx_tid->base;
2886 		rx_tid->base.head = rx_tid->base.tail = NULL;
2887 		rx_tid->tid = tid;
2888 		rx_tid->defrag_timeout_ms = 0;
2889 		rx_tid->ba_win_size = 0;
2890 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2891 
2892 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2893 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2894 	}
2895 
2896 	peer->active_ba_session_cnt = 0;
2897 	peer->hw_buffer_size = 0;
2898 	peer->kill_256_sessions = 0;
2899 
2900 	/* Setup default (non-qos) rx tid queue */
2901 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2902 
	/* Setup rx tid queue for TID 0.
	 * Other queues will be set up on receiving the first packet, which
	 * would otherwise cause a NULL REO queue error
2906 	 */
2907 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2908 
2909 	/*
2910 	 * Setup the rest of TID's to handle LFR
2911 	 */
2912 	dp_peer_setup_remaining_tids(peer);
2913 
2914 	/*
2915 	 * Set security defaults: no PN check, no security. The target may
2916 	 * send a HTT SEC_IND message to overwrite these defaults.
2917 	 */
2918 	peer->security[dp_sec_ucast].sec_type =
2919 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2920 }
2921 
2922 /*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
2924  * @vdev: Datapath vdev
2925  * @peer: Datapath peer
2926  *
2927  */
2928 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2929 {
2930 	int tid;
2931 	uint32_t tid_delete_mask = 0;
2932 
2933 	dp_info("Remove tids for peer: %pK", peer);
2934 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2935 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2936 
2937 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2938 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2939 			/* Cleanup defrag related resource */
2940 			dp_rx_defrag_waitlist_remove(peer, tid);
2941 			dp_rx_reorder_flush_frag(peer, tid);
2942 		}
2943 
2944 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2945 			dp_rx_tid_delete_wifi3(peer, tid);
2946 
2947 			tid_delete_mask |= (1 << tid);
2948 		}
2949 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2950 	}
2951 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2952 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2953 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2954 			peer->vdev->pdev->pdev_id,
2955 			peer->vdev->vdev_id, peer->mac_addr.raw,
2956 			tid_delete_mask);
2957 	}
2958 #endif
2959 }
2960 
2961 #ifdef FEATURE_PERPKT_INFO
2962 /*
 * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2964  * @peer: Datapath peer
2965  *
2966  * return: void
2967  */
2968 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2969 {
2970 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2971 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2972 	peer->last_delayed_ba = false;
2973 	peer->last_delayed_ba_ppduid = 0;
2974 }
2975 #else
2976 /*
 * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2978  * @peer: Datapath peer
2979  *
2980  * return: void
2981  */
2982 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2983 {
2984 }
2985 #endif
2986 
2987 /*
 * dp_peer_cleanup() - Cleanup peer information
2989  * @vdev: Datapath vdev
2990  * @peer: Datapath peer
2991  *
2992  */
2993 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2994 {
2995 	enum wlan_op_mode vdev_opmode;
2996 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
2997 	struct dp_pdev *pdev = vdev->pdev;
2998 	struct dp_soc *soc = pdev->soc;
2999 
3000 	/* save vdev related member in case vdev freed */
3001 	vdev_opmode = vdev->opmode;
3002 
3003 	dp_peer_tx_cleanup(vdev, peer);
3004 
3005 	if (vdev_opmode != wlan_op_mode_monitor)
		/* cleanup the Rx reorder queues for this peer */
3007 		dp_peer_rx_cleanup(vdev, peer);
3008 
3009 	qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3010 		     QDF_MAC_ADDR_SIZE);
3011 
3012 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
3013 		soc->cdp_soc.ol_ops->peer_unref_delete(
3014 				soc->ctrl_psoc,
3015 				vdev->pdev->pdev_id,
3016 				peer->mac_addr.raw, vdev_mac_addr,
3017 				vdev_opmode);
3018 }
3019 
/* dp_teardown_256_ba_sessions() - Teardown sessions using 256
3021  *                                window size when a request with
3022  *                                64 window size is received.
3023  *                                This is done as a WAR since HW can
3024  *                                have only one setting per peer (64 or 256).
3025  *                                For HKv2, we use per tid buffersize setting
3026  *                                for 0 to per_tid_basize_max_tid. For tid
3027  *                                more than per_tid_basize_max_tid we use HKv1
3028  *                                method.
3029  * @peer: Datapath peer
3030  *
3031  * Return: void
3032  */
3033 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
3034 {
3035 	uint8_t delba_rcode = 0;
3036 	int tid;
3037 	struct dp_rx_tid *rx_tid = NULL;
3038 
3039 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
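	/*
	 * TIDs below per_tid_basize_max_tid have a per-TID buffer size
	 * setting (HKv2) and are unaffected; only the remaining TIDs need
	 * the 64/256 teardown WAR, so start scanning from there.
	 */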
3040 	for (; tid < DP_MAX_TIDS; tid++) {
3041 		rx_tid = &peer->rx_tid[tid];
3042 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3043 
3044 		if (rx_tid->ba_win_size <= 64) {
3045 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3046 			continue;
3047 		} else {
3048 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
3049 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3050 				/* send delba */
3051 				if (!rx_tid->delba_tx_status) {
3052 					rx_tid->delba_tx_retry++;
3053 					rx_tid->delba_tx_status = 1;
3054 					rx_tid->delba_rcode =
3055 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
3056 					delba_rcode = rx_tid->delba_rcode;
3057 
3058 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3059 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3060 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3061 							peer->vdev->pdev->soc->ctrl_psoc,
3062 							peer->vdev->vdev_id,
3063 							peer->mac_addr.raw,
3064 							tid, delba_rcode);
3065 				} else {
3066 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
3067 				}
3068 			} else {
3069 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
3070 			}
3071 		}
3072 	}
3073 }
3074 
3075 /*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
 * response tx completion
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
3085 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
3086 				      uint8_t *peer_mac,
3087 				      uint16_t vdev_id,
3088 				      uint8_t tid, int status)
3089 {
3090 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3091 						       peer_mac, 0, vdev_id,
3092 						       DP_MOD_ID_CDP);
3093 	struct dp_rx_tid *rx_tid = NULL;
3094 
3095 	if (!peer) {
3096 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3097 			  "%s: Peer is NULL!\n", __func__);
3098 		goto fail;
3099 	}
3100 	rx_tid = &peer->rx_tid[tid];
3101 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3102 	if (status) {
3103 		rx_tid->num_addba_rsp_failed++;
3104 		dp_rx_tid_update_wifi3(peer, tid, 1,
3105 				       IEEE80211_SEQ_MAX);
3106 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3107 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3108 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
3109 
3110 		goto success;
3111 	}
3112 
3113 	rx_tid->num_addba_rsp_success++;
3114 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
3115 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3116 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3117 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
3118 			__func__, tid);
3119 		goto fail;
3120 	}
3121 
3122 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
3123 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3124 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3125 			  "%s: default route is not set for peer: "QDF_MAC_ADDR_FMT,
3126 			  __func__, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3127 		goto fail;
3128 	}
3129 
3130 	if (dp_rx_tid_update_wifi3(peer, tid,
3131 				   rx_tid->ba_win_size,
3132 				   rx_tid->startseqnum)) {
3133 		dp_err("Failed update REO SSN");
3134 	}
3135 
3136 	dp_info("tid %u window_size %u start_seq_num %u",
3137 		tid, rx_tid->ba_win_size,
3138 		rx_tid->startseqnum);
3139 
3140 	/* First Session */
3141 	if (peer->active_ba_session_cnt == 0) {
3142 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
3143 			peer->hw_buffer_size = 256;
3144 		else
3145 			peer->hw_buffer_size = 64;
3146 	}
3147 
3148 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
3149 
3150 	peer->active_ba_session_cnt++;
3151 
3152 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3153 
3154 	/* Kill any session having 256 buffer size
3155 	 * when 64 buffer size request is received.
3156 	 * Also, latch on to 64 as new buffer size.
3157 	 */
3158 	if (peer->kill_256_sessions) {
3159 		dp_teardown_256_ba_sessions(peer);
3160 		peer->kill_256_sessions = 0;
3161 	}
3162 
3163 success:
3164 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3165 	return QDF_STATUS_SUCCESS;
3166 
3167 fail:
3168 	if (peer)
3169 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3170 
3171 	return QDF_STATUS_E_FAILURE;
3172 }
3173 
3174 /*
 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
3186 QDF_STATUS
3187 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3188 			     uint16_t vdev_id, uint8_t tid,
3189 			     uint8_t *dialogtoken, uint16_t *statuscode,
3190 			     uint16_t *buffersize, uint16_t *batimeout)
3191 {
3192 	struct dp_rx_tid *rx_tid = NULL;
3193 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3194 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3195 						       peer_mac, 0, vdev_id,
3196 						       DP_MOD_ID_CDP);
3197 
3198 	if (!peer) {
3199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3200 			  "%s: Peer is NULL!\n", __func__);
3201 		return QDF_STATUS_E_FAILURE;
3202 	}
3203 	rx_tid = &peer->rx_tid[tid];
3204 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3205 	rx_tid->num_of_addba_resp++;
3206 	/* setup ADDBA response parameters */
3207 	*dialogtoken = rx_tid->dialogtoken;
3208 	*statuscode = rx_tid->statuscode;
3209 	*buffersize = rx_tid->ba_win_size;
3210 	*batimeout  = 0;
3211 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3212 
3213 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3214 
3215 	return status;
3216 }
3217 
3218 /* dp_check_ba_buffersize() - Check buffer size in request
3219  *                            and latch onto this size based on
3220  *                            size used in first active session.
3221  * @peer: Datapath peer
 * @tid: TID number
3223  * @buffersize: Block ack window size
3224  *
3225  * Return: void
3226  */
3227 static void dp_check_ba_buffersize(struct dp_peer *peer,
3228 				   uint16_t tid,
3229 				   uint16_t buffersize)
3230 {
3231 	struct dp_rx_tid *rx_tid = NULL;
3232 
3233 	rx_tid = &peer->rx_tid[tid];
3234 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
3235 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
3236 		rx_tid->ba_win_size = buffersize;
3237 		return;
3238 	} else {
3239 		if (peer->active_ba_session_cnt == 0) {
3240 			rx_tid->ba_win_size = buffersize;
3241 		} else {
3242 			if (peer->hw_buffer_size == 64) {
3243 				if (buffersize <= 64)
3244 					rx_tid->ba_win_size = buffersize;
3245 				else
3246 					rx_tid->ba_win_size = peer->hw_buffer_size;
3247 			} else if (peer->hw_buffer_size == 256) {
3248 				if (buffersize > 64) {
3249 					rx_tid->ba_win_size = buffersize;
3250 				} else {
3251 					rx_tid->ba_win_size = buffersize;
3252 					peer->hw_buffer_size = 64;
3253 					peer->kill_256_sessions = 1;
3254 				}
3255 			}
3256 		}
3257 	}
3258 }
3259 
3260 #define DP_RX_BA_SESSION_DISABLE  1
3261 
3262 /*
3263  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
3264  *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
3268  * @dialogtoken: dialogtoken from ADDBA frame
3269  * @tid: TID number
3270  * @batimeout: BA timeout
3271  * @buffersize: BA window size
3272  * @startseqnum: Start seq. number received in BA sequence control
3273  *
3274  * Return: 0 on success, error code on failure
3275  */
3276 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
3277 				  uint8_t *peer_mac,
3278 				  uint16_t vdev_id,
3279 				  uint8_t dialogtoken,
3280 				  uint16_t tid, uint16_t batimeout,
3281 				  uint16_t buffersize,
3282 				  uint16_t startseqnum)
3283 {
3284 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3285 	struct dp_rx_tid *rx_tid = NULL;
3286 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3287 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
3288 						       peer_mac, 0, vdev_id,
3289 						       DP_MOD_ID_CDP);
3290 
3291 	if (!peer) {
3292 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3293 			  "%s: Peer is NULL!\n", __func__);
3294 		return QDF_STATUS_E_FAILURE;
3295 	}
3296 	rx_tid = &peer->rx_tid[tid];
3297 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3298 	rx_tid->num_of_addba_req++;
3299 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
3300 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
3301 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3302 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3303 		peer->active_ba_session_cnt--;
3304 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3305 			  "%s: Rx Tid- %d hw qdesc is already setup",
3306 			__func__, tid);
3307 	}
3308 
3309 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3310 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3311 		status = QDF_STATUS_E_FAILURE;
3312 		goto fail;
3313 	}
3314 
3315 	if (wlan_cfg_is_dp_force_rx_64_ba(soc->wlan_cfg_ctx)) {
3316 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3317 			  "force use BA64 scheme");
3318 		buffersize = qdf_min((uint16_t)64, buffersize);
3319 	}
3320 
3321 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
3322 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3323 			  "%s disable BA session",
3324 			    __func__);
3325 
3326 		buffersize = 1;
3327 	} else if (rx_tid->rx_ba_win_size_override) {
3328 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3329 			  "%s override BA win to %d", __func__,
3330 			      rx_tid->rx_ba_win_size_override);
3331 
3332 		buffersize = rx_tid->rx_ba_win_size_override;
3333 	} else {
3334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3335 			  "%s restore BA win %d based on addba req",
3336 			    __func__, buffersize);
3337 	}
3338 
3339 	dp_check_ba_buffersize(peer, tid, buffersize);
3340 
3341 	if (dp_rx_tid_setup_wifi3(peer, tid,
3342 	    rx_tid->ba_win_size, startseqnum)) {
3343 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3344 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3345 		status = QDF_STATUS_E_FAILURE;
3346 		goto fail;
3347 	}
3348 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
3349 
3350 	rx_tid->dialogtoken = dialogtoken;
3351 	rx_tid->startseqnum = startseqnum;
3352 
3353 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
3354 		rx_tid->statuscode = rx_tid->userstatuscode;
3355 	else
3356 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
3357 
3358 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
3359 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
3360 
3361 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3362 
3363 fail:
3364 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3365 
3366 	return status;
3367 }
3368 
3369 /*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @statuscode: response status code to be set
 */
3378 QDF_STATUS
3379 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3380 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
3381 {
3382 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3383 						       peer_mac, 0, vdev_id,
3384 						       DP_MOD_ID_CDP);
3385 	struct dp_rx_tid *rx_tid;
3386 
3387 	if (!peer) {
3388 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3389 			  "%s: Peer is NULL!\n", __func__);
3390 		return QDF_STATUS_E_FAILURE;
3391 	}
3392 
3393 	rx_tid = &peer->rx_tid[tid];
3394 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3395 	rx_tid->userstatuscode = statuscode;
3396 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3397 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3398 
3399 	return QDF_STATUS_SUCCESS;
3400 }
3401 
3402 /*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
3412 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3413 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
3414 {
3415 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3416 	struct dp_rx_tid *rx_tid;
3417 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3418 						      peer_mac, 0, vdev_id,
3419 						      DP_MOD_ID_CDP);
3420 
3421 	if (!peer) {
3422 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3423 			  "%s: Peer is NULL!\n", __func__);
3424 		return QDF_STATUS_E_FAILURE;
3425 	}
3426 	rx_tid = &peer->rx_tid[tid];
3427 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3428 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
3429 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3430 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3431 		status = QDF_STATUS_E_FAILURE;
3432 		goto fail;
3433 	}
3434 	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace it with a new one without queue extension descriptors to
	 * save memory
3437 	 */
3438 	rx_tid->delba_rcode = reasoncode;
3439 	rx_tid->num_of_delba_req++;
3440 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3441 
3442 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
3443 	peer->active_ba_session_cnt--;
3444 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3445 fail:
3446 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3447 
3448 	return status;
3449 }
3450 
3451 /*
 * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
3457  * @tid: TID number
3458  * @status: tx completion status
3459  * Return: 0 on success, error code on failure
3460  */
3461 
3462 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3463 				 uint16_t vdev_id,
3464 				 uint8_t tid, int status)
3465 {
3466 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
3467 	struct dp_rx_tid *rx_tid = NULL;
3468 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3469 						      peer_mac, 0, vdev_id,
3470 						      DP_MOD_ID_CDP);
3471 
3472 	if (!peer) {
3473 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3474 			  "%s: Peer is NULL!", __func__);
3475 		return QDF_STATUS_E_FAILURE;
3476 	}
3477 	rx_tid = &peer->rx_tid[tid];
3478 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3479 	if (status) {
3480 		rx_tid->delba_tx_fail_cnt++;
3481 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
3482 			rx_tid->delba_tx_retry = 0;
3483 			rx_tid->delba_tx_status = 0;
3484 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3485 		} else {
3486 			rx_tid->delba_tx_retry++;
3487 			rx_tid->delba_tx_status = 1;
3488 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3489 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3490 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3491 					peer->vdev->pdev->soc->ctrl_psoc,
3492 					peer->vdev->vdev_id,
3493 					peer->mac_addr.raw, tid,
3494 					rx_tid->delba_rcode);
3495 		}
3496 		goto end;
3497 	} else {
3498 		rx_tid->delba_tx_success_cnt++;
3499 		rx_tid->delba_tx_retry = 0;
3500 		rx_tid->delba_tx_status = 0;
3501 	}
3502 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
3503 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3504 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3505 		peer->active_ba_session_cnt--;
3506 	}
3507 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3508 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3509 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3510 	}
3511 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3512 
3513 end:
3514 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3515 
3516 	return ret;
3517 }
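
/*
 * Usage sketch (illustrative only): the DELBA tx-completion path feeds
 * the over-the-air status back into the datapath. A non-zero status
 * triggers a bounded retry (up to DP_MAX_DELBA_RETRY) through the
 * ol_ops->send_delba callback; a zero status clears the retry state
 * and tears down any active or in-progress BA session for the TID:
 *
 *	dp_delba_tx_completion_wifi3(cdp_soc, peer_mac, vdev_id, tid,
 *				     delba_tx_status);
 */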
3518 
3519 /**
3520  * dp_set_pn_check_wifi3() - enable PN check in REO for security
3521  * @soc: Datapath soc handle
3522  * @vdev_id: id of Datapath vdev
3523  * @peer_mac: Datapath peer mac address
3524  * @sec_type: security type
3525  * @rx_pn: Receive PN starting number
3526  *
3527  * Return: QDF_STATUS_SUCCESS on success,
3528  *	   QDF_STATUS_E_FAILURE on failure
3529  */
3530 
3531 QDF_STATUS
3532 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3533 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3534 		      uint32_t *rx_pn)
3535 {
3536 	struct dp_pdev *pdev;
3537 	int i;
3538 	uint8_t pn_size;
3539 	struct hal_reo_cmd_params params;
3540 	struct dp_peer *peer = NULL;
3541 	struct dp_vdev *vdev = NULL;
3542 
3543 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3544 				      peer_mac, 0, vdev_id,
3545 				      DP_MOD_ID_CDP);
3546 
3547 	if (!peer) {
3548 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3549 			  "%s: Peer is NULL!\n", __func__);
3550 		return QDF_STATUS_E_FAILURE;
3551 	}
3552 
3553 	vdev = peer->vdev;
3554 
3555 	if (!vdev) {
3556 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3557 			  "%s: VDEV is NULL!\n", __func__);
3558 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3559 		return QDF_STATUS_E_FAILURE;
3560 	}
3561 
3562 	pdev = vdev->pdev;
3563 	qdf_mem_zero(&params, sizeof(params));
3564 
3565 	params.std.need_status = 1;
3566 	params.u.upd_queue_params.update_pn_valid = 1;
3567 	params.u.upd_queue_params.update_pn_size = 1;
3568 	params.u.upd_queue_params.update_pn = 1;
3569 	params.u.upd_queue_params.update_pn_check_needed = 1;
3570 	params.u.upd_queue_params.update_svld = 1;
3571 	params.u.upd_queue_params.svld = 0;
3572 
3573 	switch (sec_type) {
3574 	case cdp_sec_type_tkip_nomic:
3575 	case cdp_sec_type_aes_ccmp:
3576 	case cdp_sec_type_aes_ccmp_256:
3577 	case cdp_sec_type_aes_gcmp:
3578 	case cdp_sec_type_aes_gcmp_256:
3579 		params.u.upd_queue_params.pn_check_needed = 1;
3580 		params.u.upd_queue_params.pn_size = 48;
3581 		pn_size = 48;
3582 		break;
3583 	case cdp_sec_type_wapi:
3584 		params.u.upd_queue_params.pn_check_needed = 1;
3585 		params.u.upd_queue_params.pn_size = 128;
3586 		pn_size = 128;
3587 		if (vdev->opmode == wlan_op_mode_ap) {
3588 			params.u.upd_queue_params.pn_even = 1;
3589 			params.u.upd_queue_params.update_pn_even = 1;
3590 		} else {
3591 			params.u.upd_queue_params.pn_uneven = 1;
3592 			params.u.upd_queue_params.update_pn_uneven = 1;
3593 		}
3594 		break;
3595 	default:
3596 		params.u.upd_queue_params.pn_check_needed = 0;
3597 		pn_size = 0;
3598 		break;
3599 	}
3600 
3601 
3602 	for (i = 0; i < DP_MAX_TIDS; i++) {
3603 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3604 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3605 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3606 			params.std.addr_lo =
3607 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3608 			params.std.addr_hi =
3609 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3610 
3611 			if (pn_size) {
3612 				QDF_TRACE(QDF_MODULE_ID_DP,
3613 					  QDF_TRACE_LEVEL_INFO_HIGH,
3614 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3615 					  __func__, i, rx_pn[3], rx_pn[2],
3616 					  rx_pn[1], rx_pn[0]);
3617 				params.u.upd_queue_params.update_pn_valid = 1;
3618 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3619 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3620 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3621 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3622 			}
3623 			rx_tid->pn_size = pn_size;
3624 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3625 					    CMD_UPDATE_RX_REO_QUEUE,
3626 					    &params, dp_rx_tid_update_cb,
3627 					    rx_tid)) {
3628 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3629 					   " tid %d desc %pK", rx_tid->tid,
3630 					   (void *)(rx_tid->hw_qdesc_paddr));
3631 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3632 					     rx.err.reo_cmd_send_fail, 1);
3633 			}
3634 		} else {
3635 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3636 				  "PN Check not setup for TID :%d ", i);
3637 		}
3638 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3639 	}
3640 
3641 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3642 
3643 	return QDF_STATUS_SUCCESS;
3644 }
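
/*
 * Usage sketch (illustrative only): programming a zero starting PN for
 * an AES-CCMP peer. rx_pn[] carries the 128-bit PN as four 32-bit
 * words, least significant word first (rx_pn[0] = PN bits 31..0);
 * only the lower 48 bits are used for CCMP/GCMP, all 128 for WAPI.
 *
 *	uint32_t rx_pn[4] = { 0, 0, 0, 0 };
 *
 *	dp_set_pn_check_wifi3(cdp_soc, vdev_id, peer_mac,
 *			      cdp_sec_type_aes_ccmp, rx_pn);
 */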
3645 
3646 
3647 /**
3648  * dp_set_key_sec_type_wifi3() - set security mode of key
3649  * @soc: Datapath soc handle
3650  * @peer_mac: Datapath peer mac address
3651  * @vdev_id: id of Datapath vdev
3652  * @sec_type: security type
3653  * @is_unicast: true for a unicast key, false for a multicast/group key
3654  *
3655  * Return: QDF_STATUS_SUCCESS on success,
3656  *	   QDF_STATUS_E_FAILURE on failure
3657  */
3658 
3659 QDF_STATUS
3660 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3661 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3662 			  bool is_unicast)
3663 {
3664 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3665 						       peer_mac, 0, vdev_id,
3666 						       DP_MOD_ID_CDP);
3667 	int sec_index;
3668 
3669 	if (!peer) {
3670 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3671 			  "%s: Peer is NULL!\n", __func__);
3672 		return QDF_STATUS_E_FAILURE;
3673 	}
3674 
3675 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3676 		  "key sec spec for peer %pK "QDF_MAC_ADDR_FMT": %s key of type %d",
3677 		  peer,
3678 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3679 		  is_unicast ? "ucast" : "mcast",
3680 		  sec_type);
3681 
3682 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3683 	peer->security[sec_index].sec_type = sec_type;
3684 
3685 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3686 
3687 	return QDF_STATUS_SUCCESS;
3688 }
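
/*
 * Usage sketch (illustrative only): record that the peer's pairwise
 * (unicast) key is AES-CCMP so later rx processing can look up the
 * matching security type:
 *
 *	dp_set_key_sec_type_wifi3(cdp_soc, vdev_id, peer_mac,
 *				  cdp_sec_type_aes_ccmp, true);
 */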
3689 
3690 void
3691 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3692 		      enum cdp_sec_type sec_type, int is_unicast,
3693 		      u_int32_t *michael_key,
3694 		      u_int32_t *rx_pn)
3695 {
3696 	struct dp_peer *peer;
3697 	int sec_index;
3698 
3699 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3700 	if (!peer) {
3701 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3702 			  "Couldn't find peer from ID %d - skipping security inits",
3703 			  peer_id);
3704 		return;
3705 	}
3706 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3707 		  "sec spec for peer %pK "QDF_MAC_ADDR_FMT": %s key of type %d",
3708 		  peer,
3709 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3710 		  is_unicast ? "ucast" : "mcast",
3711 		  sec_type);
3712 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3713 	peer->security[sec_index].sec_type = sec_type;
3714 #ifdef notyet /* TODO: See if this is required for defrag support */
3715 	/* michael key only valid for TKIP, but for simplicity,
3716 	 * copy it anyway
3717 	 */
3718 	qdf_mem_copy(
3719 		&peer->security[sec_index].michael_key[0],
3720 		michael_key,
3721 		sizeof(peer->security[sec_index].michael_key));
3722 #ifdef BIG_ENDIAN_HOST
3723 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3724 				 sizeof(peer->security[sec_index].michael_key));
3725 #endif /* BIG_ENDIAN_HOST */
3726 #endif
3727 
3728 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3729 	if (sec_type != cdp_sec_type_wapi) {
3730 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3731 	} else {
3732 		for (i = 0; i < DP_MAX_TIDS; i++) {
3733 			/*
3734 			 * Setting PN valid bit for WAPI sec_type,
3735 			 * since WAPI PN has to be started with predefined value
3736 			 */
3737 			peer->tids_last_pn_valid[i] = 1;
3738 			qdf_mem_copy(
3739 				(u_int8_t *) &peer->tids_last_pn[i],
3740 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3741 			peer->tids_last_pn[i].pn128[1] =
3742 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3743 			peer->tids_last_pn[i].pn128[0] =
3744 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3745 		}
3746 	}
3747 #endif
3748 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3749 	 * all security types and last pn for WAPI) once REO command API
3750 	 * is available
3751 	 */
3752 
3753 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3754 }
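
/*
 * Call sketch (illustrative; argument names are hypothetical): this
 * handler is driven by the target's security indication, which is
 * keyed by peer_id rather than MAC address, e.g.:
 *
 *	dp_rx_sec_ind_handler(soc, peer_id, cdp_sec_type_aes_ccmp,
 *			      1, michael_key, rx_pn);
 *
 * michael_key and rx_pn are currently unused here; see the notyet
 * blocks above.
 */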
3755 
3756 #ifdef QCA_PEER_EXT_STATS
3757 /*
3758  * dp_peer_ext_stats_ctx_alloc() - Allocate the peer extended
3759  *                                 stats context
3760  * @soc: DP SoC context
3761  * @peer: DP peer context
3762  *
3763  * Allocate the peer extended stats context
3764  *
3765  * Return: QDF_STATUS_SUCCESS if allocation is
3766  *	   successful
3767  */
3768 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
3769 				       struct dp_peer *peer)
3770 {
3771 	uint8_t tid, ctx_id;
3772 
3773 	if (!soc || !peer) {
3774 		dp_warn("Null soc %pK or peer %pK", soc, peer);
3775 		return QDF_STATUS_E_INVAL;
3776 	}
3777 
3778 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3779 		return QDF_STATUS_SUCCESS;
3780 
3781 	/*
3782 	 * Allocate memory for peer extended stats.
3783 	 */
3784 	peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
3785 	if (!peer->pext_stats) {
3786 		dp_err("Peer extended stats obj alloc failed!!");
3787 		return QDF_STATUS_E_NOMEM;
3788 	}
3789 
3790 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3791 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3792 			struct cdp_delay_tx_stats *tx_delay =
3793 			&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
3794 			struct cdp_delay_rx_stats *rx_delay =
3795 			&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
3796 
3797 			dp_hist_init(&tx_delay->tx_swq_delay,
3798 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3799 			dp_hist_init(&tx_delay->hwtx_delay,
3800 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3801 			dp_hist_init(&rx_delay->to_stack_delay,
3802 				     CDP_HIST_TYPE_REAP_STACK);
3803 		}
3804 	}
3805 
3806 	return QDF_STATUS_SUCCESS;
3807 }
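
/*
 * Layout sketch: the allocated context keeps one delay-stats entry per
 * (data TID, tx/rx context) pair. A consumer recording a software
 * enqueue delay for a given TID and ring could index it as below
 * (hypothetical usage, assuming the dp_hist update helper):
 *
 *	struct cdp_delay_tx_stats *tx_delay =
 *		&peer->pext_stats->delay_stats[tid][ring_id].tx_delay;
 *
 *	dp_hist_update_stats(&tx_delay->tx_swq_delay, delay_ms);
 */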
3808 
3809 /*
3810  * dp_peer_ext_stats_ctx_dealloc() - Free the peer extended stats context
3811  * @soc: DP SoC context
3812  * @peer: DP peer context
3813  *
3814  * Return: Void
3816  */
3817 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
3818 {
3819 	if (!peer) {
3820 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3821 		return;
3822 	}
3823 
3824 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3825 		return;
3826 
3827 	if (!peer->pext_stats)
3828 		return;
3829 
3830 	qdf_mem_free(peer->pext_stats);
3831 	peer->pext_stats = NULL;
3832 }
3833 #endif
3834 
3835 QDF_STATUS
3836 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
3837 			uint8_t tid, uint16_t win_sz)
3838 {
3839 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3840 	struct dp_peer *peer;
3841 	struct dp_rx_tid *rx_tid;
3842 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3843 
3844 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3845 
3846 	if (!peer) {
3847 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3848 			  "Couldn't find peer from ID %d",
3849 			  peer_id);
3850 		return QDF_STATUS_E_FAILURE;
3851 	}
3852 
3853 	qdf_assert_always(tid < DP_MAX_TIDS);
3854 
3855 	rx_tid = &peer->rx_tid[tid];
3856 
3857 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3858 		if (!rx_tid->delba_tx_status) {
3859 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3860 				  "%s: PEER_ID: %d TID: %d, BA win: %d ",
3861 				  __func__, peer_id, tid, win_sz);
3862 
3863 			qdf_spin_lock_bh(&rx_tid->tid_lock);
3864 
3865 			rx_tid->delba_tx_status = 1;
3866 
3867 			rx_tid->rx_ba_win_size_override =
3868 			    qdf_min((uint16_t)63, win_sz);
3869 
3870 			rx_tid->delba_rcode =
3871 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
3872 
3873 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3874 
3875 			if (soc->cdp_soc.ol_ops->send_delba)
3876 				soc->cdp_soc.ol_ops->send_delba(
3877 					peer->vdev->pdev->soc->ctrl_psoc,
3878 					peer->vdev->vdev_id,
3879 					peer->mac_addr.raw,
3880 					tid,
3881 					rx_tid->delba_rcode);
3882 		}
3883 	} else {
3884 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3885 			  "BA session is not setup for TID:%d ", tid);
3886 		status = QDF_STATUS_E_FAILURE;
3887 	}
3888 
3889 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3890 
3891 	return status;
3892 }
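
/*
 * Behaviour sketch (values are illustrative): on a target-initiated
 * DELBA indication the host caps the requested window at 63, records
 * it as rx_ba_win_size_override and asks the control path to send a
 * DELBA with reason IEEE80211_REASON_QOS_SETUP_REQUIRED, so the next
 * ADDBA renegotiates with the smaller window:
 *
 *	dp_rx_delba_ind_handler(soc, peer_id, tid, 256);
 *	// rx_ba_win_size_override for this TID is now 63
 */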
3893 
3894 #ifdef DP_PEER_EXTENDED_API
3895 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3896 			    struct ol_txrx_desc_type *sta_desc)
3897 {
3898 	struct dp_peer *peer;
3899 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3900 
3901 	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
3902 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3903 
3904 	if (!peer)
3905 		return QDF_STATUS_E_FAULT;
3906 
3907 	qdf_spin_lock_bh(&peer->peer_info_lock);
3908 	peer->state = OL_TXRX_PEER_STATE_CONN;
3909 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3910 
3911 	dp_rx_flush_rx_cached(peer, false);
3912 
3913 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3914 
3915 	return QDF_STATUS_SUCCESS;
3916 }
3917 
3918 QDF_STATUS
3919 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3920 	      struct qdf_mac_addr peer_addr)
3921 {
3922 	struct dp_peer *peer;
3923 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3924 
3925 	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
3926 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
3927 	if (!peer || !peer->valid)
3928 		return QDF_STATUS_E_FAULT;
3929 
3930 	dp_clear_peer_internal(soc, peer);
3931 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3932 	return QDF_STATUS_SUCCESS;
3933 }
3934 
3935 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3936 				enum ol_txrx_peer_state state)
3937 {
3938 	struct dp_peer *peer;
3939 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3940 
3941 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3942 				       DP_MOD_ID_CDP);
3943 	if (!peer) {
3944 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3945 			  "Failed to find peer for: ["QDF_MAC_ADDR_FMT"]",
3946 			  QDF_MAC_ADDR_REF(peer_mac));
3947 		return QDF_STATUS_E_FAILURE;
3948 	}
3949 	peer->state = state;
3950 
3951 	dp_info("peer %pK state %d", peer, peer->state);
3952 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3953 	 * Decrement it here.
3954 	 */
3955 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3956 
3957 	return QDF_STATUS_SUCCESS;
3958 }
3959 
3960 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3961 			 uint8_t *vdev_id)
3962 {
3963 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3964 	struct dp_peer *peer =
3965 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
3966 				       DP_MOD_ID_CDP);
3967 
3968 	if (!peer)
3969 		return QDF_STATUS_E_FAILURE;
3970 
3971 	dp_info("peer %pK vdev %pK vdev id %d",
3972 		peer, peer->vdev, peer->vdev->vdev_id);
3973 	*vdev_id = peer->vdev->vdev_id;
3974 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3975 	 * Decrement it here.
3976 	 */
3977 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3978 
3979 	return QDF_STATUS_SUCCESS;
3980 }
3981 
3982 struct cdp_vdev *
3983 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3984 			 struct qdf_mac_addr peer_addr)
3985 {
3986 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3987 	struct dp_peer *peer = NULL;
3988 	struct cdp_vdev *vdev = NULL;
3989 
3990 	if (!pdev) {
3991 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3992 			  "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT,
3993 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
3994 		return NULL;
3995 	}
3996 
3997 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
3998 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
3999 	if (!peer) {
4000 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4001 			  "Peer not found for peer_addr: "QDF_MAC_ADDR_FMT,
4002 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
4003 		return NULL;
4004 	}
4005 
4006 	vdev = (struct cdp_vdev *)peer->vdev;
4007 
4008 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4009 	return vdev;
4010 }
4011 
4012 /**
4013  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
4014  * @peer - peer instance
4015  *
4016  * Get the virtual interface instance to which the peer belongs
4017  *
4018  * Return: virtual interface instance pointer
4019  *         NULL in case cannot find
4020  */
4021 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
4022 {
4023 	struct dp_peer *peer = peer_handle;
4024 
4025 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
4026 	return (struct cdp_vdev *)peer->vdev;
4027 }
4028 
4029 /**
4030  * dp_peer_get_peer_mac_addr() - Get peer mac address
4031  * @peer - peer instance
4032  *
4033  * Get peer mac address
4034  *
4035  * Return: peer mac address pointer
4036  *         NULL in case cannot find
4037  */
4038 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
4039 {
4040 	struct dp_peer *peer = peer_handle;
4041 	uint8_t *mac;
4042 
4043 	mac = peer->mac_addr.raw;
4044 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
4045 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4046 	return peer->mac_addr.raw;
4047 }
4048 
4049 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4050 		      uint8_t *peer_mac)
4051 {
4052 	enum ol_txrx_peer_state peer_state;
4053 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4054 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
4055 						       vdev_id, DP_MOD_ID_CDP);
4056 
4057 	if (!peer)
4058 		return QDF_STATUS_E_FAILURE;
4059 
4060 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
4061 	peer_state = peer->state;
4062 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4063 
4064 	return peer_state;
4065 }
4066 
4067 /**
4068  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
4069  * @pdev - data path device instance
4070  *
4071  * local peer id pool alloc for physical device
4072  *
4073  * Return: none
4074  */
4075 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
4076 {
4077 	int i;
4078 
4079 	/* point the freelist to the first ID */
4080 	pdev->local_peer_ids.freelist = 0;
4081 
4082 	/* link each ID to the next one */
4083 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4084 		pdev->local_peer_ids.pool[i] = i + 1;
4085 		pdev->local_peer_ids.map[i] = NULL;
4086 	}
4087 
4088 	/* link the last ID to itself, to mark the end of the list */
4089 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
4090 	pdev->local_peer_ids.pool[i] = i;
4091 
4092 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
4093 	DP_TRACE(INFO, "Peer pool init");
4094 }
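
/*
 * Worked example (hypothetical OL_TXRX_NUM_LOCAL_PEER_IDS of 4): after
 * init the freelist head is 0 and pool[] forms a chain ending in a
 * self-linked list-end marker:
 *
 *	freelist = 0
 *	pool[0] = 1, pool[1] = 2, pool[2] = 3, pool[3] = 4, pool[4] = 4
 */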
4095 
4096 /**
4097  * dp_local_peer_id_alloc() - allocate local peer id
4098  * @pdev - data path device instance
4099  * @peer - new peer instance
4100  *
4101  * allocate local peer id
4102  *
4103  * Return: none
4104  */
4105 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
4106 {
4107 	int i;
4108 
4109 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4110 	i = pdev->local_peer_ids.freelist;
4111 	if (pdev->local_peer_ids.pool[i] == i) {
4112 		/* the list is empty, except for the list-end marker */
4113 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
4114 	} else {
4115 		/* take the head ID and advance the freelist */
4116 		peer->local_id = i;
4117 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
4118 		pdev->local_peer_ids.map[i] = peer;
4119 	}
4120 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4121 	dp_info("peer %pK, local id %d", peer, peer->local_id);
4122 }
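
/*
 * Continuing the worked example above: the first allocation returns
 * local_id 0 and advances the freelist to pool[0] == 1. Once the
 * freelist reaches the self-linked marker (pool[4] == 4) the pool is
 * exhausted and OL_TXRX_INVALID_LOCAL_PEER_ID is assigned instead.
 */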
4123 
4124 /**
4125  * dp_local_peer_id_free() - remove local peer id
4126  * @pdev - data path device instance
4127  * @peer - peer instance should be removed
4128  *
4129  * remove local peer id
4130  *
4131  * Return: none
4132  */
4133 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
4134 {
4135 	int i = peer->local_id;
4136 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
4137 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
4138 		return;
4139 	}
4140 
4141 	/* put this ID on the head of the freelist */
4142 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4143 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
4144 	pdev->local_peer_ids.freelist = i;
4145 	pdev->local_peer_ids.map[i] = NULL;
4146 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4147 }
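
/*
 * Continuing the worked example: freeing local_id 2 pushes it back on
 * the head of the freelist (pool[2] = old head, freelist = 2), so it
 * is handed out again by the next allocation.
 */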
4148 
4149 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
4150 				uint8_t vdev_id, uint8_t *peer_addr)
4151 {
4152 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4153 	struct dp_peer *peer = NULL;
4154 
4155 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
4156 				      DP_MOD_ID_CDP);
4157 	if (!peer)
4158 		return false;
4159 
4160 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4161 
4162 	return true;
4163 }
4164 
4165 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
4166 				      uint8_t vdev_id, uint8_t *peer_addr,
4167 				      uint16_t max_bssid)
4168 {
4169 	int i;
4170 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4171 	struct dp_peer *peer = NULL;
4172 
4173 	for (i = 0; i < max_bssid; i++) {
4174 		/* Need to check vdevs other than the vdev_id */
4175 		if (vdev_id == i)
4176 			continue;
4177 		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
4178 					      DP_MOD_ID_CDP);
4179 		if (peer) {
4180 			dp_err("Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
4181 			       QDF_MAC_ADDR_REF(peer_addr), i);
4182 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4183 			return true;
4184 		}
4185 	}
4186 
4187 	return false;
4188 }
4189 
4190 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4191 			uint8_t *peer_addr)
4192 {
4193 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4194 	struct dp_peer *peer = NULL;
4195 
4196 	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
4197 				      DP_MOD_ID_CDP);
4198 	if (peer) {
4199 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4200 		return true;
4201 	}
4202 
4203 	return false;
4204 }
4205 #endif
4206 
4207 /**
4208  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
4209  * @peer: DP peer handle
4210  * @dp_stats_cmd_cb: REO command callback function
4211  * @cb_ctxt: Callback context
4212  *
4213  * Return: count of tid stats cmd send succeeded
4214  */
4215 int dp_peer_rxtid_stats(struct dp_peer *peer,
4216 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
4217 			void *cb_ctxt)
4218 {
4219 	struct dp_soc *soc = peer->vdev->pdev->soc;
4220 	struct hal_reo_cmd_params params;
4221 	int i;
4222 	int stats_cmd_sent_cnt = 0;
4223 	QDF_STATUS status;
4224 
4225 	if (!dp_stats_cmd_cb)
4226 		return stats_cmd_sent_cnt;
4227 
4228 	qdf_mem_zero(&params, sizeof(params));
4229 	for (i = 0; i < DP_MAX_TIDS; i++) {
4230 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
4231 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
4232 			params.std.need_status = 1;
4233 			params.std.addr_lo =
4234 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4235 			params.std.addr_hi =
4236 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4237 
4238 			if (cb_ctxt) {
4239 				status = dp_reo_send_cmd(
4240 						soc, CMD_GET_QUEUE_STATS,
4241 						&params, dp_stats_cmd_cb,
4242 						cb_ctxt);
4243 			} else {
4244 				status = dp_reo_send_cmd(
4245 						soc, CMD_GET_QUEUE_STATS,
4246 						&params, dp_stats_cmd_cb,
4247 						rx_tid);
4248 			}
4249 
4250 			if (QDF_IS_STATUS_SUCCESS(status))
4251 				stats_cmd_sent_cnt++;
4252 
4253 			/* Flush REO descriptor from HW cache to update stats
4254 			 * in descriptor memory. This is to help debugging */
4255 			qdf_mem_zero(&params, sizeof(params));
4256 			params.std.need_status = 0;
4257 			params.std.addr_lo =
4258 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4259 			params.std.addr_hi =
4260 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4261 			params.u.fl_cache_params.flush_no_inval = 1;
4262 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
4263 				NULL);
4264 		}
4265 	}
4266 
4267 	return stats_cmd_sent_cnt;
4268 }
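
/*
 * Usage sketch (illustrative; my_stats_cb is a hypothetical callback
 * matching dp_rxtid_stats_cmd_cb): request REO queue stats for all
 * TIDs of a peer and learn how many GET_QUEUE_STATS commands were
 * actually queued:
 *
 *	int sent = dp_peer_rxtid_stats(peer, my_stats_cb, NULL);
 *
 * With a NULL cb_ctxt each callback invocation receives the per-TID
 * struct dp_rx_tid as its context.
 */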
4269 
4270 QDF_STATUS
4271 dp_set_michael_key(struct cdp_soc_t *soc,
4272 		   uint8_t vdev_id,
4273 		   uint8_t *peer_mac,
4274 		   bool is_unicast, uint32_t *key)
4275 {
4276 	uint8_t sec_index = is_unicast ? 1 : 0;
4277 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4278 						      peer_mac, 0, vdev_id,
4279 						      DP_MOD_ID_CDP);
4280 
4281 	if (!peer) {
4282 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4283 			  "peer not found ");
4284 		return QDF_STATUS_E_FAILURE;
4285 	}
4286 
4287 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
4288 		     key, IEEE80211_WEP_MICLEN);
4289 
4290 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4291 
4292 	return QDF_STATUS_SUCCESS;
4293 }
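
/*
 * Usage sketch (illustrative only; the key words below are arbitrary):
 * install the 8-byte (IEEE80211_WEP_MICLEN) TKIP Michael key for the
 * peer's unicast key slot:
 *
 *	uint32_t mic_key[2] = { 0x11223344, 0x55667788 };
 *
 *	dp_set_michael_key(cdp_soc, vdev_id, peer_mac, true, mic_key);
 */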
4294 
4295 
4296 /**
4297  * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
4298  * @soc: DP soc
4299  * @vdev: vdev
4300  * @mod_id: id of module requesting reference
4301  *
4302  * Return: VDEV BSS peer
4303  */
4304 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
4305 					   struct dp_vdev *vdev,
4306 					   enum dp_mod_id mod_id)
4307 {
4308 	struct dp_peer *peer = NULL;
4309 
4310 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4311 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4312 		if (peer->bss_peer)
4313 			break;
4314 	}
4315 
4316 	if (!peer) {
4317 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4318 		return NULL;
4319 	}
4320 
4321 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4322 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4323 		return peer;
4324 	}
4325 
4326 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4327 	return peer;
4328 }
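
/*
 * Usage sketch: a reference returned here must be dropped with the
 * same module id once the caller is done with the BSS peer:
 *
 *	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use the bss peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */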
4329 
4330 /**
4331  * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
4332  * @soc: DP soc
4333  * @vdev: vdev
4334  * @mod_id: id of module requesting reference
4335  *
4336  * Return: VDEV self peer
4337  */
4338 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
4339 						struct dp_vdev *vdev,
4340 						enum dp_mod_id mod_id)
4341 {
4342 	struct dp_peer *peer;
4343 
4344 	if (vdev->opmode != wlan_op_mode_sta)
4345 		return NULL;
4346 
4347 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4348 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4349 		if (peer->sta_self_peer)
4350 			break;
4351 	}
4352 
4353 	if (!peer) {
4354 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4355 		return NULL;
4356 	}
4357 
4358 	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
4359 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4360 		return peer;
4361 	}
4362 
4363 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4364 	return peer;
4365 }
4366