xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 
33 #ifdef WLAN_TX_PKT_CAPTURE_ENH
34 #include "dp_tx_capture.h"
35 #endif
36 
37 #ifdef FEATURE_WDS
38 static inline bool
39 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
40 				    struct dp_ast_entry *ast_entry)
41 {
42 	/* If peer map v2 is enabled, we do not free the ast entry
43 	 * here; it is supposed to be freed in the unmap event (after
44 	 * we receive delete confirmation from the target).
45 	 *
46 	 * If peer_id is invalid, we did not get the peer map event
47 	 * for the peer, so free the ast entry from here only in that case.
48 	 */
49 
50 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
51 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
52 		return true;
53 
54 	return false;
55 }
56 #else
57 static inline bool
58 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
59 				    struct dp_ast_entry *ast_entry)
60 {
61 	return false;
62 }
63 #endif
64 
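/*
 * dp_set_ssn_valid_flag() - Mark the SSN valid bit in a REO update command
 * @params: REO command parameters
 * @valid: SSN valid value to be programmed
 *
 * Return: None
 */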
65 static inline void
66 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
67 					uint8_t valid)
68 {
69 	params->u.upd_queue_params.update_svld = 1;
70 	params->u.upd_queue_params.svld = valid;
71 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
72 		  "%s: Setting SSN valid bit to %d",
73 		  __func__, valid);
74 }
75 
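/*
 * dp_peer_find_mac_addr_cmp() - Compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the MAC addresses match, non-zero otherwise
 */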
76 static inline int dp_peer_find_mac_addr_cmp(
77 	union dp_align_mac_addr *mac_addr1,
78 	union dp_align_mac_addr *mac_addr2)
79 {
80 		/*
81 		 * Intentionally use & rather than &&.
82 		 * Because the operands are binary rather than generic boolean,
83 		 * the functionality is equivalent.
84 		 * Using && has the advantage of short-circuited evaluation,
85 		 * but using & has the advantage of no conditional branching,
86 		 * which is a more significant benefit.
87 		 */
88 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
89 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
90 }
91 
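/*
 * dp_peer_ast_table_attach() - Allocate the AST index to AST entry map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */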
92 static int dp_peer_ast_table_attach(struct dp_soc *soc)
93 {
94 	uint32_t max_ast_index;
95 
96 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
97 	/* allocate ast_table for ast entry to ast_index map */
98 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
99 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
100 	soc->ast_table = qdf_mem_malloc(max_ast_index *
101 					sizeof(struct dp_ast_entry *));
102 	if (!soc->ast_table) {
103 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
104 			  "%s: ast_table memory allocation failed", __func__);
105 		return QDF_STATUS_E_NOMEM;
106 	}
107 	return 0; /* success */
108 }
109 
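/*
 * dp_peer_find_map_attach() - Allocate the peer ID to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */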
110 static int dp_peer_find_map_attach(struct dp_soc *soc)
111 {
112 	uint32_t max_peers, peer_map_size;
113 
114 	max_peers = soc->max_peers;
115 	/* allocate the peer ID -> peer object map */
116 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
117 		  "\n<=== cfg max peer id %d ====>", max_peers);
118 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
119 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
120 	if (!soc->peer_id_to_obj_map) {
121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
122 			  "%s: peer map memory allocation failed", __func__);
123 		return QDF_STATUS_E_NOMEM;
124 	}
125 
126 	/*
127 	 * The peer_id_to_obj_map doesn't really need to be initialized,
128 	 * since elements are only used after they have been individually
129 	 * initialized.
130 	 * However, it is convenient for debugging to have all elements
131 	 * that are not in use set to 0.
132 	 */
133 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
134 	return 0; /* success */
135 }
136 
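/*
 * dp_log2_ceil() - Compute the ceiling of log2 of a value
 * @value: input value (expected to be non-zero)
 *
 * Return: smallest integer n such that (1 << n) >= value
 */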
137 static int dp_log2_ceil(unsigned int value)
138 {
139 	unsigned int tmp = value;
140 	int log2 = -1;
141 
142 	while (tmp) {
143 		log2++;
144 		tmp >>= 1;
145 	}
146 	if (1 << log2 != value)
147 		log2++;
148 	return log2;
149 }
150 
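/*
 * dp_peer_find_add_id_to_obj() - Record a peer ID in the peer object
 * @peer: peer object
 * @peer_id: peer ID assigned by firmware
 *
 * Stores the peer ID in the first free slot of the peer's peer_ids array.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if no free slot is available
 */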
151 static int dp_peer_find_add_id_to_obj(
152 	struct dp_peer *peer,
153 	uint16_t peer_id)
154 {
155 	int i;
156 
157 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
158 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
159 			peer->peer_ids[i] = peer_id;
160 			return 0; /* success */
161 		}
162 	}
163 	return QDF_STATUS_E_FAILURE; /* failure */
164 }
165 
166 #define DP_PEER_HASH_LOAD_MULT  2
167 #define DP_PEER_HASH_LOAD_SHIFT 0
168 
169 #define DP_AST_HASH_LOAD_MULT  2
170 #define DP_AST_HASH_LOAD_SHIFT 0
171 
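/*
 * dp_peer_find_hash_attach() - Allocate and initialize the peer MAC address
 *				to peer object hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */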
172 static int dp_peer_find_hash_attach(struct dp_soc *soc)
173 {
174 	int i, hash_elems, log2;
175 
176 	/* allocate the peer MAC address -> peer object hash table */
177 	hash_elems = soc->max_peers;
178 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
179 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
180 	log2 = dp_log2_ceil(hash_elems);
181 	hash_elems = 1 << log2;
182 
183 	soc->peer_hash.mask = hash_elems - 1;
184 	soc->peer_hash.idx_bits = log2;
185 	/* allocate an array of TAILQ peer object lists */
186 	soc->peer_hash.bins = qdf_mem_malloc(
187 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
188 	if (!soc->peer_hash.bins)
189 		return QDF_STATUS_E_NOMEM;
190 
191 	for (i = 0; i < hash_elems; i++)
192 		TAILQ_INIT(&soc->peer_hash.bins[i]);
193 
194 	return 0;
195 }
196 
197 static void dp_peer_find_hash_detach(struct dp_soc *soc)
198 {
199 	if (soc->peer_hash.bins) {
200 		qdf_mem_free(soc->peer_hash.bins);
201 		soc->peer_hash.bins = NULL;
202 	}
203 }
204 
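/*
 * dp_peer_find_hash_index() - Compute the peer hash index from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address of the peer
 *
 * Return: hash bin index within the peer hash table
 */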
205 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
206 	union dp_align_mac_addr *mac_addr)
207 {
208 	unsigned index;
209 
210 	index =
211 		mac_addr->align2.bytes_ab ^
212 		mac_addr->align2.bytes_cd ^
213 		mac_addr->align2.bytes_ef;
214 	index ^= index >> soc->peer_hash.idx_bits;
215 	index &= soc->peer_hash.mask;
216 	return index;
217 }
218 
219 
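/*
 * dp_peer_find_hash_add() - Add a peer object to the peer MAC hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * The peer is appended at the tail of its hash bin under peer_ref_mutex.
 *
 * Return: None
 */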
220 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
221 {
222 	unsigned index;
223 
224 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
225 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
226 	/*
227 	 * It is important to add the new peer at the tail of the peer list
228 	 * with the bin index.  Together with having the hash_find function
229 	 * search from head to tail, this ensures that if two entries with
230 	 * the same MAC address are stored, the one added first will be
231 	 * found first.
232 	 */
233 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
234 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
235 }
236 
237 #ifdef FEATURE_AST
238 /*
239  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
240  * @soc: SoC handle
241  *
242  * Return: None
243  */
244 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
245 {
246 	int i, hash_elems, log2;
247 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
248 
249 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
250 		DP_AST_HASH_LOAD_SHIFT);
251 
252 	log2 = dp_log2_ceil(hash_elems);
253 	hash_elems = 1 << log2;
254 
255 	soc->ast_hash.mask = hash_elems - 1;
256 	soc->ast_hash.idx_bits = log2;
257 
258 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
259 		  "ast hash_elems: %d, max_ast_idx: %d",
260 		  hash_elems, max_ast_idx);
261 
262 	/* allocate an array of TAILQ peer object lists */
263 	soc->ast_hash.bins = qdf_mem_malloc(
264 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
265 				dp_ast_entry)));
266 
267 	if (!soc->ast_hash.bins)
268 		return QDF_STATUS_E_NOMEM;
269 
270 	for (i = 0; i < hash_elems; i++)
271 		TAILQ_INIT(&soc->ast_hash.bins[i]);
272 
273 	return 0;
274 }
275 
276 /*
277  * dp_peer_ast_cleanup() - cleanup the references
278  * @soc: SoC handle
279  * @ast: ast entry
280  *
281  * Return: None
282  */
283 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
284 				       struct dp_ast_entry *ast)
285 {
286 	txrx_ast_free_cb cb = ast->callback;
287 	void *cookie = ast->cookie;
288 
289 	/* Call the callbacks to free up the cookie */
290 	if (cb) {
291 		ast->callback = NULL;
292 		ast->cookie = NULL;
293 		cb(soc->ctrl_psoc,
294 		   dp_soc_to_cdp_soc(soc),
295 		   cookie,
296 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
297 	}
298 }
299 
300 /*
301  * dp_peer_ast_hash_detach() - Free AST Hash table
302  * @soc: SoC handle
303  *
304  * Return: None
305  */
306 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
307 {
308 	unsigned int index;
309 	struct dp_ast_entry *ast, *ast_next;
310 
311 	if (!soc->ast_hash.mask)
312 		return;
313 
314 	if (!soc->ast_hash.bins)
315 		return;
316 
317 	qdf_spin_lock_bh(&soc->ast_lock);
318 	for (index = 0; index <= soc->ast_hash.mask; index++) {
319 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
320 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
321 					   hash_list_elem, ast_next) {
322 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
323 					     hash_list_elem);
324 				dp_peer_ast_cleanup(soc, ast);
325 				qdf_mem_free(ast);
326 			}
327 		}
328 	}
329 	qdf_spin_unlock_bh(&soc->ast_lock);
330 
331 	qdf_mem_free(soc->ast_hash.bins);
332 	soc->ast_hash.bins = NULL;
333 }
334 
335 /*
336  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
337  * @soc: SoC handle
338  *
339  * Return: AST hash
340  */
341 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
342 	union dp_align_mac_addr *mac_addr)
343 {
344 	uint32_t index;
345 
346 	index =
347 		mac_addr->align2.bytes_ab ^
348 		mac_addr->align2.bytes_cd ^
349 		mac_addr->align2.bytes_ef;
350 	index ^= index >> soc->ast_hash.idx_bits;
351 	index &= soc->ast_hash.mask;
352 	return index;
353 }
354 
355 /*
356  * dp_peer_ast_hash_add() - Add AST entry into hash table
357  * @soc: SoC handle
358  *
359  * This function adds the AST entry into SoC AST hash table
360  * It assumes caller has taken the ast lock to protect the access to this table
361  *
362  * Return: None
363  */
364 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
365 		struct dp_ast_entry *ase)
366 {
367 	uint32_t index;
368 
369 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
370 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
371 }
372 
373 /*
374  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
375  * @soc: SoC handle
376  *
377  * This function removes the AST entry from soc AST hash table
378  * It assumes caller has taken the ast lock to protect the access to this table
379  *
380  * Return: None
381  */
382 void dp_peer_ast_hash_remove(struct dp_soc *soc,
383 			     struct dp_ast_entry *ase)
384 {
385 	unsigned index;
386 	struct dp_ast_entry *tmpase;
387 	int found = 0;
388 
389 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
390 	/* Check if tail is not empty before delete*/
391 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
392 
393 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
394 		if (tmpase == ase) {
395 			found = 1;
396 			break;
397 		}
398 	}
399 
400 	QDF_ASSERT(found);
401 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
402 }
403 
404 /*
405  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
406  * @soc: SoC handle
407  * @peer: peer handle
408  * @ast_mac_addr: mac address
409  *
410  * It assumes caller has taken the ast lock to protect the access to ast list
411  *
412  * Return: AST entry
413  */
414 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
415 					   struct dp_peer *peer,
416 					   uint8_t *ast_mac_addr)
417 {
418 	struct dp_ast_entry *ast_entry = NULL;
419 	union dp_align_mac_addr *mac_addr =
420 		(union dp_align_mac_addr *)ast_mac_addr;
421 
422 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
423 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
424 					       &ast_entry->mac_addr)) {
425 			return ast_entry;
426 		}
427 	}
428 
429 	return NULL;
430 }
431 
432 /*
433  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
434  * @soc: SoC handle
435  *
436  * It assumes caller has taken the ast lock to protect the access to
437  * AST hash table
438  *
439  * Return: AST entry
440  */
441 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
442 						     uint8_t *ast_mac_addr,
443 						     uint8_t pdev_id)
444 {
445 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
446 	uint32_t index;
447 	struct dp_ast_entry *ase;
448 
449 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
450 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
451 	mac_addr = &local_mac_addr_aligned;
452 
453 	index = dp_peer_ast_hash_index(soc, mac_addr);
454 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
455 		if ((pdev_id == ase->pdev_id) &&
456 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
457 			return ase;
458 		}
459 	}
460 
461 	return NULL;
462 }
463 
464 /*
465  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
466  * @soc: SoC handle
467  *
468  * It assumes caller has taken the ast lock to protect the access to
469  * AST hash table
470  *
471  * Return: AST entry
472  */
473 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
474 					       uint8_t *ast_mac_addr)
475 {
476 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
477 	unsigned index;
478 	struct dp_ast_entry *ase;
479 
480 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
481 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
482 	mac_addr = &local_mac_addr_aligned;
483 
484 	index = dp_peer_ast_hash_index(soc, mac_addr);
485 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
486 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
487 			return ase;
488 		}
489 	}
490 
491 	return NULL;
492 }
493 
494 /*
495  * dp_peer_map_ast() - Map the ast entry with HW AST Index
496  * @soc: SoC handle
497  * @peer: peer to which ast node belongs
498  * @mac_addr: MAC address of ast node
499  * @hw_peer_id: HW AST Index returned by target in peer map event
500  * @vdev_id: vdev id for VAP to which the peer belongs to
501  * @ast_hash: ast hash value in HW
502  *
503  * Return: None
504  */
505 static inline void dp_peer_map_ast(struct dp_soc *soc,
506 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
507 	uint8_t vdev_id, uint16_t ast_hash)
508 {
509 	struct dp_ast_entry *ast_entry = NULL;
510 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
511 
512 	if (!peer) {
513 		return;
514 	}
515 
516 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
517 		  "%s: peer %pK ID %d vid %d mac %pM",
518 		  __func__, peer, hw_peer_id, vdev_id, mac_addr);
519 
520 	qdf_spin_lock_bh(&soc->ast_lock);
521 
522 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
523 
524 	if (ast_entry) {
525 		ast_entry->ast_idx = hw_peer_id;
526 		soc->ast_table[hw_peer_id] = ast_entry;
527 		ast_entry->is_active = TRUE;
528 		peer_type = ast_entry->type;
529 		ast_entry->ast_hash_value = ast_hash;
530 		ast_entry->is_mapped = TRUE;
531 	}
532 
533 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
534 		if (soc->cdp_soc.ol_ops->peer_map_event) {
535 			soc->cdp_soc.ol_ops->peer_map_event(
536 			soc->ctrl_psoc, peer->peer_ids[0],
537 			hw_peer_id, vdev_id,
538 			mac_addr, peer_type, ast_hash);
539 		}
540 	} else {
541 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
542 			  "AST entry not found");
543 	}
544 
545 	qdf_spin_unlock_bh(&soc->ast_lock);
546 	return;
547 }
548 
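/*
 * dp_peer_free_hmwds_cb() - AST free callback to re-add a pending HMWDS entry
 * @ctrl_psoc: control path SoC handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params carrying the pending HMWDS add request
 * @status: AST free status reported for the delete operation
 *
 * Once the previous AST entry is confirmed deleted by the target, the
 * pending HMWDS AST entry is added for the peer and the cookie is freed.
 *
 * Return: None
 */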
549 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
550 			   struct cdp_soc *dp_soc,
551 			   void *cookie,
552 			   enum cdp_ast_free_status status)
553 {
554 	struct dp_ast_free_cb_params *param =
555 		(struct dp_ast_free_cb_params *)cookie;
556 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
557 	struct dp_peer *peer = NULL;
558 
559 	if (status != CDP_TXRX_AST_DELETED) {
560 		qdf_mem_free(cookie);
561 		return;
562 	}
563 
564 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
565 				      0, param->vdev_id);
566 	if (peer) {
567 		dp_peer_add_ast(soc, peer,
568 				&param->mac_addr.raw[0],
569 				param->type,
570 				param->flags);
571 		dp_peer_unref_delete(peer);
572 	}
573 	qdf_mem_free(cookie);
574 }
575 
576 /*
577  * dp_peer_add_ast() - Allocate and add AST entry into peer list
578  * @soc: SoC handle
579  * @peer: peer to which ast node belongs
580  * @mac_addr: MAC address of ast node
581  * @is_self: Is this base AST entry with peer mac address
582  *
583  * This API is used by WDS source port learning function to
584  * add a new AST entry into peer AST list
585  *
586  * Return: 0 if new entry is allocated,
587  *        -1 if entry add failed
588  */
589 int dp_peer_add_ast(struct dp_soc *soc,
590 			struct dp_peer *peer,
591 			uint8_t *mac_addr,
592 			enum cdp_txrx_ast_entry_type type,
593 			uint32_t flags)
594 {
595 	struct dp_ast_entry *ast_entry = NULL;
596 	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
597 	struct dp_pdev *pdev = NULL;
598 	uint8_t next_node_mac[6];
599 	int  ret = -1;
600 	txrx_ast_free_cb cb = NULL;
601 	void *cookie = NULL;
602 	struct dp_peer *tmp_peer = NULL;
603 	bool is_peer_found = false;
604 
605 	vdev = peer->vdev;
606 	if (!vdev) {
607 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
608 			  FL("Peers vdev is NULL"));
609 		QDF_ASSERT(0);
610 		return ret;
611 	}
612 
613 	pdev = vdev->pdev;
614 
615 	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
616 					  DP_VDEV_ALL);
617 	if (tmp_peer) {
618 		tmp_vdev = tmp_peer->vdev;
619 		if (!tmp_vdev) {
620 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
621 				  FL("Peers vdev is NULL"));
622 			QDF_ASSERT(0);
623 			dp_peer_unref_delete(tmp_peer);
624 			return ret;
625 		}
626 		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
627 			is_peer_found = true;
628 
629 		dp_peer_unref_delete(tmp_peer);
630 	}
631 
632 	qdf_spin_lock_bh(&soc->ast_lock);
633 	if (peer->delete_in_progress) {
634 		qdf_spin_unlock_bh(&soc->ast_lock);
635 		return ret;
636 	}
637 
638 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
639 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
640 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
641 		  peer->mac_addr.raw, peer, mac_addr);
642 
643 
644 	/* fw supports only 2 times the max_peers ast entries */
645 	if (soc->num_ast_entries >=
646 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
647 		qdf_spin_unlock_bh(&soc->ast_lock);
648 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
649 			  FL("Max ast entries reached"));
650 		return ret;
651 	}
652 
653 	/* If the AST entry already exists, just return from here.
654 	 * An ast entry with the same mac address can exist on different
655 	 * radios; if ast_override support is enabled, use search by pdev
656 	 * in this case.
657 	 */
658 	if (soc->ast_override_support) {
659 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
660 							    pdev->pdev_id);
661 		if (ast_entry) {
662 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
663 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
664 				ast_entry->is_active = TRUE;
665 
666 			qdf_spin_unlock_bh(&soc->ast_lock);
667 			return 0;
668 		}
669 		if (is_peer_found) {
670 			/* During WDS to static roaming, peer is added
671 			 * to the list before static AST entry create.
672 			 * So, allow AST entry for STATIC type
673 			 * even if peer is present
674 			 */
675 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
676 				qdf_spin_unlock_bh(&soc->ast_lock);
677 				return 0;
678 			}
679 		}
680 	} else {
681 		/* For HMWDS_SEC, entries can be added for the same mac
682 		 * address, so do not check for an existing entry.
683 		 */
684 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
685 			goto add_ast_entry;
686 
687 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
688 
689 		if (ast_entry) {
690 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
691 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
692 				ast_entry->is_active = TRUE;
693 
694 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
695 			    !ast_entry->delete_in_progress) {
696 				qdf_spin_unlock_bh(&soc->ast_lock);
697 				return 0;
698 			}
699 
700 			/* An add for an HMWDS entry cannot be ignored if
701 			 * there is an AST entry with the same mac address.
702 			 *
703 			 * If an ast entry exists with the requested mac
704 			 * address, send a delete command and register a
705 			 * callback which can take care of adding the HMWDS
706 			 * ast entry on delete confirmation from the target.
707 			 */
708 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
709 				struct dp_ast_free_cb_params *param = NULL;
710 
711 				if (ast_entry->type ==
712 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
713 					goto add_ast_entry;
714 
715 				/* save existing callback */
716 				if (ast_entry->callback) {
717 					cb = ast_entry->callback;
718 					cookie = ast_entry->cookie;
719 				}
720 
721 				param = qdf_mem_malloc(sizeof(*param));
722 				if (!param) {
723 					QDF_TRACE(QDF_MODULE_ID_TXRX,
724 						  QDF_TRACE_LEVEL_ERROR,
725 						  "Allocation failed");
726 					qdf_spin_unlock_bh(&soc->ast_lock);
727 					return ret;
728 				}
729 
730 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
731 					     QDF_MAC_ADDR_SIZE);
732 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
733 					     &peer->mac_addr.raw[0],
734 					     QDF_MAC_ADDR_SIZE);
735 				param->type = type;
736 				param->flags = flags;
737 				param->vdev_id = vdev->vdev_id;
738 				ast_entry->callback = dp_peer_free_hmwds_cb;
739 				ast_entry->pdev_id = vdev->pdev->pdev_id;
740 				ast_entry->type = type;
741 				ast_entry->cookie = (void *)param;
742 				if (!ast_entry->delete_in_progress)
743 					dp_peer_del_ast(soc, ast_entry);
744 			}
745 
746 			/* Modify an already existing AST entry from type
747 			 * WDS to MEC on promotion. This serves as a fix when
748 			 * the backbone interfaces are interchanged, wherein
749 			 * a wds entry becomes its own MEC. The entry should
750 			 * be replaced only when the ast_entry peer matches
751 			 * the peer received in the mec event. This additional
752 			 * check is needed in wds repeater cases where a
753 			 * multicast packet from a station to the root via the
754 			 * repeater should not remove the wds entry.
755 			 */
756 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
757 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
758 			    (ast_entry->peer == peer)) {
759 				ast_entry->is_active = FALSE;
760 				dp_peer_del_ast(soc, ast_entry);
761 			}
762 			qdf_spin_unlock_bh(&soc->ast_lock);
763 
764 			/* Call the saved callback*/
765 			if (cb) {
766 				cb(soc->ctrl_psoc,
767 				   dp_soc_to_cdp_soc(soc),
768 				   cookie,
769 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
770 			}
771 			return 0;
772 		}
773 	}
774 
775 add_ast_entry:
776 	ast_entry = (struct dp_ast_entry *)
777 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
778 
779 	if (!ast_entry) {
780 		qdf_spin_unlock_bh(&soc->ast_lock);
781 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
782 			  FL("fail to allocate ast_entry"));
783 		QDF_ASSERT(0);
784 		return ret;
785 	}
786 
787 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
788 	ast_entry->pdev_id = vdev->pdev->pdev_id;
789 	ast_entry->is_mapped = false;
790 	ast_entry->delete_in_progress = false;
791 
792 	switch (type) {
793 	case CDP_TXRX_AST_TYPE_STATIC:
794 		peer->self_ast_entry = ast_entry;
795 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
796 		if (peer->vdev->opmode == wlan_op_mode_sta)
797 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
798 		break;
799 	case CDP_TXRX_AST_TYPE_SELF:
800 		peer->self_ast_entry = ast_entry;
801 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
802 		break;
803 	case CDP_TXRX_AST_TYPE_WDS:
804 		ast_entry->next_hop = 1;
805 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
806 		break;
807 	case CDP_TXRX_AST_TYPE_WDS_HM:
808 		ast_entry->next_hop = 1;
809 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
810 		break;
811 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
812 		ast_entry->next_hop = 1;
813 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
814 		break;
815 	case CDP_TXRX_AST_TYPE_MEC:
816 		ast_entry->next_hop = 1;
817 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
818 		break;
819 	case CDP_TXRX_AST_TYPE_DA:
820 		peer = peer->vdev->vap_bss_peer;
821 		ast_entry->next_hop = 1;
822 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
823 		break;
824 	default:
825 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
826 			FL("Incorrect AST entry type"));
827 	}
828 
829 	ast_entry->is_active = TRUE;
830 	DP_STATS_INC(soc, ast.added, 1);
831 	soc->num_ast_entries++;
832 	dp_peer_ast_hash_add(soc, ast_entry);
833 
834 	ast_entry->peer = peer;
835 
836 	if (type == CDP_TXRX_AST_TYPE_MEC)
837 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
838 	else
839 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
840 
841 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
842 
843 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
844 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
845 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
846 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
847 		if (QDF_STATUS_SUCCESS ==
848 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
849 				soc->ctrl_psoc,
850 				peer->vdev->vdev_id,
851 				peer->mac_addr.raw,
852 				mac_addr,
853 				next_node_mac,
854 				flags,
855 				ast_entry->type)) {
856 			qdf_spin_unlock_bh(&soc->ast_lock);
857 			return 0;
858 		}
859 	}
860 
861 	qdf_spin_unlock_bh(&soc->ast_lock);
862 	return ret;
863 }
864 
865 /*
866  * dp_peer_free_ast_entry() - Free up the ast entry memory
867  * @soc: SoC handle
868  * @ast_entry: Address search entry
869  *
870  * This API is used to free up the memory associated with
871  * AST entry.
872  *
873  * Return: None
874  */
875 void dp_peer_free_ast_entry(struct dp_soc *soc,
876 			    struct dp_ast_entry *ast_entry)
877 {
878 	/*
879 	 * NOTE: Ensure that call to this API is done
880 	 * after soc->ast_lock is taken
881 	 */
882 	ast_entry->callback = NULL;
883 	ast_entry->cookie = NULL;
884 
885 	DP_STATS_INC(soc, ast.deleted, 1);
886 	dp_peer_ast_hash_remove(soc, ast_entry);
887 	dp_peer_ast_cleanup(soc, ast_entry);
888 	qdf_mem_free(ast_entry);
889 	soc->num_ast_entries--;
890 }
891 
892 /*
893  * dp_peer_unlink_ast_entry() - Unlink the AST entry from the peer list
894  * @soc: SoC handle
895  * @ast_entry: Address search entry
896  *
897  * This API is used to remove/unlink the AST entry from the peer's AST
898  * list and clear its mapping in the soc AST table.
899  *
900  * Return: None
901  */
902 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
903 			      struct dp_ast_entry *ast_entry)
904 {
905 	/*
906 	 * NOTE: Ensure that call to this API is done
907 	 * after soc->ast_lock is taken
908 	 */
909 	struct dp_peer *peer = ast_entry->peer;
910 
911 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
912 
913 	if (ast_entry == peer->self_ast_entry)
914 		peer->self_ast_entry = NULL;
915 
916 	/*
917 	 * release the reference only if it is mapped
918 	 * to ast_table
919 	 */
920 	if (ast_entry->is_mapped)
921 		soc->ast_table[ast_entry->ast_idx] = NULL;
922 
923 	ast_entry->peer = NULL;
924 }
925 
926 /*
927  * dp_peer_del_ast() - Delete and free AST entry
928  * @soc: SoC handle
929  * @ast_entry: AST entry of the node
930  *
931  * This function removes the AST entry from peer and soc tables
932  * It assumes caller has taken the ast lock to protect the access to these
933  * tables
934  *
935  * Return: None
936  */
937 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
938 {
939 	struct dp_peer *peer;
940 
941 	if (!ast_entry)
942 		return;
943 
944 	if (ast_entry->delete_in_progress)
945 		return;
946 
947 	ast_entry->delete_in_progress = true;
948 
949 	peer = ast_entry->peer;
950 	dp_peer_ast_send_wds_del(soc, ast_entry);
951 
952 	/* Remove SELF and STATIC entries in teardown itself */
953 	if (!ast_entry->next_hop)
954 		dp_peer_unlink_ast_entry(soc, ast_entry);
955 
956 	if (ast_entry->is_mapped)
957 		soc->ast_table[ast_entry->ast_idx] = NULL;
958 
959 	/* if peer map v2 is enabled we are not freeing ast entry
960 	 * here and it is supposed to be freed in unmap event (after
961 	 * we receive delete confirmation from target)
962 	 *
963 	 * if peer_id is invalid we did not get the peer map event
964 	 * for the peer free ast entry from here only in this case
965 	 */
966 	if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry))
967 		return;
968 
969 	/* For a WDS secondary entry ast_entry->next_hop would be set, so
970 	 * unlinking has to be done explicitly here.
971 	 * As this entry is not a mapped entry, an unmap notification from
972 	 * FW will not come. Hence unlinking is done right here.
973 	 */
974 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
975 		dp_peer_unlink_ast_entry(soc, ast_entry);
976 
977 	dp_peer_free_ast_entry(soc, ast_entry);
978 }
979 
980 /*
981  * dp_peer_update_ast() - Update AST entry to point to the roamed peer
982  * @soc: SoC handle
983  * @peer: peer to which ast node belongs
984  * @ast_entry: AST entry of the node
985  * @flags: wds or hmwds
986  *
987  * This function updates the AST entry to the roamed peer and soc tables.
988  * It assumes the caller has taken the ast lock to protect the access to
989  * these tables.
990  *
991  * Return: 0 if ast entry is updated successfully
992  *         -1 failure
993  */
994 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
995 		       struct dp_ast_entry *ast_entry, uint32_t flags)
996 {
997 	int ret = -1;
998 	struct dp_peer *old_peer;
999 
1000 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1001 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
1002 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1003 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
1004 		  peer->mac_addr.raw);
1005 
1006 	/* Do not send an AST update in the below cases:
1007 	 *  1) AST entry delete has already been triggered
1008 	 *  2) Peer delete has already been triggered
1009 	 *  3) We did not get the HTT map for the create event
1010 	 */
1011 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
1012 	    !ast_entry->is_mapped)
1013 		return ret;
1014 
1015 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1016 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1017 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1018 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1019 		return 0;
1020 
1021 	/*
1022 	 * Avoids flood of WMI update messages sent to FW for same peer.
1023 	 */
1024 	if (qdf_unlikely(ast_entry->peer == peer) &&
1025 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1026 	    (ast_entry->peer->vdev == peer->vdev) &&
1027 	    (ast_entry->is_active))
1028 		return 0;
1029 
1030 	old_peer = ast_entry->peer;
1031 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1032 
1033 	ast_entry->peer = peer;
1034 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1035 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1036 	ast_entry->is_active = TRUE;
1037 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1038 
1039 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1040 				soc->ctrl_psoc,
1041 				peer->vdev->vdev_id,
1042 				ast_entry->mac_addr.raw,
1043 				peer->mac_addr.raw,
1044 				flags);
1045 
1046 	return ret;
1047 }
1048 
1049 /*
1050  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1051  * @soc: SoC handle
1052  * @ast_entry: AST entry of the node
1053  *
1054  * This function gets the pdev_id from the ast entry.
1055  *
1056  * Return: (uint8_t) pdev_id
1057  */
1058 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1059 				struct dp_ast_entry *ast_entry)
1060 {
1061 	return ast_entry->pdev_id;
1062 }
1063 
1064 /*
1065  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1066  * @soc: SoC handle
1067  * @ast_entry: AST entry of the node
1068  *
1069  * This function gets the next hop from the ast entry.
1070  *
1071  * Return: (uint8_t) next_hop
1072  */
1073 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1074 				struct dp_ast_entry *ast_entry)
1075 {
1076 	return ast_entry->next_hop;
1077 }
1078 
1079 /*
1080  * dp_peer_ast_set_type() - set type in the ast entry
1081  * @soc: SoC handle
1082  * @ast_entry: AST entry of the node
1083  *
1084  * This function sets the type in the ast entry.
1085  *
1086  * Return: None
1087  */
1088 void dp_peer_ast_set_type(struct dp_soc *soc,
1089 				struct dp_ast_entry *ast_entry,
1090 				enum cdp_txrx_ast_entry_type type)
1091 {
1092 	ast_entry->type = type;
1093 }
1094 
1095 #else
1096 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
1097 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
1098 		uint32_t flags)
1099 {
1100 	return 1;
1101 }
1102 
1103 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1104 {
1105 }
1106 
1107 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1108 			struct dp_ast_entry *ast_entry, uint32_t flags)
1109 {
1110 	return 1;
1111 }
1112 
1113 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1114 					       uint8_t *ast_mac_addr)
1115 {
1116 	return NULL;
1117 }
1118 
1119 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1120 						     uint8_t *ast_mac_addr,
1121 						     uint8_t pdev_id)
1122 {
1123 	return NULL;
1124 }
1125 
1126 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1127 {
1128 	return 0;
1129 }
1130 
1131 static inline void dp_peer_map_ast(struct dp_soc *soc,
1132 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1133 	uint8_t vdev_id, uint16_t ast_hash)
1134 {
1135 	return;
1136 }
1137 
1138 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1139 {
1140 }
1141 
1142 void dp_peer_ast_set_type(struct dp_soc *soc,
1143 				struct dp_ast_entry *ast_entry,
1144 				enum cdp_txrx_ast_entry_type type)
1145 {
1146 }
1147 
1148 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1149 				struct dp_ast_entry *ast_entry)
1150 {
1151 	return 0xff;
1152 }
1153 
1154 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1155 				struct dp_ast_entry *ast_entry)
1156 {
1157 	return 0xff;
1158 }
1159 
1160 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1161 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1162 {
1163 	return 1;
1164 }
1165 
1166 #endif
1167 
1168 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1169 			      struct dp_ast_entry *ast_entry)
1170 {
1171 	struct dp_peer *peer = ast_entry->peer;
1172 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1173 
1174 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1175 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1176 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1177 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1178 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1179 
1180 	if (ast_entry->next_hop) {
1181 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1182 						    peer->vdev->vdev_id,
1183 						    ast_entry->mac_addr.raw,
1184 						    ast_entry->type);
1185 	}
1186 
1187 }
1188 
1189 /**
1190  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1191  * @soc: soc handle
1192  * @peer: peer handle
1193  * @mac_addr: MAC address of the AST entry to search and delete
1194  *
1195  * Find the AST entry in the peer list using the MAC address and free
1196  * the entry.
1197  *
1198  * Return: SUCCESS or NOENT
1199  */
1200 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1201 					 struct dp_peer *peer,
1202 					 uint8_t *mac_addr)
1203 {
1204 	struct dp_ast_entry *ast_entry;
1205 	void *cookie = NULL;
1206 	txrx_ast_free_cb cb = NULL;
1207 
1208 	/*
1209 	 * release the reference only if it is mapped
1210 	 * to ast_table
1211 	 */
1212 
1213 	qdf_spin_lock_bh(&soc->ast_lock);
1214 
1215 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1216 	if (!ast_entry) {
1217 		qdf_spin_unlock_bh(&soc->ast_lock);
1218 		return QDF_STATUS_E_NOENT;
1219 	} else if (ast_entry->is_mapped) {
1220 		soc->ast_table[ast_entry->ast_idx] = NULL;
1221 	}
1222 
1223 	cb = ast_entry->callback;
1224 	cookie = ast_entry->cookie;
1225 
1226 
1227 	dp_peer_unlink_ast_entry(soc, ast_entry);
1228 	dp_peer_free_ast_entry(soc, ast_entry);
1229 
1230 	qdf_spin_unlock_bh(&soc->ast_lock);
1231 
1232 	if (cb) {
1233 		cb(soc->ctrl_psoc,
1234 		   dp_soc_to_cdp_soc(soc),
1235 		   cookie,
1236 		   CDP_TXRX_AST_DELETED);
1237 	}
1238 
1239 	return QDF_STATUS_SUCCESS;
1240 }
1241 
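/*
 * dp_peer_find_hash_find() - Find a peer by MAC address and vdev ID
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @mac_addr_is_aligned: set if peer_mac_addr is already aligned
 * @vdev_id: vdev ID, or DP_VDEV_ALL to match a peer on any vdev
 *
 * On success the peer's reference count is incremented; the caller must
 * release it with dp_peer_unref_delete(), following the pattern used
 * elsewhere in this file (e.g. dp_find_peer_by_addr()):
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, DP_VDEV_ALL);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 *
 * Return: peer object, or NULL if not found
 */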
1242 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1243 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1244 {
1245 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1246 	unsigned index;
1247 	struct dp_peer *peer;
1248 
1249 	if (mac_addr_is_aligned) {
1250 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1251 	} else {
1252 		qdf_mem_copy(
1253 			&local_mac_addr_aligned.raw[0],
1254 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1255 		mac_addr = &local_mac_addr_aligned;
1256 	}
1257 	index = dp_peer_find_hash_index(soc, mac_addr);
1258 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1259 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1260 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1261 			((peer->vdev->vdev_id == vdev_id) ||
1262 			 (vdev_id == DP_VDEV_ALL))) {
1263 			/* found it - increment the ref count before releasing
1264 			 * the lock
1265 			 */
1266 			qdf_atomic_inc(&peer->ref_cnt);
1267 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1268 			return peer;
1269 		}
1270 	}
1271 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1272 	return NULL; /* failure */
1273 }
1274 
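/*
 * dp_peer_find_hash_remove() - Remove a peer from the peer MAC hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold peer_ref_mutex (see the note in the function body).
 *
 * Return: None
 */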
1275 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1276 {
1277 	unsigned index;
1278 	struct dp_peer *tmppeer = NULL;
1279 	int found = 0;
1280 
1281 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1282 	/* Check if tail is not empty before delete*/
1283 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1284 	/*
1285 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1286 	 * by the caller.
1287 	 * The caller needs to hold the lock from the time the peer object's
1288 	 * reference count is decremented and tested up through the time the
1289 	 * reference to the peer object is removed from the hash table, by
1290 	 * this function.
1291 	 * Holding the lock only while removing the peer object reference
1292 	 * from the hash table keeps the hash table consistent, but does not
1293 	 * protect against a new HL tx context starting to use the peer object
1294 	 * if it looks up the peer object from its MAC address just after the
1295 	 * peer ref count is decremented to zero, but just before the peer
1296 	 * object reference is removed from the hash table.
1297 	 */
1298 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1299 		if (tmppeer == peer) {
1300 			found = 1;
1301 			break;
1302 		}
1303 	}
1304 	QDF_ASSERT(found);
1305 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1306 }
1307 
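/*
 * dp_peer_find_hash_erase() - Drop all peers still present in the hash table
 * @soc: SoC handle
 *
 * Used during SoC teardown; each remaining peer's reference count is reset
 * to one and the peer is released via dp_peer_unref_delete().
 *
 * Return: None
 */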
1308 void dp_peer_find_hash_erase(struct dp_soc *soc)
1309 {
1310 	int i;
1311 
1312 	/*
1313 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1314 	 * it's known that the soc is no longer in use.
1315 	 */
1316 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1317 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1318 			struct dp_peer *peer, *peer_next;
1319 
1320 			/*
1321 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1322 			 * memory access violation after peer is freed
1323 			 */
1324 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1325 				hash_list_elem, peer_next) {
1326 				/*
1327 				 * Don't remove the peer from the hash table -
1328 				 * that would modify the list we are currently
1329 				 * traversing, and it's not necessary anyway.
1330 				 */
1331 				/*
1332 				 * Artificially adjust the peer's ref count to
1333 				 * 1, so it will get deleted by
1334 				 * dp_peer_unref_delete.
1335 				 */
1336 				/* set to zero */
1337 				qdf_atomic_init(&peer->ref_cnt);
1338 				/* incr to one */
1339 				qdf_atomic_inc(&peer->ref_cnt);
1340 				dp_peer_unref_delete(peer);
1341 			}
1342 		}
1343 	}
1344 }
1345 
1346 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1347 {
1348 	if (soc->ast_table) {
1349 		qdf_mem_free(soc->ast_table);
1350 		soc->ast_table = NULL;
1351 	}
1352 }
1353 
1354 static void dp_peer_find_map_detach(struct dp_soc *soc)
1355 {
1356 	if (soc->peer_id_to_obj_map) {
1357 		qdf_mem_free(soc->peer_id_to_obj_map);
1358 		soc->peer_id_to_obj_map = NULL;
1359 	}
1360 }
1361 
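/*
 * dp_peer_find_attach() - Allocate the peer ID map, peer hash table,
 *			   AST table and AST hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, 1 on failure (partial allocations are freed)
 */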
1362 int dp_peer_find_attach(struct dp_soc *soc)
1363 {
1364 	if (dp_peer_find_map_attach(soc))
1365 		return 1;
1366 
1367 	if (dp_peer_find_hash_attach(soc)) {
1368 		dp_peer_find_map_detach(soc);
1369 		return 1;
1370 	}
1371 
1372 	if (dp_peer_ast_table_attach(soc)) {
1373 		dp_peer_find_hash_detach(soc);
1374 		dp_peer_find_map_detach(soc);
1375 		return 1;
1376 	}
1377 
1378 	if (dp_peer_ast_hash_attach(soc)) {
1379 		dp_peer_ast_table_detach(soc);
1380 		dp_peer_find_hash_detach(soc);
1381 		dp_peer_find_map_detach(soc);
1382 		return 1;
1383 	}
1384 
1385 	return 0; /* success */
1386 }
1387 
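/*
 * dp_rx_tid_stats_cb() - REO status callback to print per-TID queue stats
 * @soc: SoC handle
 * @cb_ctxt: dp_rx_tid for which the stats were requested
 * @reo_status: REO queue status returned by HW
 *
 * Return: None
 */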
1388 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1389 	union hal_reo_status *reo_status)
1390 {
1391 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1392 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1393 
1394 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1395 		return;
1396 
1397 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1398 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1399 			       queue_status->header.status, rx_tid->tid);
1400 		return;
1401 	}
1402 
1403 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1404 		       "ssn: %d\n"
1405 		       "curr_idx  : %d\n"
1406 		       "pn_31_0   : %08x\n"
1407 		       "pn_63_32  : %08x\n"
1408 		       "pn_95_64  : %08x\n"
1409 		       "pn_127_96 : %08x\n"
1410 		       "last_rx_enq_tstamp : %08x\n"
1411 		       "last_rx_deq_tstamp : %08x\n"
1412 		       "rx_bitmap_31_0     : %08x\n"
1413 		       "rx_bitmap_63_32    : %08x\n"
1414 		       "rx_bitmap_95_64    : %08x\n"
1415 		       "rx_bitmap_127_96   : %08x\n"
1416 		       "rx_bitmap_159_128  : %08x\n"
1417 		       "rx_bitmap_191_160  : %08x\n"
1418 		       "rx_bitmap_223_192  : %08x\n"
1419 		       "rx_bitmap_255_224  : %08x\n",
1420 		       rx_tid->tid,
1421 		       queue_status->ssn, queue_status->curr_idx,
1422 		       queue_status->pn_31_0, queue_status->pn_63_32,
1423 		       queue_status->pn_95_64, queue_status->pn_127_96,
1424 		       queue_status->last_rx_enq_tstamp,
1425 		       queue_status->last_rx_deq_tstamp,
1426 		       queue_status->rx_bitmap_31_0,
1427 		       queue_status->rx_bitmap_63_32,
1428 		       queue_status->rx_bitmap_95_64,
1429 		       queue_status->rx_bitmap_127_96,
1430 		       queue_status->rx_bitmap_159_128,
1431 		       queue_status->rx_bitmap_191_160,
1432 		       queue_status->rx_bitmap_223_192,
1433 		       queue_status->rx_bitmap_255_224);
1434 
1435 	DP_PRINT_STATS(
1436 		       "curr_mpdu_cnt      : %d\n"
1437 		       "curr_msdu_cnt      : %d\n"
1438 		       "fwd_timeout_cnt    : %d\n"
1439 		       "fwd_bar_cnt        : %d\n"
1440 		       "dup_cnt            : %d\n"
1441 		       "frms_in_order_cnt  : %d\n"
1442 		       "bar_rcvd_cnt       : %d\n"
1443 		       "mpdu_frms_cnt      : %d\n"
1444 		       "msdu_frms_cnt      : %d\n"
1445 		       "total_byte_cnt     : %d\n"
1446 		       "late_recv_mpdu_cnt : %d\n"
1447 		       "win_jump_2k        : %d\n"
1448 		       "hole_cnt           : %d\n",
1449 		       queue_status->curr_mpdu_cnt,
1450 		       queue_status->curr_msdu_cnt,
1451 		       queue_status->fwd_timeout_cnt,
1452 		       queue_status->fwd_bar_cnt,
1453 		       queue_status->dup_cnt,
1454 		       queue_status->frms_in_order_cnt,
1455 		       queue_status->bar_rcvd_cnt,
1456 		       queue_status->mpdu_frms_cnt,
1457 		       queue_status->msdu_frms_cnt,
1458 		       queue_status->total_cnt,
1459 		       queue_status->late_recv_mpdu_cnt,
1460 		       queue_status->win_jump_2k,
1461 		       queue_status->hole_cnt);
1462 
1463 	DP_PRINT_STATS("Addba Req          : %d\n"
1464 			"Addba Resp         : %d\n"
1465 			"Addba Resp success : %d\n"
1466 			"Addba Resp failed  : %d\n"
1467 			"Delba Req received : %d\n"
1468 			"Delba Tx success   : %d\n"
1469 			"Delba Tx Fail      : %d\n"
1470 			"BA window size     : %d\n"
1471 			"Pn size            : %d\n",
1472 			rx_tid->num_of_addba_req,
1473 			rx_tid->num_of_addba_resp,
1474 			rx_tid->num_addba_rsp_success,
1475 			rx_tid->num_addba_rsp_failed,
1476 			rx_tid->num_of_delba_req,
1477 			rx_tid->delba_tx_success_cnt,
1478 			rx_tid->delba_tx_fail_cnt,
1479 			rx_tid->ba_win_size,
1480 			rx_tid->pn_size);
1481 }
1482 
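/*
 * dp_peer_find_add_id() - Map a firmware peer ID to an existing peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: HW AST index for this peer
 * @vdev_id: vdev ID
 *
 * Return: the peer object (with the reference taken by
 *	   dp_peer_find_hash_find() still held), or NULL if no peer with
 *	   this MAC address exists
 */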
1483 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1484 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1485 	uint8_t vdev_id)
1486 {
1487 	struct dp_peer *peer;
1488 
1489 	QDF_ASSERT(peer_id <= soc->max_peers);
1490 	/* check if there's already a peer object with this MAC address */
1491 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1492 		0 /* is aligned */, vdev_id);
1493 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1494 		  "%s: peer %pK ID %d vid %d mac %pM",
1495 		  __func__, peer, peer_id, vdev_id, peer_mac_addr);
1496 
1497 	if (peer) {
1498 		/* peer's ref count was already incremented by
1499 		 * peer_find_hash_find
1500 		 */
1501 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1502 			  "%s: ref_cnt: %d", __func__,
1503 			   qdf_atomic_read(&peer->ref_cnt));
1504 		if (!soc->peer_id_to_obj_map[peer_id])
1505 			soc->peer_id_to_obj_map[peer_id] = peer;
1506 		else {
1507 			/* Peer map event came for peer_id which
1508 			 * is already mapped, this is not expected
1509 			 */
1510 			QDF_ASSERT(0);
1511 		}
1512 
1513 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1514 			/* TBDXXX: assert for now */
1515 			QDF_ASSERT(0);
1516 		}
1517 
1518 		return peer;
1519 	}
1520 
1521 	return NULL;
1522 }
1523 
1524 /**
1525  * dp_rx_peer_map_handler() - handle peer map event from firmware
1526  * @soc_handle - generic soc handle
1527  * @peer_id - peer_id from firmware
1528  * @hw_peer_id - ast index for this peer
1529  * @vdev_id - vdev ID
1530  * @peer_mac_addr - mac address of the peer
1531  * @ast_hash - ast hash value
1532  * @is_wds - flag to indicate peer map event for WDS ast entry
1533  *
1534  * associate the peer_id that firmware provided with peer entry
1535  * and update the ast table in the host with the hw_peer_id.
1536  *
1537  * Return: none
1538  */
1539 
1540 void
1541 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1542 		       uint16_t hw_peer_id, uint8_t vdev_id,
1543 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1544 		       uint8_t is_wds)
1545 {
1546 	struct dp_peer *peer = NULL;
1547 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1548 
1549 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
1550 		soc, peer_id, hw_peer_id,
1551 		  peer_mac_addr, vdev_id);
1552 
1553 	/* Peer map event for WDS ast entry get the peer from
1554 	 * obj map
1555 	 */
1556 	if (is_wds) {
1557 		peer = soc->peer_id_to_obj_map[peer_id];
1558 		/*
1559 		 * In certain cases, such as an Auth attack on a
1560 		 * repeater, the number of ast_entries falling in
1561 		 * the same hash bucket can exceed the max_skid
1562 		 * length supported by HW in the root AP. In these
1563 		 * cases the FW will return the hw_peer_id (ast_index)
1564 		 * as 0xffff, indicating HW could not add the entry in
1565 		 * its table. The host has to delete the entry from its
1566 		 * table in these cases.
1567 		 */
1568 		if (hw_peer_id == HTT_INVALID_PEER) {
1569 			DP_STATS_INC(soc, ast.map_err, 1);
1570 			if (!dp_peer_ast_free_entry_by_mac(soc,
1571 							   peer,
1572 							   peer_mac_addr))
1573 				return;
1574 
1575 			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1576 				 peer, peer->peer_ids[0],
1577 				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1578 				 is_wds);
1579 
1580 			return;
1581 		}
1582 
1583 	} else {
1584 		/*
1585 		 * It's the responsibility of the CP and FW to ensure
1586 		 * that peer is created successfully. Ideally DP should
1587 		 * not hit the below condition for directly associated
1588 		 * peers.
1589 		 */
1590 		if ((hw_peer_id < 0) ||
1591 		    (hw_peer_id >=
1592 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1593 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1594 				  "invalid hw_peer_id: %d", hw_peer_id);
1595 			qdf_assert_always(0);
1596 		}
1597 
1598 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1599 					   hw_peer_id, vdev_id);
1600 
1601 		if (peer) {
1602 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1603 			    qdf_mem_cmp(peer->mac_addr.raw,
1604 					peer->vdev->mac_addr.raw,
1605 					QDF_MAC_ADDR_SIZE) != 0) {
1606 				dp_info("STA vdev bss_peer!!!!");
1607 				peer->bss_peer = 1;
1608 				peer->vdev->vap_bss_peer = peer;
1609 			}
1610 
1611 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1612 				peer->vdev->bss_ast_hash = ast_hash;
1613 				peer->vdev->bss_ast_idx = hw_peer_id;
1614 			}
1615 
1616 			/* Add the ast entry in case the self ast entry
1617 			 * was deleted due to a DP/CP sync issue.
1618 			 *
1619 			 * self_ast_entry is modified in the peer create
1620 			 * and peer unmap paths, which cannot run in
1621 			 * parallel with peer map, so no lock is needed
1622 			 * before referring to it.
1623 			 */
1624 			if (!peer->self_ast_entry) {
1625 				dp_info("Add self ast from map %pM",
1626 					peer_mac_addr);
1627 				dp_peer_add_ast(soc, peer,
1628 						peer_mac_addr,
1629 						type, 0);
1630 			}
1631 
1632 		}
1633 	}
1634 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1635 			hw_peer_id, vdev_id, ast_hash);
1636 }
1637 
1638 /**
1639  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1640  * @soc_handle - generic soc handle
1641  * @peer_id - peer_id from firmware
1642  * @vdev_id - vdev ID
1643  * @mac_addr - mac address of the peer or wds entry
1644  * @is_wds - flag to indicate peer map event for WDS ast entry
1645  *
1646  * Return: none
1647  */
1648 void
1649 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1650 			 uint8_t vdev_id, uint8_t *mac_addr,
1651 			 uint8_t is_wds)
1652 {
1653 	struct dp_peer *peer;
1654 	uint8_t i;
1655 
1656 	peer = __dp_peer_find_by_id(soc, peer_id);
1657 
1658 	/*
1659 	 * Currently peer IDs are assigned for vdevs as well as peers.
1660 	 * If the peer ID is for a vdev, then the peer pointer stored
1661 	 * in peer_id_to_obj_map will be NULL.
1662 	 */
1663 	if (!peer) {
1664 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1665 		return;
1666 	}
1667 
1668 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1669 	 */
1670 	if (is_wds) {
1671 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1672 			return;
1673 
1674 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1675 			 peer, peer->peer_ids[0],
1676 			 peer->mac_addr.raw, mac_addr, vdev_id,
1677 			 is_wds);
1678 
1679 		return;
1680 	}
1681 
1682 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1683 		soc, peer_id, peer);
1684 
1685 	soc->peer_id_to_obj_map[peer_id] = NULL;
1686 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1687 		if (peer->peer_ids[i] == peer_id) {
1688 			peer->peer_ids[i] = HTT_INVALID_PEER;
1689 			break;
1690 		}
1691 	}
1692 
1693 	/*
1694 	 * Reset ast flow mapping table
1695 	 */
1696 	dp_peer_reset_flowq_map(peer);
1697 
1698 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1699 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1700 				peer_id, vdev_id);
1701 	}
1702 
1703 	/*
1704 	 * Remove a reference to the peer.
1705 	 * If there are no more references, delete the peer object.
1706 	 */
1707 	dp_peer_unref_delete(peer);
1708 }
1709 
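/*
 * dp_peer_find_detach() - Free the peer ID map, peer hash table, AST hash
 *			   table and AST table
 * @soc: SoC handle
 *
 * Return: None
 */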
1710 void
1711 dp_peer_find_detach(struct dp_soc *soc)
1712 {
1713 	dp_peer_find_map_detach(soc);
1714 	dp_peer_find_hash_detach(soc);
1715 	dp_peer_ast_hash_detach(soc);
1716 	dp_peer_ast_table_detach(soc);
1717 }
1718 
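/*
 * dp_rx_tid_update_cb() - REO status callback for RX TID queue updates
 * @soc: SoC handle
 * @cb_ctxt: dp_rx_tid whose HW descriptor was updated
 * @reo_status: REO command status
 *
 * Return: None
 */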
1719 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1720 	union hal_reo_status *reo_status)
1721 {
1722 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1723 
1724 	if ((reo_status->rx_queue_status.header.status !=
1725 		HAL_REO_CMD_SUCCESS) &&
1726 		(reo_status->rx_queue_status.header.status !=
1727 		HAL_REO_CMD_DRAIN)) {
1728 		/* Should not happen normally. Just print error for now */
1729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1730 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1731 			  __func__,
1732 			  reo_status->rx_queue_status.header.status,
1733 			  rx_tid->tid);
1734 	}
1735 }
1736 
1737 /*
1738  * dp_find_peer_by_addr - find peer instance by mac address
1739  * @dev: physical device instance
1740  * @peer_mac_addr: peer mac address
1741  *
1742  * Return: peer instance pointer
1743  */
1744 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
1745 {
1746 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1747 	struct dp_peer *peer;
1748 
1749 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1750 
1751 	if (!peer)
1752 		return NULL;
1753 
1754 	dp_verbose_debug("peer %pK mac: %pM", peer,
1755 			 peer->mac_addr.raw);
1756 
1757 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1758 	 * Decrement it here.
1759 	 */
1760 	dp_peer_unref_delete(peer);
1761 
1762 	return peer;
1763 }
1764 
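/*
 * dp_get_peer_vdev_roaming_in_progress() - Check if the peer's vdev is
 *					     currently roaming
 * @peer: peer handle
 *
 * Return: true if roaming is in progress on the peer's vdev, false otherwise
 */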
1765 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1766 {
1767 	struct ol_if_ops *ol_ops = NULL;
1768 	bool is_roaming = false;
1769 	uint8_t vdev_id = -1;
1770 	struct cdp_soc_t *soc;
1771 
1772 	if (!peer) {
1773 		dp_info("Peer is NULL. No roaming possible");
1774 		return false;
1775 	}
1776 
1777 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
1778 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1779 
1780 	if (ol_ops && ol_ops->is_roam_inprogress) {
1781 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
1782 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1783 	}
1784 
1785 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1786 		peer->mac_addr.raw, vdev_id, is_roaming);
1787 
1788 	return is_roaming;
1789 }
1790 
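/*
 * dp_rx_tid_update_wifi3() - Update the REO queue for an already set up RX TID
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Updates the BA window size (and SSN, when valid) in the REO queue
 * descriptor and notifies the target of the new reorder queue setup.
 *
 * Return: QDF_STATUS code
 */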
1791 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1792 					 ba_window_size, uint32_t start_seq)
1793 {
1794 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1795 	struct dp_soc *soc = peer->vdev->pdev->soc;
1796 	struct hal_reo_cmd_params params;
1797 
1798 	qdf_mem_zero(&params, sizeof(params));
1799 
1800 	params.std.need_status = 1;
1801 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1802 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1803 	params.u.upd_queue_params.update_ba_window_size = 1;
1804 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1805 
1806 	if (start_seq < IEEE80211_SEQ_MAX) {
1807 		params.u.upd_queue_params.update_ssn = 1;
1808 		params.u.upd_queue_params.ssn = start_seq;
1809 	} else {
1810 	    dp_set_ssn_valid_flag(&params, 0);
1811 	}
1812 
1813 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1814 			    dp_rx_tid_update_cb, rx_tid)) {
1815 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
1816 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1817 	}
1818 
1819 	rx_tid->ba_win_size = ba_window_size;
1820 
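	/* While the vdev is roaming, skip pushing the reorder queue setup
	 * to the target.
	 */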
1821 	if (dp_get_peer_vdev_roaming_in_progress(peer))
1822 		return QDF_STATUS_E_PERM;
1823 
1824 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
1825 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1826 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
1827 			peer->vdev->vdev_id, peer->mac_addr.raw,
1828 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1829 
1830 	return QDF_STATUS_SUCCESS;
1831 }
1832 
1833 /*
1834  * dp_reo_desc_free() - Callback to free REO descriptor memory after
1835  * HW cache flush
1836  *
1837  * @soc: DP SOC handle
1838  * @cb_ctxt: Callback context
1839  * @reo_status: REO command status
1840  */
1841 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1842 	union hal_reo_status *reo_status)
1843 {
1844 	struct reo_desc_list_node *freedesc =
1845 		(struct reo_desc_list_node *)cb_ctxt;
1846 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1847 
1848 	if ((reo_status->fl_cache_status.header.status !=
1849 		HAL_REO_CMD_SUCCESS) &&
1850 		(reo_status->fl_cache_status.header.status !=
1851 		HAL_REO_CMD_DRAIN)) {
1852 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1853 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1854 			  __func__,
1855 			  reo_status->fl_cache_status.header.status,
1856 			  freedesc->rx_tid.tid);
1857 	}
1858 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1859 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1860 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1861 	qdf_mem_unmap_nbytes_single(soc->osdev,
1862 		rx_tid->hw_qdesc_paddr,
1863 		QDF_DMA_BIDIRECTIONAL,
1864 		rx_tid->hw_qdesc_alloc_size);
1865 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1866 	qdf_mem_free(freedesc);
1867 }
1868 
1869 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
1870 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1871 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1872 {
1873 	if (dma_addr < 0x50000000)
1874 		return QDF_STATUS_E_FAILURE;
1875 	else
1876 		return QDF_STATUS_SUCCESS;
1877 }
1878 #else
1879 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1880 {
1881 	return QDF_STATUS_SUCCESS;
1882 }
1883 #endif
1884 
1885 
1886 /*
1887  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1888  * @peer: Datapath peer handle
1889  * @tid: TID
1890  * @ba_window_size: BlockAck window size
1891  * @start_seq: Starting sequence number
1892  *
1893  * Return: QDF_STATUS code
1894  */
1895 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1896 				 uint32_t ba_window_size, uint32_t start_seq)
1897 {
1898 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1899 	struct dp_vdev *vdev = peer->vdev;
1900 	struct dp_soc *soc = vdev->pdev->soc;
1901 	uint32_t hw_qdesc_size;
1902 	uint32_t hw_qdesc_align;
1903 	int hal_pn_type;
1904 	void *hw_qdesc_vaddr;
1905 	uint32_t alloc_tries = 0;
1906 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1907 
1908 	if (peer->delete_in_progress ||
1909 	    !qdf_atomic_read(&peer->is_default_route_set))
1910 		return QDF_STATUS_E_FAILURE;
1911 
1912 	rx_tid->ba_win_size = ba_window_size;
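	/* If a HW queue descriptor already exists for this TID, update it
	 * in place instead of allocating a new one.
	 */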
1913 	if (rx_tid->hw_qdesc_vaddr_unaligned)
1914 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1915 			start_seq);
1916 	rx_tid->delba_tx_status = 0;
1917 	rx_tid->ppdu_id_2k = 0;
1918 	rx_tid->num_of_addba_req = 0;
1919 	rx_tid->num_of_delba_req = 0;
1920 	rx_tid->num_of_addba_resp = 0;
1921 	rx_tid->num_addba_rsp_failed = 0;
1922 	rx_tid->num_addba_rsp_success = 0;
1923 	rx_tid->delba_tx_success_cnt = 0;
1924 	rx_tid->delba_tx_fail_cnt = 0;
1925 	rx_tid->statuscode = 0;
1926 
1927 	/* TODO: Allocating HW queue descriptors based on max BA window size
1928 	 * for all QOS TIDs so that same descriptor can be used later when
1929 	 * ADDBA request is received. This should be changed to allocate HW
1930 	 * queue descriptors based on BA window size being negotiated (0 for
1931 	 * non BA cases), and reallocate when BA window size changes and also
1932 	 * send WMI message to FW to change the REO queue descriptor in Rx
1933 	 * peer entry as part of dp_rx_tid_update.
1934 	 */
1935 	if (tid != DP_NON_QOS_TID)
1936 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1937 			HAL_RX_MAX_BA_WINDOW, tid);
1938 	else
1939 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1940 			ba_window_size, tid);
1941 
1942 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1943 	/* To avoid unnecessary extra allocation for alignment, try allocating
1944 	 * exact size and see if we already have aligned address.
1945 	 */
1946 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1947 
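	/* Allocation is retried (up to 10 times below) if the resulting DMA
	 * address fails dp_reo_desc_addr_chk(); that check only rejects
	 * addresses on the emulation/x86 build.
	 */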
1948 try_desc_alloc:
1949 	rx_tid->hw_qdesc_vaddr_unaligned =
1950 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1951 
1952 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1953 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1954 			  "%s: Rx tid HW desc alloc failed: tid %d",
1955 			  __func__, tid);
1956 		return QDF_STATUS_E_NOMEM;
1957 	}
1958 
1959 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1960 		hw_qdesc_align) {
1961 		/* Address allocated above is not aligned. Allocate extra
1962 		 * memory for alignment
1963 		 */
1964 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1965 		rx_tid->hw_qdesc_vaddr_unaligned =
1966 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1967 					hw_qdesc_align - 1);
1968 
1969 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1970 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1971 				  "%s: Rx tid HW desc alloc failed: tid %d",
1972 				  __func__, tid);
1973 			return QDF_STATUS_E_NOMEM;
1974 		}
1975 
1976 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1977 			rx_tid->hw_qdesc_vaddr_unaligned,
1978 			hw_qdesc_align);
1979 
1980 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1981 			  "%s: Total Size %d Aligned Addr %pK",
1982 			  __func__, rx_tid->hw_qdesc_alloc_size,
1983 			  hw_qdesc_vaddr);
1984 
1985 	} else {
1986 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1987 	}
1988 
1989 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1990 	 * Currently this is set based on htt indication
1991 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1992 	 */
1993 	switch (peer->security[dp_sec_ucast].sec_type) {
1994 	case cdp_sec_type_tkip_nomic:
1995 	case cdp_sec_type_aes_ccmp:
1996 	case cdp_sec_type_aes_ccmp_256:
1997 	case cdp_sec_type_aes_gcmp:
1998 	case cdp_sec_type_aes_gcmp_256:
1999 		hal_pn_type = HAL_PN_WPA;
2000 		break;
2001 	case cdp_sec_type_wapi:
2002 		if (vdev->opmode == wlan_op_mode_ap)
2003 			hal_pn_type = HAL_PN_WAPI_EVEN;
2004 		else
2005 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2006 		break;
2007 	default:
2008 		hal_pn_type = HAL_PN_NONE;
2009 		break;
2010 	}
2011 
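	/* Program the REO queue descriptor with the BA window, start sequence
	 * number and PN type, then DMA-map it so its physical address can be
	 * handed to the target.
	 */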
2012 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2013 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2014 
2015 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2016 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2017 		&(rx_tid->hw_qdesc_paddr));
2018 
2019 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2020 			QDF_STATUS_SUCCESS) {
2021 		if (alloc_tries++ < 10) {
2022 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2023 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2024 			goto try_desc_alloc;
2025 		} else {
2026 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2027 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
2028 				  __func__, tid);
2029 			err = QDF_STATUS_E_NOMEM;
2030 			goto error;
2031 		}
2032 	}
2033 
2034 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2035 		err = QDF_STATUS_E_PERM;
2036 		goto error;
2037 	}
2038 
2039 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2040 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2041 		    soc->ctrl_psoc,
2042 		    peer->vdev->pdev->pdev_id,
2043 		    peer->vdev->vdev_id,
2044 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2045 		    1, ba_window_size)) {
2046 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2047 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
2048 				  __func__, tid);
2049 			err = QDF_STATUS_E_FAILURE;
2050 			goto error;
2051 		}
2052 	}
2053 	return QDF_STATUS_SUCCESS;
2054 error:
2055 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2056 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2057 		    QDF_STATUS_SUCCESS)
2058 			qdf_mem_unmap_nbytes_single(
2059 				soc->osdev,
2060 				rx_tid->hw_qdesc_paddr,
2061 				QDF_DMA_BIDIRECTIONAL,
2062 				rx_tid->hw_qdesc_alloc_size);
2063 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2064 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2065 	}
2066 	return err;
2067 }
2068 
2069 #ifdef REO_DESC_DEFER_FREE
2070 /*
2071  * dp_reo_desc_clean_up() - If the command to flush the base descriptor
2072  * fails, add the descriptor back to the freelist and defer the deletion
2073  *
2074  * @soc: DP SOC handle
2075  * @desc: Base descriptor to be freed
2076  * @reo_status: REO command status
2077  */
2078 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2079 				 struct reo_desc_list_node *desc,
2080 				 union hal_reo_status *reo_status)
2081 {
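	/* Defer the free: timestamp the descriptor and queue it back on the
	 * freelist so the flush is retried later from dp_rx_tid_delete_cb().
	 */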
2082 	desc->free_ts = qdf_get_system_timestamp();
2083 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2084 	qdf_list_insert_back(&soc->reo_desc_freelist,
2085 			     (qdf_list_node_t *)desc);
2086 }
2087 
2088 #else
2089 /*
2090  * dp_reo_desc_clean_up() - If sending the REO command to flush the
2091  * cache fails, free the base REO descriptor anyway
2092  *
2093  * @soc: DP SOC handle
2094  * @desc: Base descriptor to be freed
2095  * @reo_status: REO command status
2096  */
2097 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2098 				 struct reo_desc_list_node *desc,
2099 				 union hal_reo_status *reo_status)
2100 {
2101 	if (reo_status) {
2102 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2103 		reo_status->fl_cache_status.header.status = 0;
2104 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2105 	}
2106 }
2107 #endif
2108 
2109 /*
2110  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2111  * cmd and re-insert desc into free list if send fails.
2112  *
2113  * @soc: DP SOC handle
2114  * @desc: descriptor with the resend-update-cmd flag set
2115  * @rx_tid: RX TID of the descriptor, whose valid field is to be reset
2116  * to 0 in HW
2117  */
2118 static void dp_resend_update_reo_cmd(struct dp_soc *soc,
2119 				     struct reo_desc_list_node *desc,
2120 				     struct dp_rx_tid *rx_tid)
2121 {
2122 	struct hal_reo_cmd_params params;
2123 
2124 	qdf_mem_zero(&params, sizeof(params));
2125 	params.std.need_status = 1;
2126 	params.std.addr_lo =
2127 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2128 	params.std.addr_hi =
2129 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2130 	params.u.upd_queue_params.update_vld = 1;
2131 	params.u.upd_queue_params.vld = 0;
2132 	desc->resend_update_reo_cmd = false;
2133 	/*
2134 	 * If the cmd send fails then set resend_update_reo_cmd flag
2135 	 * and insert the desc at the end of the free list to retry.
2136 	 */
2137 	if (dp_reo_send_cmd(soc,
2138 			    CMD_UPDATE_RX_REO_QUEUE,
2139 			    &params,
2140 			    dp_rx_tid_delete_cb,
2141 			    (void *)desc)
2142 	    != QDF_STATUS_SUCCESS) {
2143 		desc->resend_update_reo_cmd = true;
2144 		desc->free_ts = qdf_get_system_timestamp();
2145 		qdf_list_insert_back(&soc->reo_desc_freelist,
2146 				     (qdf_list_node_t *)desc);
2147 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2148 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2149 	}
2150 }
2151 
2152 /*
2153  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2154  * after deleting the entries (i.e., setting valid=0)
2155  *
2156  * @soc: DP SOC handle
2157  * @cb_ctxt: Callback context
2158  * @reo_status: REO command status
2159  */
2160 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2161 			 union hal_reo_status *reo_status)
2162 {
2163 	struct reo_desc_list_node *freedesc =
2164 		(struct reo_desc_list_node *)cb_ctxt;
2165 	uint32_t list_size;
2166 	struct reo_desc_list_node *desc;
2167 	unsigned long curr_ts = qdf_get_system_timestamp();
2168 	uint32_t desc_size, tot_desc_size;
2169 	struct hal_reo_cmd_params params;
2170 
2171 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2172 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2173 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2174 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2175 		return;
2176 	} else if (reo_status->rx_queue_status.header.status !=
2177 		HAL_REO_CMD_SUCCESS) {
2178 		/* Should not happen normally. Just print error for now */
2179 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2180 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
2181 			  __func__,
2182 			  reo_status->rx_queue_status.header.status,
2183 			  freedesc->rx_tid.tid);
2184 	}
2185 
2186 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2187 		"%s: rx_tid: %d status: %d", __func__,
2188 		freedesc->rx_tid.tid,
2189 		reo_status->rx_queue_status.header.status);
2190 
2191 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2192 	freedesc->free_ts = curr_ts;
2193 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2194 		(qdf_list_node_t *)freedesc, &list_size);
2195 
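	/* Drain the freelist: an entry is processed once the list exceeds
	 * REO_DESC_FREELIST_SIZE, once it has aged past
	 * REO_DESC_FREE_DEFER_MS, or when it is marked for a resend of the
	 * UPDATE_RX_REO_QUEUE command.
	 */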
2196 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2197 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2198 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2199 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2200 		(desc->resend_update_reo_cmd && list_size))) {
2201 		struct dp_rx_tid *rx_tid;
2202 
2203 		qdf_list_remove_front(&soc->reo_desc_freelist,
2204 				(qdf_list_node_t **)&desc);
2205 		list_size--;
2206 		rx_tid = &desc->rx_tid;
2207 
2208 		/* First process descs with resend_update_reo_cmd set */
2209 		if (desc->resend_update_reo_cmd) {
2210 			dp_resend_update_reo_cmd(soc, desc, rx_tid);
2211 			continue;
2212 		}
2213 
2214 		/* Flush and invalidate REO descriptor from HW cache: Base and
2215 		 * extension descriptors should be flushed separately */
2216 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2217 		/* Get base descriptor size by passing non-qos TID */
2218 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2219 						   DP_NON_QOS_TID);
2220 
2221 		/* Flush reo extension descriptors */
2222 		while ((tot_desc_size -= desc_size) > 0) {
2223 			qdf_mem_zero(&params, sizeof(params));
2224 			params.std.addr_lo =
2225 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2226 				tot_desc_size) & 0xffffffff;
2227 			params.std.addr_hi =
2228 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2229 
2230 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2231 							CMD_FLUSH_CACHE,
2232 							&params,
2233 							NULL,
2234 							NULL)) {
2235 				dp_err_rl("failed to send CMD_FLUSH_CACHE:"
2236 					  "tid %d desc %pK", rx_tid->tid,
2237 					  (void *)(rx_tid->hw_qdesc_paddr));
2238 				DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2239 			}
2240 		}
2241 
2242 		/* Flush base descriptor */
2243 		qdf_mem_zero(&params, sizeof(params));
2244 		params.std.need_status = 1;
2245 		params.std.addr_lo =
2246 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2247 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2248 
2249 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2250 							  CMD_FLUSH_CACHE,
2251 							  &params,
2252 							  dp_reo_desc_free,
2253 							  (void *)desc)) {
2254 			union hal_reo_status reo_status;
2255 			/*
2256 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
2257 			 * should be unmapped. The local reo_desc, together with the TID
2258 			 * queue desc, also needs to be freed accordingly.
2259 			 *
2260 			 * Here invoke desc_free function directly to do clean up.
2261 			 *
2262 			 * In case of MCL path add the desc back to the free
2263 			 * desc list and defer deletion.
2264 			 */
2265 			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
2266 				   __func__, rx_tid->tid);
2267 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2268 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2269 		}
2270 	}
2271 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2272 }
2273 
2274 /*
2275  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2276  * @peer: Datapath peer handle
2277  * @tid: TID
2278  *
2279  * Return: 0 on success, error code on failure
2280  */
2281 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2282 {
2283 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2284 	struct dp_soc *soc = peer->vdev->pdev->soc;
2285 	struct hal_reo_cmd_params params;
2286 	struct reo_desc_list_node *freedesc =
2287 		qdf_mem_malloc(sizeof(*freedesc));
2288 
2289 	if (!freedesc) {
2290 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 			  "%s: malloc failed for freedesc: tid %d",
2292 			  __func__, tid);
2293 		return -ENOMEM;
2294 	}
2295 
2296 	freedesc->rx_tid = *rx_tid;
2297 	freedesc->resend_update_reo_cmd = false;
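	/* freedesc carries a copy of the rx_tid state; the HW queue
	 * descriptor memory itself is unmapped and freed later from the REO
	 * callback path (dp_rx_tid_delete_cb()/dp_reo_desc_free()).
	 */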
2298 
2299 	qdf_mem_zero(&params, sizeof(params));
2300 
2301 	params.std.need_status = 1;
2302 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2303 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2304 	params.u.upd_queue_params.update_vld = 1;
2305 	params.u.upd_queue_params.vld = 0;
2306 
2307 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2308 			    dp_rx_tid_delete_cb, (void *)freedesc)
2309 		!= QDF_STATUS_SUCCESS) {
2310 		/* Defer the clean up to the call back context */
2311 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2312 		freedesc->free_ts = qdf_get_system_timestamp();
2313 		freedesc->resend_update_reo_cmd = true;
2314 		qdf_list_insert_front(&soc->reo_desc_freelist,
2315 				      (qdf_list_node_t *)freedesc);
2316 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2317 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2318 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2319 	}
2320 
2321 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2322 	rx_tid->hw_qdesc_alloc_size = 0;
2323 	rx_tid->hw_qdesc_paddr = 0;
2324 
2325 	return 0;
2326 }
2327 
2328 #ifdef DP_LFR
2329 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2330 {
2331 	int tid;
2332 
2333 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2334 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2335 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2336 			  "Setting up TID %d for peer %pK peer->local_id %d",
2337 			  tid, peer, peer->local_id);
2338 	}
2339 }
2340 #else
2341 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2342 #endif
2343 
2344 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2345 /*
2346  * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
2347  * @peer: Datapath peer
2348  *
2349  */
2350 static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2351 {
2352 }
2353 
2354 /*
2355  * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
2356  * @peer: Datapath peer
2357  *
2358  */
2359 static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2360 {
2361 }
2362 
2363 /*
2364  * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
2365  * @vdev: Datapath vdev
2366  * @peer: Datapath peer
2367  *
2368  */
2369 static inline void
2370 dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2371 {
2372 }
2373 #endif
2374 
2375 /*
2376  * dp_peer_tx_init() - Initialize peer TX state
2377  * @pdev: Datapath pdev
2378  * @peer: Datapath peer
2379  *
2380  */
2381 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2382 {
2383 	dp_peer_tid_queue_init(peer);
2384 	dp_peer_update_80211_hdr(peer->vdev, peer);
2385 }
2386 
2387 /*
2388  * dp_peer_tx_cleanup() - Cleanup peer TX state
2389  * @vdev: Datapath vdev
2390  * @peer: Datapath peer
2391  *
2392  */
2393 static inline void
2394 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2395 {
2396 	dp_peer_tid_queue_cleanup(peer);
2397 }
2398 
2399 /*
2400  * dp_peer_rx_init() – Initialize receive TID state
2401  * @pdev: Datapath pdev
2402  * @peer: Datapath peer
2403  *
2404  */
2405 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2406 {
2407 	int tid;
2408 	struct dp_rx_tid *rx_tid;
2409 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2410 		rx_tid = &peer->rx_tid[tid];
2411 		rx_tid->array = &rx_tid->base;
2412 		rx_tid->base.head = rx_tid->base.tail = NULL;
2413 		rx_tid->tid = tid;
2414 		rx_tid->defrag_timeout_ms = 0;
2415 		rx_tid->ba_win_size = 0;
2416 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2417 
2418 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2419 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2420 	}
2421 
2422 	peer->active_ba_session_cnt = 0;
2423 	peer->hw_buffer_size = 0;
2424 	peer->kill_256_sessions = 0;
2425 
2426 	/* Setup default (non-qos) rx tid queue */
2427 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2428 
2429 	/* Setup rx tid queue for TID 0 at peer init. Queues for other TIDs
2430 	 * are setup later (on ADDBA); traffic received on a TID before its
2431 	 * queue is setup results in a NULL REO queue error.
2432 	 */
2433 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2434 
2435 	/*
2436 	 * Setup the rest of TID's to handle LFR
2437 	 */
2438 	dp_peer_setup_remaining_tids(peer);
2439 
2440 	/*
2441 	 * Set security defaults: no PN check, no security. The target may
2442 	 * send a HTT SEC_IND message to overwrite these defaults.
2443 	 */
2444 	peer->security[dp_sec_ucast].sec_type =
2445 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2446 }
2447 
2448 /*
2449  * dp_peer_rx_cleanup() – Cleanup receive TID state
2450  * @vdev: Datapath vdev
2451  * @peer: Datapath peer
2452  * @reuse: Peer reference reuse
2453  *
2454  */
2455 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2456 {
2457 	int tid;
2458 	uint32_t tid_delete_mask = 0;
2459 
2460 	dp_info("Remove tids for peer: %pK", peer);
2461 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2462 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2463 
2464 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2465 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2466 			/* Cleanup defrag related resource */
2467 			dp_rx_defrag_waitlist_remove(peer, tid);
2468 			dp_rx_reorder_flush_frag(peer, tid);
2469 		}
2470 
2471 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2472 			dp_rx_tid_delete_wifi3(peer, tid);
2473 
2474 			tid_delete_mask |= (1 << tid);
2475 		}
2476 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2477 	}
2478 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2479 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2480 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2481 			peer->vdev->pdev->pdev_id,
2482 			peer->vdev->vdev_id, peer->mac_addr.raw,
2483 			tid_delete_mask);
2484 	}
2485 #endif
2486 	if (!reuse)
2487 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2488 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2489 }
2490 
2491 #ifdef FEATURE_PERPKT_INFO
2492 /*
2493  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2494  * @peer: Datapath peer
2495  *
2496  * return: void
2497  */
2498 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2499 {
2500 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2501 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2502 	peer->last_delayed_ba = false;
2503 	peer->last_delayed_ba_ppduid = 0;
2504 }
2505 #else
2506 /*
2507  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2508  * @peer: Datapath peer
2509  *
2510  * return: void
2511  */
2512 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2513 {
2514 }
2515 #endif
2516 
2517 /*
2518  * dp_peer_cleanup() – Cleanup peer information
2519  * @vdev: Datapath vdev
2520  * @peer: Datapath peer
2521  * @reuse: Peer reference reuse
2522  *
2523  */
2524 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2525 {
2526 	dp_peer_tx_cleanup(vdev, peer);
2527 
2528 	/* cleanup the Rx reorder queues for this peer */
2529 	dp_peer_rx_cleanup(vdev, peer, reuse);
2530 }
2531 
2532 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2533  *                                window size when a request with
2534  *                                64 window size is received.
2535  *                                This is done as a WAR since HW can
2536  *                                have only one setting per peer (64 or 256).
2537  *                                For HKv2, we use per tid buffersize setting
2538  *                                for 0 to per_tid_basize_max_tid. For tid
2539  *                                more than per_tid_basize_max_tid we use HKv1
2540  *                                method.
2541  * @peer: Datapath peer
2542  *
2543  * Return: void
2544  */
2545 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2546 {
2547 	uint8_t delba_rcode = 0;
2548 	int tid;
2549 	struct dp_rx_tid *rx_tid = NULL;
2550 
2551 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
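	/* TIDs below per_tid_basize_max_tid keep their own per-TID buffer
	 * size (HKv2), so only the remaining TIDs are torn down here.
	 */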
2552 	for (; tid < DP_MAX_TIDS; tid++) {
2553 		rx_tid = &peer->rx_tid[tid];
2554 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2555 
2556 		if (rx_tid->ba_win_size <= 64) {
2557 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2558 			continue;
2559 		} else {
2560 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2561 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2562 				/* send delba */
2563 				if (!rx_tid->delba_tx_status) {
2564 					rx_tid->delba_tx_retry++;
2565 					rx_tid->delba_tx_status = 1;
2566 					rx_tid->delba_rcode =
2567 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2568 					delba_rcode = rx_tid->delba_rcode;
2569 
2570 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2571 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2572 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2573 							peer->vdev->pdev->soc->ctrl_psoc,
2574 							peer->vdev->vdev_id,
2575 							peer->mac_addr.raw,
2576 							tid, delba_rcode);
2577 				} else {
2578 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2579 				}
2580 			} else {
2581 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2582 			}
2583 		}
2584 	}
2585 }
2586 
2587 /*
2588  * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
2589  * response tx completion
2590  * @cdp_soc: Datapath soc handle
2591  * @peer_mac: Datapath peer mac address
2592  * @vdev_id: id of datapath vdev
2593  * @tid: TID number
2594  * @status: tx completion status
2595  * Return: 0 on success, error code on failure
2596  */
2597 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
2598 				      uint8_t *peer_mac,
2599 				      uint16_t vdev_id,
2600 				      uint8_t tid, int status)
2601 {
2602 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2603 						       peer_mac, 0, vdev_id);
2604 	struct dp_rx_tid *rx_tid = NULL;
2605 
2606 	if (!peer || peer->delete_in_progress) {
2607 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2608 			  "%s: Peer is NULL!\n", __func__);
2609 		goto fail;
2610 	}
2611 	rx_tid = &peer->rx_tid[tid];
2612 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2613 	if (status) {
2614 		rx_tid->num_addba_rsp_failed++;
2615 		dp_rx_tid_update_wifi3(peer, tid, 1,
2616 				       IEEE80211_SEQ_MAX);
2617 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2618 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2619 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2620 
2621 		goto success;
2622 	}
2623 
2624 	rx_tid->num_addba_rsp_success++;
2625 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2626 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2627 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2628 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2629 			__func__, tid);
2630 		goto fail;
2631 	}
2632 
2633 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2634 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2635 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2636 			  "%s: default route is not set for peer: %pM",
2637 			  __func__, peer->mac_addr.raw);
2638 		goto fail;
2639 	}
2640 
2641 	/* First Session */
2642 	if (peer->active_ba_session_cnt == 0) {
2643 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2644 			peer->hw_buffer_size = 256;
2645 		else
2646 			peer->hw_buffer_size = 64;
2647 	}
2648 
2649 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2650 
2651 	peer->active_ba_session_cnt++;
2652 
2653 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2654 
2655 	/* Kill any session having 256 buffer size
2656 	 * when 64 buffer size request is received.
2657 	 * Also, latch on to 64 as new buffer size.
2658 	 */
2659 	if (peer->kill_256_sessions) {
2660 		dp_teardown_256_ba_sessions(peer);
2661 		peer->kill_256_sessions = 0;
2662 	}
2663 
2664 success:
2665 	dp_peer_unref_delete(peer);
2666 	return QDF_STATUS_SUCCESS;
2667 
2668 fail:
2669 	if (peer)
2670 		dp_peer_unref_delete(peer);
2671 
2672 	return QDF_STATUS_E_FAILURE;
2673 }
2674 
2675 /*
2676  * dp_addba_responsesetup_wifi3() - Prepare ADDBA response parameters
2677  *
2678  * @cdp_soc: Datapath soc handle
2679  * @peer_mac: Datapath peer mac address
2680  * @vdev_id: id of datapath vdev
2681  * @tid: TID number
2682  * @dialogtoken: output dialogtoken
2683  * @statuscode: output status code
2684  * @buffersize: Output BA window size
2685  * @batimeout: Output BA timeout
2686  */
2687 QDF_STATUS
2688 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2689 			     uint16_t vdev_id, uint8_t tid,
2690 			     uint8_t *dialogtoken, uint16_t *statuscode,
2691 			     uint16_t *buffersize, uint16_t *batimeout)
2692 {
2693 	struct dp_rx_tid *rx_tid = NULL;
2694 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2695 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2696 						       peer_mac, 0, vdev_id);
2697 
2698 	if (!peer || peer->delete_in_progress) {
2699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2700 			  "%s: Peer is NULL!\n", __func__);
2701 		status = QDF_STATUS_E_FAILURE;
2702 		goto fail;
2703 	}
2704 	rx_tid = &peer->rx_tid[tid];
2705 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2706 	rx_tid->num_of_addba_resp++;
2707 	/* setup ADDBA response parameters */
2708 	*dialogtoken = rx_tid->dialogtoken;
2709 	*statuscode = rx_tid->statuscode;
2710 	*buffersize = rx_tid->ba_win_size;
2711 	*batimeout  = 0;
2712 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2713 
2714 fail:
2715 	if (peer)
2716 		dp_peer_unref_delete(peer);
2717 
2718 	return status;
2719 }
2720 
2721 /* dp_check_ba_buffersize() - Check buffer size in request
2722  *                            and latch onto this size based on
2723  *                            size used in first active session.
2724  * @peer: Datapath peer
2725  * @tid: Tid
2726  * @buffersize: Block ack window size
2727  *
2728  * Return: void
2729  */
2730 static void dp_check_ba_buffersize(struct dp_peer *peer,
2731 				   uint16_t tid,
2732 				   uint16_t buffersize)
2733 {
2734 	struct dp_rx_tid *rx_tid = NULL;
2735 
2736 	rx_tid = &peer->rx_tid[tid];
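	/* The first active BA session latches the peer-wide HW buffer size
	 * (64 or 256). A 64-or-smaller request against a 256 latch drops the
	 * peer back to 64 and flags the existing 256 sessions for teardown.
	 */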
2737 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2738 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2739 		rx_tid->ba_win_size = buffersize;
2740 		return;
2741 	} else {
2742 		if (peer->active_ba_session_cnt == 0) {
2743 			rx_tid->ba_win_size = buffersize;
2744 		} else {
2745 			if (peer->hw_buffer_size == 64) {
2746 				if (buffersize <= 64)
2747 					rx_tid->ba_win_size = buffersize;
2748 				else
2749 					rx_tid->ba_win_size = peer->hw_buffer_size;
2750 			} else if (peer->hw_buffer_size == 256) {
2751 				if (buffersize > 64) {
2752 					rx_tid->ba_win_size = buffersize;
2753 				} else {
2754 					rx_tid->ba_win_size = buffersize;
2755 					peer->hw_buffer_size = 64;
2756 					peer->kill_256_sessions = 1;
2757 				}
2758 			}
2759 		}
2760 	}
2761 }
2762 
2763 /*
2764  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2765  *
2766  * @soc: Datapath soc handle
2767  * @peer_mac: Datapath peer mac address
2768  * @vdev_id: id of datapath vdev
2769  * @dialogtoken: dialogtoken from ADDBA frame
2770  * @tid: TID number
2771  * @batimeout: BA timeout
2772  * @buffersize: BA window size
2773  * @startseqnum: Start seq. number received in BA sequence control
2774  *
2775  * Return: 0 on success, error code on failure
2776  */
2777 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
2778 				  uint8_t *peer_mac,
2779 				  uint16_t vdev_id,
2780 				  uint8_t dialogtoken,
2781 				  uint16_t tid, uint16_t batimeout,
2782 				  uint16_t buffersize,
2783 				  uint16_t startseqnum)
2784 {
2785 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2786 	struct dp_rx_tid *rx_tid = NULL;
2787 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2788 						       peer_mac, 0, vdev_id);
2789 
2790 	if (!peer || peer->delete_in_progress) {
2791 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2792 			  "%s: Peer is NULL!\n", __func__);
2793 		status = QDF_STATUS_E_FAILURE;
2794 		goto fail;
2795 	}
2796 	rx_tid = &peer->rx_tid[tid];
2797 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2798 	rx_tid->num_of_addba_req++;
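	/* An ADDBA request while a session is already active resets the
	 * existing session to INACTIVE before the new window is set up.
	 */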
2799 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2800 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
2801 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2802 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2803 		peer->active_ba_session_cnt--;
2804 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2805 			  "%s: Rx Tid- %d hw qdesc is already setup",
2806 			__func__, tid);
2807 	}
2808 
2809 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2810 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2811 		status = QDF_STATUS_E_FAILURE;
2812 		goto fail;
2813 	}
2814 	dp_check_ba_buffersize(peer, tid, buffersize);
2815 
2816 	if (dp_rx_tid_setup_wifi3(peer, tid,
2817 	    rx_tid->ba_win_size, startseqnum)) {
2818 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2819 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2820 		status = QDF_STATUS_E_FAILURE;
2821 		goto fail;
2822 	}
2823 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2824 
2825 	rx_tid->dialogtoken = dialogtoken;
2826 	rx_tid->startseqnum = startseqnum;
2827 
2828 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2829 		rx_tid->statuscode = rx_tid->userstatuscode;
2830 	else
2831 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2832 
2833 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2834 
2835 fail:
2836 	if (peer)
2837 		dp_peer_unref_delete(peer);
2838 
2839 	return status;
2840 }
2841 
2842 /*
2843  * dp_set_addba_response() - Set a user defined ADDBA response status code
2844  *
2845  * @cdp_soc: Datapath soc handle
2846  * @peer_mac: Datapath peer mac address
2847  * @vdev_id: id of datapath vdev
2848  * @tid: TID number
2849  * @statuscode: response status code to be set
2850  */
2851 QDF_STATUS
2852 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2853 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
2854 {
2855 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2856 						       peer_mac, 0, vdev_id);
2857 	struct dp_rx_tid *rx_tid;
2858 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2859 
2860 	if (!peer || peer->delete_in_progress) {
2861 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2862 			  "%s: Peer is NULL!\n", __func__);
2863 		status = QDF_STATUS_E_FAILURE;
2864 		goto fail;
2865 	}
2866 
2867 	rx_tid = &peer->rx_tid[tid];
2868 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2869 	rx_tid->userstatuscode = statuscode;
2870 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2871 fail:
2872 	if (peer)
2873 		dp_peer_unref_delete(peer);
2874 
2875 	return status;
2876 }
2877 
2878 /*
2879  * dp_delba_process_wifi3() - Process DELBA from peer
2880  * @cdp_soc: Datapath soc handle
2881  * @peer_mac: Datapath peer mac address
2882  * @vdev_id: id of datapath vdev
2883  * @tid: TID number
2884  * @reasoncode: Reason code received in DELBA frame
2885  *
2886  * Return: 0 on success, error code on failure
2887  */
2888 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2889 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
2890 {
2891 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2892 	struct dp_rx_tid *rx_tid;
2893 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2894 						      peer_mac, 0, vdev_id);
2895 
2896 	if (!peer || peer->delete_in_progress) {
2897 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2898 			  "%s: Peer is NULL!\n", __func__);
2899 		status = QDF_STATUS_E_FAILURE;
2900 		goto fail;
2901 	}
2902 	rx_tid = &peer->rx_tid[tid];
2903 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2904 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2905 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2906 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2907 		status = QDF_STATUS_E_FAILURE;
2908 		goto fail;
2909 	}
2910 	/* TODO: See if we can delete the existing REO queue descriptor and
2911 	 * replace with a new one without queue extension descriptors to save
2912 	 * memory
2913 	 */
2914 	rx_tid->delba_rcode = reasoncode;
2915 	rx_tid->num_of_delba_req++;
2916 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2917 
2918 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2919 	peer->active_ba_session_cnt--;
2920 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2921 fail:
2922 	if (peer)
2923 		dp_peer_unref_delete(peer);
2924 
2925 	return status;
2926 }
2927 
2928 /*
2929  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2930  *
2931  * @cdp_soc: Datapath soc handle
2932  * @peer_mac: Datapath peer mac address
2933  * @vdev_id: id of datapath vdev
2934  * @tid: TID number
2935  * @status: tx completion status
2936  * Return: 0 on success, error code on failure
2937  */
2938 
2939 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2940 				 uint16_t vdev_id,
2941 				 uint8_t tid, int status)
2942 {
2943 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
2944 	struct dp_rx_tid *rx_tid = NULL;
2945 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2946 						      peer_mac, 0, vdev_id);
2947 
2948 	if (!peer || peer->delete_in_progress) {
2949 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2950 			  "%s: Peer is NULL!", __func__);
2951 		ret = QDF_STATUS_E_FAILURE;
2952 		goto end;
2953 	}
2954 	rx_tid = &peer->rx_tid[tid];
2955 	qdf_spin_lock_bh(&rx_tid->tid_lock);
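	/* On DELBA tx failure, retry sending the DELBA frame up to
	 * DP_MAX_DELBA_RETRY times before giving up.
	 */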
2956 	if (status) {
2957 		rx_tid->delba_tx_fail_cnt++;
2958 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2959 			rx_tid->delba_tx_retry = 0;
2960 			rx_tid->delba_tx_status = 0;
2961 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2962 		} else {
2963 			rx_tid->delba_tx_retry++;
2964 			rx_tid->delba_tx_status = 1;
2965 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2966 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2967 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2968 					peer->vdev->pdev->soc->ctrl_psoc,
2969 					peer->vdev->vdev_id,
2970 					peer->mac_addr.raw, tid,
2971 					rx_tid->delba_rcode);
2972 		}
2973 		goto end;
2974 	} else {
2975 		rx_tid->delba_tx_success_cnt++;
2976 		rx_tid->delba_tx_retry = 0;
2977 		rx_tid->delba_tx_status = 0;
2978 	}
2979 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2980 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2981 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2982 		peer->active_ba_session_cnt--;
2983 	}
2984 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2985 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2986 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2987 	}
2988 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2989 
2990 end:
2991 	if (peer)
2992 		dp_peer_unref_delete(peer);
2993 
2994 	return ret;
2995 }
2996 
2997 /**
2998  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2999  * @soc: Datapath soc handle
3000  * @vdev_id: id of datapath vdev
3001  * @peer_mac: Datapath peer mac address
3002  * @sec_type: security type
3003  * @rx_pn: Receive pn starting number
3006  *
3007  */
3008 
3009 QDF_STATUS
3010 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3011 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3012 		      uint32_t *rx_pn)
3013 {
3014 	struct dp_pdev *pdev;
3015 	int i;
3016 	uint8_t pn_size;
3017 	struct hal_reo_cmd_params params;
3018 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3019 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3020 				peer_mac, 0, vdev_id);
3021 	struct dp_vdev *vdev =
3022 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
3023 						   vdev_id);
3024 
3025 	if (!vdev || !peer || peer->delete_in_progress) {
3026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3027 			  "%s: Peer is NULL!\n", __func__);
3028 		status = QDF_STATUS_E_FAILURE;
3029 		goto fail;
3030 	}
3031 
3032 	pdev = vdev->pdev;
3033 	qdf_mem_zero(&params, sizeof(params));
3034 
3035 	params.std.need_status = 1;
3036 	params.u.upd_queue_params.update_pn_valid = 1;
3037 	params.u.upd_queue_params.update_pn_size = 1;
3038 	params.u.upd_queue_params.update_pn = 1;
3039 	params.u.upd_queue_params.update_pn_check_needed = 1;
3040 	params.u.upd_queue_params.update_svld = 1;
3041 	params.u.upd_queue_params.svld = 0;
3042 
3043 	peer->security[dp_sec_ucast].sec_type = sec_type;
3044 
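	/* pn_size is in bits: 48 for the WPA cipher suites below, 128 for
	 * WAPI, and 0 (PN check disabled) for everything else.
	 */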
3045 	switch (sec_type) {
3046 	case cdp_sec_type_tkip_nomic:
3047 	case cdp_sec_type_aes_ccmp:
3048 	case cdp_sec_type_aes_ccmp_256:
3049 	case cdp_sec_type_aes_gcmp:
3050 	case cdp_sec_type_aes_gcmp_256:
3051 		params.u.upd_queue_params.pn_check_needed = 1;
3052 		params.u.upd_queue_params.pn_size = 48;
3053 		pn_size = 48;
3054 		break;
3055 	case cdp_sec_type_wapi:
3056 		params.u.upd_queue_params.pn_check_needed = 1;
3057 		params.u.upd_queue_params.pn_size = 128;
3058 		pn_size = 128;
3059 		if (vdev->opmode == wlan_op_mode_ap) {
3060 			params.u.upd_queue_params.pn_even = 1;
3061 			params.u.upd_queue_params.update_pn_even = 1;
3062 		} else {
3063 			params.u.upd_queue_params.pn_uneven = 1;
3064 			params.u.upd_queue_params.update_pn_uneven = 1;
3065 		}
3066 		break;
3067 	default:
3068 		params.u.upd_queue_params.pn_check_needed = 0;
3069 		pn_size = 0;
3070 		break;
3071 	}
3072 
3073 
3074 	for (i = 0; i < DP_MAX_TIDS; i++) {
3075 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3076 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3077 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3078 			params.std.addr_lo =
3079 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3080 			params.std.addr_hi =
3081 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3082 
3083 			if (pn_size) {
3084 				QDF_TRACE(QDF_MODULE_ID_DP,
3085 					  QDF_TRACE_LEVEL_INFO_HIGH,
3086 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3087 					  __func__, i, rx_pn[3], rx_pn[2],
3088 					  rx_pn[1], rx_pn[0]);
3089 				params.u.upd_queue_params.update_pn_valid = 1;
3090 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3091 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3092 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3093 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3094 			}
3095 			rx_tid->pn_size = pn_size;
3096 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3097 					    CMD_UPDATE_RX_REO_QUEUE,
3098 					    &params, dp_rx_tid_update_cb,
3099 					    rx_tid)) {
3100 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3101 					   "tid %d desc %pK", rx_tid->tid,
3102 					   (void *)(rx_tid->hw_qdesc_paddr));
3103 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3104 					     rx.err.reo_cmd_send_fail, 1);
3105 			}
3106 		} else {
3107 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3108 				  "PN check not setup for TID %d", i);
3109 		}
3110 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3111 	}
3112 fail:
3113 	if (peer)
3114 		dp_peer_unref_delete(peer);
3115 
3116 	return status;
3117 }
3118 
3119 
3120 void
3121 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3122 		      enum cdp_sec_type sec_type, int is_unicast,
3123 		      u_int32_t *michael_key,
3124 		      u_int32_t *rx_pn)
3125 {
3126 	struct dp_peer *peer;
3127 	int sec_index;
3128 
3129 	peer = dp_peer_find_by_id(soc, peer_id);
3130 	if (!peer) {
3131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3132 			  "Couldn't find peer from ID %d - skipping security inits",
3133 			  peer_id);
3134 		return;
3135 	}
3136 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3137 		  "sec spec for peer %pK %pM: %s key of type %d",
3138 		  peer,
3139 		  peer->mac_addr.raw,
3140 		  is_unicast ? "ucast" : "mcast",
3141 		  sec_type);
3142 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3143 	peer->security[sec_index].sec_type = sec_type;
3144 #ifdef notyet /* TODO: See if this is required for defrag support */
3145 	/* michael key only valid for TKIP, but for simplicity,
3146 	 * copy it anyway
3147 	 */
3148 	qdf_mem_copy(
3149 		&peer->security[sec_index].michael_key[0],
3150 		michael_key,
3151 		sizeof(peer->security[sec_index].michael_key));
3152 #ifdef BIG_ENDIAN_HOST
3153 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3154 				 sizeof(peer->security[sec_index].michael_key));
3155 #endif /* BIG_ENDIAN_HOST */
3156 #endif
3157 
3158 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3159 	if (sec_type != cdp_sec_type_wapi) {
3160 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3161 	} else {
3162 		for (i = 0; i < DP_MAX_TIDS; i++) {
3163 			/*
3164 			 * Setting PN valid bit for WAPI sec_type,
3165 			 * since WAPI PN has to be started with predefined value
3166 			 */
3167 			peer->tids_last_pn_valid[i] = 1;
3168 			qdf_mem_copy(
3169 				(u_int8_t *) &peer->tids_last_pn[i],
3170 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3171 			peer->tids_last_pn[i].pn128[1] =
3172 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3173 			peer->tids_last_pn[i].pn128[0] =
3174 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3175 		}
3176 	}
3177 #endif
3178 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3179 	 * all security types and last pn for WAPI) once REO command API
3180 	 * is available
3181 	 */
3182 
3183 	dp_peer_unref_del_find_by_id(peer);
3184 }
3185 
3186 #ifdef DP_PEER_EXTENDED_API
3187 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3188 			    struct ol_txrx_desc_type *sta_desc)
3189 {
3190 	struct dp_peer *peer;
3191 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3192 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3193 
3194 	if (!pdev)
3195 		return QDF_STATUS_E_FAULT;
3196 
3197 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
3198 				    sta_desc->peer_addr.bytes);
3199 
3200 	if (!peer)
3201 		return QDF_STATUS_E_FAULT;
3202 
3203 	qdf_spin_lock_bh(&peer->peer_info_lock);
3204 	peer->state = OL_TXRX_PEER_STATE_CONN;
3205 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3206 
3207 	dp_rx_flush_rx_cached(peer, false);
3208 
3209 	return QDF_STATUS_SUCCESS;
3210 }
3211 
3212 QDF_STATUS
3213 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3214 	      struct qdf_mac_addr peer_addr)
3215 {
3216 	struct dp_peer *peer;
3217 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3218 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3219 
3220 	if (!pdev)
3221 		return QDF_STATUS_E_FAULT;
3222 
3223 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3224 	if (!peer)
3225 		return QDF_STATUS_E_FAULT;
3226 
3227 	qdf_spin_lock_bh(&peer->peer_info_lock);
3228 	peer->state = OL_TXRX_PEER_STATE_DISC;
3229 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3230 
3231 	dp_rx_flush_rx_cached(peer, true);
3232 
3233 	return QDF_STATUS_SUCCESS;
3234 }
3235 
3236 /**
3237  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
3238  * @pdev - data path device instance
3239  * @vdev - virtual interface instance
3240  * @peer_addr - peer mac address
3241  *
3242  * Find peer by peer mac address within vdev
3243  *
3244  * Return: peer instance void pointer
3245  *         NULL if target peer cannot be found
3246  */
3247 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3248 		struct cdp_vdev *vdev_handle,
3249 		uint8_t *peer_addr)
3250 {
3251 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3252 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3253 	struct dp_peer *peer;
3254 
3255 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);
3256 
3257 	if (!peer)
3258 		return NULL;
3259 
3260 	if (peer->vdev != vdev) {
3261 		dp_peer_unref_delete(peer);
3262 		return NULL;
3263 	}
3264 
3265 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3266 	 * Decrement it here.
3267 	 */
3268 	dp_peer_unref_delete(peer);
3269 
3270 	return peer;
3271 }
3272 
3273 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3274 				enum ol_txrx_peer_state state)
3275 {
3276 	struct dp_peer *peer;
3277 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3278 
3279 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3280 	if (!peer) {
3281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3282 			  "Failed to find peer for: [%pM]", peer_mac);
3283 		return QDF_STATUS_E_FAILURE;
3284 	}
3285 	peer->state = state;
3286 
3287 	dp_info("peer %pK state %d", peer, peer->state);
3288 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3289 	 * Decrement it here.
3290 	 */
3291 	dp_peer_unref_delete(peer);
3292 
3293 	return QDF_STATUS_SUCCESS;
3294 }
3295 
3296 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3297 			 uint8_t *vdev_id)
3298 {
3299 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3300 	struct dp_peer *peer =
3301 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3302 
3303 	if (!peer)
3304 		return QDF_STATUS_E_FAILURE;
3305 
3306 	dp_info("peer %pK vdev %pK vdev id %d",
3307 		peer, peer->vdev, peer->vdev->vdev_id);
3308 	*vdev_id = peer->vdev->vdev_id;
3309 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3310 	 * Decrement it here.
3311 	 */
3312 	dp_peer_unref_delete(peer);
3313 
3314 	return QDF_STATUS_SUCCESS;
3315 }
3316 
3317 struct cdp_vdev *
3318 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3319 			 struct qdf_mac_addr peer_addr)
3320 {
3321 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3322 	struct dp_peer *peer = NULL;
3323 
3324 	if (!pdev) {
3325 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3326 			  "PDEV not found for peer_addr: %pM",
3327 			  peer_addr.bytes);
3328 		return NULL;
3329 	}
3330 
3331 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3332 	if (!peer) {
3333 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3334 			  "Peer not found for peer_addr: %pM",
3335 			  peer_addr.bytes);
3336 		return NULL;
3337 	}
3338 
3339 	return (struct cdp_vdev *)peer->vdev;
3340 }
3341 
3342 /**
3343  * dp_get_vdev_for_peer() - Get virtual interface instance to which the peer belongs
3344  * @peer - peer instance
3345  *
3346  * Get virtual interface instance to which the peer belongs
3347  *
3348  * Return: virtual interface instance pointer
3349  *         NULL if it cannot be found
3350  */
3351 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3352 {
3353 	struct dp_peer *peer = peer_handle;
3354 
3355 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3356 	return (struct cdp_vdev *)peer->vdev;
3357 }
3358 
3359 /**
3360  * dp_peer_get_peer_mac_addr() - Get peer mac address
3361  * @peer - peer instance
3362  *
3363  * Get peer mac address
3364  *
3365  * Return: peer mac address pointer
3366  *         NULL if the peer cannot be found
3367  */
3368 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3369 {
3370 	struct dp_peer *peer = peer_handle;
3371 	uint8_t *mac;
3372 
3373 	mac = peer->mac_addr.raw;
3374 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3375 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3376 	return peer->mac_addr.raw;
3377 }
3378 
3379 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3380 		      uint8_t *peer_mac)
3381 {
3382 	enum ol_txrx_peer_state peer_state;
3383 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3384 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
3385 						       vdev_id);
3386 
3387 	if (!peer)
3388 		return QDF_STATUS_E_FAILURE;
3389 
3390 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3391 	peer_state = peer->state;
3392 	dp_peer_unref_delete(peer);
3393 
3394 	return peer_state;
3395 }
3396 
3397 /**
3398  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3399  * @pdev - data path device instance
3400  *
3401  * local peer id pool alloc for physical device
3402  *
3403  * Return: none
3404  */
3405 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3406 {
3407 	int i;
3408 
3409 	/* point the freelist to the first ID */
3410 	pdev->local_peer_ids.freelist = 0;
3411 
3412 	/* link each ID to the next one */
3413 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3414 		pdev->local_peer_ids.pool[i] = i + 1;
3415 		pdev->local_peer_ids.map[i] = NULL;
3416 	}
3417 
3418 	/* link the last ID to itself, to mark the end of the list */
3419 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3420 	pdev->local_peer_ids.pool[i] = i;
3421 
3422 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3423 	DP_TRACE(INFO, "Peer pool init");
3424 }
3425 
3426 /**
3427  * dp_local_peer_id_alloc() - allocate local peer id
3428  * @pdev - data path device instance
3429  * @peer - new peer instance
3430  *
3431  * allocate local peer id
3432  *
3433  * Return: none
3434  */
3435 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3436 {
3437 	int i;
3438 
3439 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3440 	i = pdev->local_peer_ids.freelist;
3441 	if (pdev->local_peer_ids.pool[i] == i) {
3442 		/* the list is empty, except for the list-end marker */
3443 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3444 	} else {
3445 		/* take the head ID and advance the freelist */
3446 		peer->local_id = i;
3447 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3448 		pdev->local_peer_ids.map[i] = peer;
3449 	}
3450 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3451 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3452 }
3453 
3454 /**
3455  * dp_local_peer_id_free() - remove local peer id
3456  * @pdev - data path device instance
3457  * @peer - peer instance should be removed
3458  *
3459  * remove local peer id
3460  *
3461  * Return: none
3462  */
3463 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3464 {
3465 	int i = peer->local_id;
3466 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3467 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3468 		return;
3469 	}
3470 
3471 	/* put this ID on the head of the freelist */
3472 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3473 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3474 	pdev->local_peer_ids.freelist = i;
3475 	pdev->local_peer_ids.map[i] = NULL;
3476 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3477 }
3478 
3479 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3480 				uint8_t vdev_id, uint8_t *peer_addr)
3481 {
3482 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3483 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
3484 
3485 	if (!vdev)
3486 		return false;
3487 
3488 	return !!dp_find_peer_by_addr_and_vdev(
3489 					dp_pdev_to_cdp_pdev(vdev->pdev),
3490 					dp_vdev_to_cdp_vdev(vdev),
3491 					peer_addr);
3492 }
3493 
3494 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3495 				      uint8_t vdev_id, uint8_t *peer_addr,
3496 				      uint16_t max_bssid)
3497 {
3498 	int i;
3499 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3500 	struct dp_vdev *vdev;
3501 
3502 	for (i = 0; i < max_bssid; i++) {
3503 		vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
3504 		/* Need to check vdevs other than the vdev_id */
3505 		if (vdev_id == i || !vdev)
3506 			continue;
3507 		if (dp_find_peer_by_addr_and_vdev(
3508 					dp_pdev_to_cdp_pdev(vdev->pdev),
3509 					dp_vdev_to_cdp_vdev(vdev),
3510 					peer_addr)) {
3511 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3512 				  "%s: Duplicate peer %pM already exist on vdev %d",
3513 				  __func__, peer_addr, i);
3514 			return true;
3515 		}
3516 	}
3517 
3518 	return false;
3519 }
3520 
3521 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3522 			uint8_t *peer_addr)
3523 {
3524 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3525 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3526 
3527 	if (!pdev)
3528 		return false;
3529 
3530 	return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
3531 }
3532 #endif
3533 
3534 /**
3535  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3536  * @peer: DP peer handle
3537  * @dp_stats_cmd_cb: REO command callback function
3538  * @cb_ctxt: Callback context
3539  *
3540  * Return: none
3541  */
3542 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3543 			void *cb_ctxt)
3544 {
3545 	struct dp_soc *soc = peer->vdev->pdev->soc;
3546 	struct hal_reo_cmd_params params;
3547 	int i;
3548 
3549 	if (!dp_stats_cmd_cb)
3550 		return;
3551 
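	/* For each TID with a HW queue descriptor, fetch the REO queue stats
	 * via CMD_GET_QUEUE_STATS and deliver them to the caller's callback.
	 */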
3552 	qdf_mem_zero(&params, sizeof(params));
3553 	for (i = 0; i < DP_MAX_TIDS; i++) {
3554 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3555 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3556 			params.std.need_status = 1;
3557 			params.std.addr_lo =
3558 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3559 			params.std.addr_hi =
3560 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3561 
3562 			if (cb_ctxt) {
3563 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3564 					&params, dp_stats_cmd_cb, cb_ctxt);
3565 			} else {
3566 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3567 					&params, dp_stats_cmd_cb, rx_tid);
3568 			}
3569 
3570 			/* Flush REO descriptor from HW cache to update stats
3571 			 * in descriptor memory. This is to help debugging */
3572 			qdf_mem_zero(&params, sizeof(params));
3573 			params.std.need_status = 0;
3574 			params.std.addr_lo =
3575 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3576 			params.std.addr_hi =
3577 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3578 			params.u.fl_cache_params.flush_no_inval = 1;
3579 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3580 				NULL);
3581 		}
3582 	}
3583 }
3584 
3585 QDF_STATUS
3586 dp_set_michael_key(struct cdp_soc_t *soc,
3587 		   uint8_t vdev_id,
3588 		   uint8_t *peer_mac,
3589 		   bool is_unicast, uint32_t *key)
3590 {
3591 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3592 	uint8_t sec_index = is_unicast ? 1 : 0;
3593 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3594 						      peer_mac, 0, vdev_id);
3595 
3596 	if (!peer || peer->delete_in_progress) {
3597 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3598 			  "peer not found ");
3599 		status = QDF_STATUS_E_FAILURE;
3600 		goto fail;
3601 	}
3602 
3603 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3604 		     key, IEEE80211_WEP_MICLEN);
3605 
3606 fail:
3607 	if (peer)
3608 		dp_peer_unref_delete(peer);
3609 
3610 	return status;
3611 }
3612 
3613 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3614 {
3615 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3616 
3617 	if (peer) {
3618 		/*
3619 		 * Decrement the peer ref which is taken as part of
3620 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3621 		 */
3622 		dp_peer_unref_del_find_by_id(peer);
3623 
3624 		return true;
3625 	}
3626 
3627 	return false;
3628 }
3629