xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 
33 #ifdef WLAN_TX_PKT_CAPTURE_ENH
34 #include "dp_tx_capture.h"
35 #endif
36 
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
44 		  "%s: Setting SSN valid bit to %d",
45 		  __func__, valid);
46 }
47 
48 static inline int dp_peer_find_mac_addr_cmp(
49 	union dp_align_mac_addr *mac_addr1,
50 	union dp_align_mac_addr *mac_addr2)
51 {
52 		/*
53 		 * Intentionally use & rather than &&:
54 		 * because the operands are binary rather than generic boolean,
55 		 * the functionality is equivalent.
56 		 * Using && has the advantage of short-circuited evaluation,
57 		 * but using & has the advantage of no conditional branching,
58 		 * which is a more significant benefit.
59 		 */
60 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
61 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
62 }
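
/*
 * Usage note (illustrative, not part of the original file): like memcmp(),
 * dp_peer_find_mac_addr_cmp() returns 0 when the two addresses match and
 * non-zero otherwise, so callers test for a match with
 * "dp_peer_find_mac_addr_cmp(a, b) == 0" or "!dp_peer_find_mac_addr_cmp(a, b)".
 */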
63 
64 static int dp_peer_ast_table_attach(struct dp_soc *soc)
65 {
66 	uint32_t max_ast_index;
67 
68 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
69 	/* allocate ast_table for ast entry to ast_index map */
70 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
71 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
72 	soc->ast_table = qdf_mem_malloc(max_ast_index *
73 					sizeof(struct dp_ast_entry *));
74 	if (!soc->ast_table) {
75 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
76 			  "%s: ast_table memory allocation failed", __func__);
77 		return QDF_STATUS_E_NOMEM;
78 	}
79 	return 0; /* success */
80 }
81 
82 static int dp_peer_find_map_attach(struct dp_soc *soc)
83 {
84 	uint32_t max_peers, peer_map_size;
85 
86 	max_peers = soc->max_peers;
87 	/* allocate the peer ID -> peer object map */
88 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
89 		  "\n<=== cfg max peer id %d ====>", max_peers);
90 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
91 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
92 	if (!soc->peer_id_to_obj_map) {
93 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
94 			  "%s: peer map memory allocation failed", __func__);
95 		return QDF_STATUS_E_NOMEM;
96 	}
97 
98 	/*
99 	 * The peer_id_to_obj_map doesn't really need to be initialized,
100 	 * since elements are only used after they have been individually
101 	 * initialized.
102 	 * However, it is convenient for debugging to have all elements
103 	 * that are not in use set to 0.
104 	 */
105 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
106 	return 0; /* success */
107 }
108 
109 static int dp_log2_ceil(unsigned int value)
110 {
111 	unsigned int tmp = value;
112 	int log2 = -1;
113 
114 	while (tmp) {
115 		log2++;
116 		tmp >>= 1;
117 	}
118 	if (1 << log2 != value)
119 		log2++;
120 	return log2;
121 }
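
/*
 * Worked example (illustrative, not part of the original file):
 * dp_log2_ceil(1) == 0, dp_log2_ceil(4) == 2, dp_log2_ceil(5) == 3 and
 * dp_log2_ceil(1024) == 10, i.e. the result is the smallest n for which
 * (1 << n) >= value.
 */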
122 
123 static int dp_peer_find_add_id_to_obj(
124 	struct dp_peer *peer,
125 	uint16_t peer_id)
126 {
127 	int i;
128 
129 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
130 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
131 			peer->peer_ids[i] = peer_id;
132 			return 0; /* success */
133 		}
134 	}
135 	return QDF_STATUS_E_FAILURE; /* failure */
136 }
137 
138 #define DP_PEER_HASH_LOAD_MULT  2
139 #define DP_PEER_HASH_LOAD_SHIFT 0
140 
141 #define DP_AST_HASH_LOAD_MULT  2
142 #define DP_AST_HASH_LOAD_SHIFT 0
143 
144 static int dp_peer_find_hash_attach(struct dp_soc *soc)
145 {
146 	int i, hash_elems, log2;
147 
148 	/* allocate the peer MAC address -> peer object hash table */
149 	hash_elems = soc->max_peers;
150 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
151 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
152 	log2 = dp_log2_ceil(hash_elems);
153 	hash_elems = 1 << log2;
154 
155 	soc->peer_hash.mask = hash_elems - 1;
156 	soc->peer_hash.idx_bits = log2;
157 	/* allocate an array of TAILQ peer object lists */
158 	soc->peer_hash.bins = qdf_mem_malloc(
159 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
160 	if (!soc->peer_hash.bins)
161 		return QDF_STATUS_E_NOMEM;
162 
163 	for (i = 0; i < hash_elems; i++)
164 		TAILQ_INIT(&soc->peer_hash.bins[i]);
165 
166 	return 0;
167 }
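
/*
 * Sizing example (illustrative, not part of the original file): assuming
 * soc->max_peers == 1024, with DP_PEER_HASH_LOAD_MULT == 2 and
 * DP_PEER_HASH_LOAD_SHIFT == 0 the table gets hash_elems == 2048 bins
 * (dp_log2_ceil(2048) == 11), so peer_hash.idx_bits == 11 and
 * peer_hash.mask == 0x7ff.
 */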
168 
169 static void dp_peer_find_hash_detach(struct dp_soc *soc)
170 {
171 	if (soc->peer_hash.bins) {
172 		qdf_mem_free(soc->peer_hash.bins);
173 		soc->peer_hash.bins = NULL;
174 	}
175 }
176 
177 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
178 	union dp_align_mac_addr *mac_addr)
179 {
180 	unsigned index;
181 
182 	index =
183 		mac_addr->align2.bytes_ab ^
184 		mac_addr->align2.bytes_cd ^
185 		mac_addr->align2.bytes_ef;
186 	index ^= index >> soc->peer_hash.idx_bits;
187 	index &= soc->peer_hash.mask;
188 	return index;
189 }
190 
191 
192 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
193 {
194 	unsigned index;
195 
196 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
197 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
198 	/*
199 	 * It is important to add the new peer at the tail of the peer list
200 	 * with the bin index.  Together with having the hash_find function
201 	 * search from head to tail, this ensures that if two entries with
202 	 * the same MAC address are stored, the one added first will be
203 	 * found first.
204 	 */
205 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
206 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
207 }
208 
209 #ifdef FEATURE_AST
210 /*
211  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
212  * @soc: SoC handle
213  *
214  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
215  */
216 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
217 {
218 	int i, hash_elems, log2;
219 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
220 
221 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
222 		DP_AST_HASH_LOAD_SHIFT);
223 
224 	log2 = dp_log2_ceil(hash_elems);
225 	hash_elems = 1 << log2;
226 
227 	soc->ast_hash.mask = hash_elems - 1;
228 	soc->ast_hash.idx_bits = log2;
229 
230 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
231 		  "ast hash_elems: %d, max_ast_idx: %d",
232 		  hash_elems, max_ast_idx);
233 
234 	/* allocate an array of TAILQ peer object lists */
235 	soc->ast_hash.bins = qdf_mem_malloc(
236 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
237 				dp_ast_entry)));
238 
239 	if (!soc->ast_hash.bins)
240 		return QDF_STATUS_E_NOMEM;
241 
242 	for (i = 0; i < hash_elems; i++)
243 		TAILQ_INIT(&soc->ast_hash.bins[i]);
244 
245 	return 0;
246 }
247 
248 /*
249  * dp_peer_ast_cleanup() - cleanup the references
250  * @soc: SoC handle
251  * @ast: ast entry
252  *
253  * Return: None
254  */
255 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
256 				       struct dp_ast_entry *ast)
257 {
258 	txrx_ast_free_cb cb = ast->callback;
259 	void *cookie = ast->cookie;
260 
261 	/* Call the callbacks to free up the cookie */
262 	if (cb) {
263 		ast->callback = NULL;
264 		ast->cookie = NULL;
265 		cb(soc->ctrl_psoc,
266 		   dp_soc_to_cdp_soc(soc),
267 		   cookie,
268 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
269 	}
270 }
271 
272 /*
273  * dp_peer_ast_hash_detach() - Free AST Hash table
274  * @soc: SoC handle
275  *
276  * Return: None
277  */
278 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
279 {
280 	unsigned int index;
281 	struct dp_ast_entry *ast, *ast_next;
282 
283 	if (!soc->ast_hash.mask)
284 		return;
285 
286 	if (!soc->ast_hash.bins)
287 		return;
288 
289 	qdf_spin_lock_bh(&soc->ast_lock);
290 	for (index = 0; index <= soc->ast_hash.mask; index++) {
291 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
292 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
293 					   hash_list_elem, ast_next) {
294 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
295 					     hash_list_elem);
296 				dp_peer_ast_cleanup(soc, ast);
297 				qdf_mem_free(ast);
298 			}
299 		}
300 	}
301 	qdf_spin_unlock_bh(&soc->ast_lock);
302 
303 	qdf_mem_free(soc->ast_hash.bins);
304 	soc->ast_hash.bins = NULL;
305 }
306 
307 /*
308  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
309  * @soc: SoC handle
310  *
311  * Return: AST hash
312  */
313 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
314 	union dp_align_mac_addr *mac_addr)
315 {
316 	uint32_t index;
317 
318 	index =
319 		mac_addr->align2.bytes_ab ^
320 		mac_addr->align2.bytes_cd ^
321 		mac_addr->align2.bytes_ef;
322 	index ^= index >> soc->ast_hash.idx_bits;
323 	index &= soc->ast_hash.mask;
324 	return index;
325 }
326 
327 /*
328  * dp_peer_ast_hash_add() - Add AST entry into hash table
329  * @soc: SoC handle
330  *
331  * This function adds the AST entry into SoC AST hash table
332  * It assumes caller has taken the ast lock to protect the access to this table
333  *
334  * Return: None
335  */
336 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
337 		struct dp_ast_entry *ase)
338 {
339 	uint32_t index;
340 
341 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
342 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
343 }
344 
345 /*
346  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
347  * @soc: SoC handle
348  *
349  * This function removes the AST entry from soc AST hash table
350  * It assumes caller has taken the ast lock to protect the access to this table
351  *
352  * Return: None
353  */
354 void dp_peer_ast_hash_remove(struct dp_soc *soc,
355 			     struct dp_ast_entry *ase)
356 {
357 	unsigned index;
358 	struct dp_ast_entry *tmpase;
359 	int found = 0;
360 
361 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
362 	/* Check that the hash bin is not empty before delete */
363 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
364 
365 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
366 		if (tmpase == ase) {
367 			found = 1;
368 			break;
369 		}
370 	}
371 
372 	QDF_ASSERT(found);
373 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
374 }
375 
376 /*
377  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
378  * @soc: SoC handle
379  * @peer: peer handle
380  * @ast_mac_addr: mac address
381  *
382  * It assumes caller has taken the ast lock to protect the access to ast list
383  *
384  * Return: AST entry
385  */
386 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
387 					   struct dp_peer *peer,
388 					   uint8_t *ast_mac_addr)
389 {
390 	struct dp_ast_entry *ast_entry = NULL;
391 	union dp_align_mac_addr *mac_addr =
392 		(union dp_align_mac_addr *)ast_mac_addr;
393 
394 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
395 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
396 					       &ast_entry->mac_addr)) {
397 			return ast_entry;
398 		}
399 	}
400 
401 	return NULL;
402 }
403 
404 /*
405  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
406  * @soc: SoC handle
407  *
408  * It assumes caller has taken the ast lock to protect the access to
409  * AST hash table
410  *
411  * Return: AST entry
412  */
413 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
414 						     uint8_t *ast_mac_addr,
415 						     uint8_t pdev_id)
416 {
417 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
418 	uint32_t index;
419 	struct dp_ast_entry *ase;
420 
421 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
422 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
423 	mac_addr = &local_mac_addr_aligned;
424 
425 	index = dp_peer_ast_hash_index(soc, mac_addr);
426 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
427 		if ((pdev_id == ase->pdev_id) &&
428 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
429 			return ase;
430 		}
431 	}
432 
433 	return NULL;
434 }
435 
436 /*
437  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
438  * @soc: SoC handle
439  *
440  * It assumes caller has taken the ast lock to protect the access to
441  * AST hash table
442  *
443  * Return: AST entry
444  */
445 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
446 					       uint8_t *ast_mac_addr)
447 {
448 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
449 	unsigned index;
450 	struct dp_ast_entry *ase;
451 
452 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
453 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
454 	mac_addr = &local_mac_addr_aligned;
455 
456 	index = dp_peer_ast_hash_index(soc, mac_addr);
457 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
458 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
459 			return ase;
460 		}
461 	}
462 
463 	return NULL;
464 }
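
/*
 * Illustrative caller sketch (not part of the original file): as the headers
 * above note, AST hash lookups must be done with soc->ast_lock held. The
 * helper name dp_example_ast_entry_exists() is hypothetical.
 */
#if 0	/* example only */
static bool dp_example_ast_entry_exists(struct dp_soc *soc, uint8_t *mac)
{
	struct dp_ast_entry *ase;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find_soc(soc, mac);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return ase != NULL;
}
#endif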
465 
466 /*
467  * dp_peer_map_ast() - Map the ast entry with HW AST Index
468  * @soc: SoC handle
469  * @peer: peer to which ast node belongs
470  * @mac_addr: MAC address of ast node
471  * @hw_peer_id: HW AST Index returned by target in peer map event
472  * @vdev_id: vdev id for VAP to which the peer belongs to
473  * @ast_hash: ast hash value in HW
474  *
475  * Return: None
476  */
477 static inline void dp_peer_map_ast(struct dp_soc *soc,
478 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
479 	uint8_t vdev_id, uint16_t ast_hash)
480 {
481 	struct dp_ast_entry *ast_entry = NULL;
482 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
483 
484 	if (!peer) {
485 		return;
486 	}
487 
488 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
489 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
490 		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
491 		  mac_addr[1], mac_addr[2], mac_addr[3],
492 		  mac_addr[4], mac_addr[5]);
493 
494 	qdf_spin_lock_bh(&soc->ast_lock);
495 
496 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
497 
498 	if (ast_entry) {
499 		ast_entry->ast_idx = hw_peer_id;
500 		soc->ast_table[hw_peer_id] = ast_entry;
501 		ast_entry->is_active = TRUE;
502 		peer_type = ast_entry->type;
503 		ast_entry->ast_hash_value = ast_hash;
504 		ast_entry->is_mapped = TRUE;
505 	}
506 
507 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
508 		if (soc->cdp_soc.ol_ops->peer_map_event) {
509 			soc->cdp_soc.ol_ops->peer_map_event(
510 			soc->ctrl_psoc, peer->peer_ids[0],
511 			hw_peer_id, vdev_id,
512 			mac_addr, peer_type, ast_hash);
513 		}
514 	} else {
515 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
516 			  "AST entry not found");
517 	}
518 
519 	qdf_spin_unlock_bh(&soc->ast_lock);
520 	return;
521 }
522 
523 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
524 			   struct cdp_soc *dp_soc,
525 			   void *cookie,
526 			   enum cdp_ast_free_status status)
527 {
528 	struct dp_ast_free_cb_params *param =
529 		(struct dp_ast_free_cb_params *)cookie;
530 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
531 	struct dp_peer *peer = NULL;
532 
533 	if (status != CDP_TXRX_AST_DELETED) {
534 		qdf_mem_free(cookie);
535 		return;
536 	}
537 
538 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
539 				      0, param->vdev_id);
540 	if (peer) {
541 		dp_peer_add_ast(soc, peer,
542 				&param->mac_addr.raw[0],
543 				param->type,
544 				param->flags);
545 		dp_peer_unref_delete(peer);
546 	}
547 	qdf_mem_free(cookie);
548 }
549 
550 /*
551  * dp_peer_add_ast() - Allocate and add AST entry into peer list
552  * @soc: SoC handle
553  * @peer: peer to which ast node belongs
554  * @mac_addr: MAC address of ast node
555  * @type: AST entry type; @flags: AST entry flags
556  *
557  * This API is used by WDS source port learning function to
558  * add a new AST entry into peer AST list
559  *
560  * Return: 0 if new entry is allocated,
561  *        -1 if entry add failed
562  */
563 int dp_peer_add_ast(struct dp_soc *soc,
564 			struct dp_peer *peer,
565 			uint8_t *mac_addr,
566 			enum cdp_txrx_ast_entry_type type,
567 			uint32_t flags)
568 {
569 	struct dp_ast_entry *ast_entry = NULL;
570 	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
571 	struct dp_pdev *pdev = NULL;
572 	uint8_t next_node_mac[6];
573 	int  ret = -1;
574 	txrx_ast_free_cb cb = NULL;
575 	void *cookie = NULL;
576 	struct dp_peer *tmp_peer = NULL;
577 	bool is_peer_found = false;
578 
579 	vdev = peer->vdev;
580 	if (!vdev) {
581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
582 			  FL("Peers vdev is NULL"));
583 		QDF_ASSERT(0);
584 		return ret;
585 	}
586 
587 	pdev = vdev->pdev;
588 
589 	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
590 					  DP_VDEV_ALL);
591 	if (tmp_peer) {
592 		tmp_vdev = tmp_peer->vdev;
593 		if (!tmp_vdev) {
594 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
595 				  FL("Peers vdev is NULL"));
596 			QDF_ASSERT(0);
597 			dp_peer_unref_delete(tmp_peer);
598 			return ret;
599 		}
600 		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
601 			is_peer_found = true;
602 
603 		dp_peer_unref_delete(tmp_peer);
604 	}
605 
606 	qdf_spin_lock_bh(&soc->ast_lock);
607 	if (peer->delete_in_progress) {
608 		qdf_spin_unlock_bh(&soc->ast_lock);
609 		return ret;
610 	}
611 
612 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
613 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
614 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
615 		  peer->mac_addr.raw, peer, mac_addr);
616 
617 
618 	/* fw supports only 2 times the max_peers ast entries */
619 	if (soc->num_ast_entries >=
620 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
621 		qdf_spin_unlock_bh(&soc->ast_lock);
622 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
623 			  FL("Max ast entries reached"));
624 		return ret;
625 	}
626 
627 	/* If an AST entry already exists, just return from here.
628 	 * An ast entry with the same mac address can exist on different
629 	 * radios; if ast_override support is enabled, use the search by
630 	 * pdev in this case.
631 	 */
632 	if (soc->ast_override_support) {
633 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
634 							    pdev->pdev_id);
635 		if (ast_entry) {
636 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
637 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
638 				ast_entry->is_active = TRUE;
639 
640 			qdf_spin_unlock_bh(&soc->ast_lock);
641 			return 0;
642 		}
643 		if (is_peer_found) {
644 			/* During WDS to static roaming, peer is added
645 			 * to the list before static AST entry create.
646 			 * So, allow AST entry for STATIC type
647 			 * even if peer is present
648 			 */
649 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
650 				qdf_spin_unlock_bh(&soc->ast_lock);
651 				return 0;
652 			}
653 		}
654 	} else {
655 		/* For HMWDS_SEC, entries can be added for the same mac
656 		 * address; do not check for an existing entry
657 		 */
658 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
659 			goto add_ast_entry;
660 
661 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
662 
663 		if (ast_entry) {
664 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
665 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
666 				ast_entry->is_active = TRUE;
667 
668 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
669 			    !ast_entry->delete_in_progress) {
670 				qdf_spin_unlock_bh(&soc->ast_lock);
671 				return 0;
672 			}
673 
674 			/* An add for an HMWDS entry cannot be ignored if
675 			 * there is an AST entry with the same mac address.
676 			 *
677 			 * If an ast entry exists with the requested mac
678 			 * address, send a delete command and register a
679 			 * callback which can take care of adding the HMWDS
680 			 * ast entry on delete confirmation from the target.
681 			 */
682 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
683 			    soc->is_peer_map_unmap_v2) {
684 				struct dp_ast_free_cb_params *param = NULL;
685 
686 				if (ast_entry->type ==
687 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
688 					goto add_ast_entry;
689 
690 				/* save existing callback */
691 				if (ast_entry->callback) {
692 					cb = ast_entry->callback;
693 					cookie = ast_entry->cookie;
694 				}
695 
696 				param = qdf_mem_malloc(sizeof(*param));
697 				if (!param) {
698 					QDF_TRACE(QDF_MODULE_ID_TXRX,
699 						  QDF_TRACE_LEVEL_ERROR,
700 						  "Allocation failed");
701 					qdf_spin_unlock_bh(&soc->ast_lock);
702 					return ret;
703 				}
704 
705 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
706 					     QDF_MAC_ADDR_SIZE);
707 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
708 					     &peer->mac_addr.raw[0],
709 					     QDF_MAC_ADDR_SIZE);
710 				param->type = type;
711 				param->flags = flags;
712 				param->vdev_id = vdev->vdev_id;
713 				ast_entry->callback = dp_peer_free_hmwds_cb;
714 				ast_entry->pdev_id = vdev->pdev->pdev_id;
715 				ast_entry->type = type;
716 				ast_entry->cookie = (void *)param;
717 				if (!ast_entry->delete_in_progress)
718 					dp_peer_del_ast(soc, ast_entry);
719 			}
720 
721 			/* Modify an already existing AST entry of type
722 			 * WDS to MEC on promotion. This serves as a fix when
723 			 * the backbone interfaces are interchanged, wherein
724 			 * a wds entry becomes its own MEC. The entry should
725 			 * be replaced only when the ast_entry peer matches
726 			 * the peer received in the mec event. This additional
727 			 * check is needed in wds repeater cases where a
728 			 * multicast packet from a station to the root via
729 			 * the repeater should not remove the wds entry.
730 			 */
731 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
732 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
733 			    (ast_entry->peer == peer)) {
734 				ast_entry->is_active = FALSE;
735 				dp_peer_del_ast(soc, ast_entry);
736 			}
737 			qdf_spin_unlock_bh(&soc->ast_lock);
738 
739 			/* Call the saved callback*/
740 			if (cb) {
741 				cb(soc->ctrl_psoc,
742 				   dp_soc_to_cdp_soc(soc),
743 				   cookie,
744 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
745 			}
746 			return 0;
747 		}
748 	}
749 
750 add_ast_entry:
751 	ast_entry = (struct dp_ast_entry *)
752 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
753 
754 	if (!ast_entry) {
755 		qdf_spin_unlock_bh(&soc->ast_lock);
756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
757 			  FL("fail to allocate ast_entry"));
758 		QDF_ASSERT(0);
759 		return ret;
760 	}
761 
762 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
763 	ast_entry->pdev_id = vdev->pdev->pdev_id;
764 	ast_entry->vdev_id = vdev->vdev_id;
765 	ast_entry->is_mapped = false;
766 	ast_entry->delete_in_progress = false;
767 
768 	switch (type) {
769 	case CDP_TXRX_AST_TYPE_STATIC:
770 		peer->self_ast_entry = ast_entry;
771 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
772 		if (peer->vdev->opmode == wlan_op_mode_sta)
773 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
774 		break;
775 	case CDP_TXRX_AST_TYPE_SELF:
776 		peer->self_ast_entry = ast_entry;
777 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
778 		break;
779 	case CDP_TXRX_AST_TYPE_WDS:
780 		ast_entry->next_hop = 1;
781 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
782 		break;
783 	case CDP_TXRX_AST_TYPE_WDS_HM:
784 		ast_entry->next_hop = 1;
785 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
786 		break;
787 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
788 		ast_entry->next_hop = 1;
789 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
790 		break;
791 	case CDP_TXRX_AST_TYPE_MEC:
792 		ast_entry->next_hop = 1;
793 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
794 		break;
795 	case CDP_TXRX_AST_TYPE_DA:
796 		peer = peer->vdev->vap_bss_peer;
797 		ast_entry->next_hop = 1;
798 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
799 		break;
800 	default:
801 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
802 			FL("Incorrect AST entry type"));
803 	}
804 
805 	ast_entry->is_active = TRUE;
806 	DP_STATS_INC(soc, ast.added, 1);
807 	soc->num_ast_entries++;
808 	dp_peer_ast_hash_add(soc, ast_entry);
809 
810 	ast_entry->peer = peer;
811 
812 	if (type == CDP_TXRX_AST_TYPE_MEC)
813 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
814 	else
815 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
816 
817 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
818 
819 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
820 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
821 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
822 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
823 		if (QDF_STATUS_SUCCESS ==
824 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
825 				peer->vdev->osif_vdev,
826 				(struct cdp_peer *)peer,
827 				mac_addr,
828 				next_node_mac,
829 				flags)) {
830 			qdf_spin_unlock_bh(&soc->ast_lock);
831 			return 0;
832 		}
833 	}
834 
835 	qdf_spin_unlock_bh(&soc->ast_lock);
836 	return ret;
837 }
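
/*
 * Illustrative caller sketch (not part of the original file): a WDS
 * source-port-learning path might add an entry as below. dp_peer_add_ast()
 * takes soc->ast_lock internally, so the caller does not hold it. The helper
 * name dp_example_wds_learn() and its flags argument are hypothetical.
 */
#if 0	/* example only */
static void dp_example_wds_learn(struct dp_soc *soc, struct dp_peer *peer,
				 uint8_t *sa, uint32_t flags)
{
	/* sa: transmitter address learned from a received frame */
	if (dp_peer_add_ast(soc, peer, sa, CDP_TXRX_AST_TYPE_WDS, flags) != 0)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "WDS AST add skipped or failed");
}
#endif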
838 
839 /*
840  * dp_peer_del_ast() - Delete and free AST entry
841  * @soc: SoC handle
842  * @ast_entry: AST entry of the node
843  *
844  * This function removes the AST entry from peer and soc tables
845  * It assumes caller has taken the ast lock to protect the access to these
846  * tables
847  *
848  * Return: None
849  */
850 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
851 {
852 	struct dp_peer *peer;
853 
854 	if (!ast_entry)
855 		return;
856 
857 	peer =  ast_entry->peer;
858 
859 	dp_peer_ast_send_wds_del(soc, ast_entry);
860 
861 	/*
862 	 * release the reference only if it is mapped
863 	 * to ast_table
864 	 */
865 	if (ast_entry->is_mapped)
866 		soc->ast_table[ast_entry->ast_idx] = NULL;
867 
868 	/*
869 	 * if peer map v2 is enabled we do not free the ast entry
870 	 * here; it is supposed to be freed in the unmap event (after
871 	 * we receive delete confirmation from the target)
872 	 *
873 	 * if peer_id is invalid we did not get the peer map event
874 	 * for the peer; free the ast entry from here only in this case
875 	 */
876 	if (soc->is_peer_map_unmap_v2) {
877 
878 		/*
879 		 * For HM_SEC and SELF type we do not receive an unmap event;
880 		 * free the ast_entry from here itself
881 		 */
882 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
883 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
884 			return;
885 	}
886 
887 	/* SELF and STATIC entries are removed in teardown itself */
888 	if (ast_entry->next_hop)
889 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
890 
891 	DP_STATS_INC(soc, ast.deleted, 1);
892 	dp_peer_ast_hash_remove(soc, ast_entry);
893 	dp_peer_ast_cleanup(soc, ast_entry);
894 	qdf_mem_free(ast_entry);
895 	soc->num_ast_entries--;
896 }
897 
898 /*
899  * dp_peer_update_ast() - Update AST entry to the roamed peer
900  * @soc: SoC handle
901  * @peer: peer to which ast node belongs
902  * @ast_entry: AST entry of the node
903  * @flags: wds or hmwds
904  *
905  * This function updates the AST entry to point to the roamed peer in the soc tables
906  * It assumes caller has taken the ast lock to protect the access to these
907  * tables
908  *
909  * Return: 0 if ast entry is updated successfully
910  *         -1 failure
911  */
912 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
913 		       struct dp_ast_entry *ast_entry, uint32_t flags)
914 {
915 	int ret = -1;
916 	struct dp_peer *old_peer;
917 
918 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
919 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
920 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
921 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
922 		  peer->mac_addr.raw);
923 
924 	/* Do not send AST update in below cases
925 	 *  1) Ast entry delete has already triggered
926 	 *  2) Peer delete is already triggered
927 	 *  3) We did not get the HTT map for create event
928 	 */
929 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
930 	    !ast_entry->is_mapped)
931 		return ret;
932 
933 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
934 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
935 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
936 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
937 		return 0;
938 
939 	/*
940 	 * Avoids flood of WMI update messages sent to FW for same peer.
941 	 */
942 	if (qdf_unlikely(ast_entry->peer == peer) &&
943 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
944 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
945 	    (ast_entry->is_active))
946 		return 0;
947 
948 	old_peer = ast_entry->peer;
949 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
950 
951 	ast_entry->peer = peer;
952 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
953 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
954 	ast_entry->vdev_id = peer->vdev->vdev_id;
955 	ast_entry->is_active = TRUE;
956 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
957 
958 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
959 				peer->vdev->osif_vdev,
960 				ast_entry->mac_addr.raw,
961 				peer->mac_addr.raw,
962 				flags);
963 
964 	return ret;
965 }
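
/*
 * Illustrative caller sketch (not part of the original file): on roaming,
 * an existing WDS entry can be re-pointed at the new peer; the lookup and
 * update are done under soc->ast_lock, as the header above requires. The
 * helper name dp_example_ast_roam() is hypothetical.
 */
#if 0	/* example only */
static void dp_example_ast_roam(struct dp_soc *soc, struct dp_peer *new_peer,
				uint8_t *wds_mac, uint32_t flags)
{
	struct dp_ast_entry *ase;

	qdf_spin_lock_bh(&soc->ast_lock);
	ase = dp_peer_ast_hash_find_soc(soc, wds_mac);
	if (ase)
		dp_peer_update_ast(soc, new_peer, ase, flags);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif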
966 
967 /*
968  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
969  * @soc: SoC handle
970  * @ast_entry: AST entry of the node
971  *
972  * This function gets the pdev_id from the ast entry.
973  *
974  * Return: (uint8_t) pdev_id
975  */
976 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
977 				struct dp_ast_entry *ast_entry)
978 {
979 	return ast_entry->pdev_id;
980 }
981 
982 /*
983  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
984  * @soc: SoC handle
985  * @ast_entry: AST entry of the node
986  *
987  * This function gets the next hop from the ast entry.
988  *
989  * Return: (uint8_t) next_hop
990  */
991 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
992 				struct dp_ast_entry *ast_entry)
993 {
994 	return ast_entry->next_hop;
995 }
996 
997 /*
998  * dp_peer_ast_set_type() - set type in the ast entry
999  * @soc: SoC handle
1000  * @ast_entry: AST entry of the node
1001  *
1002  * This function sets the type in the ast entry.
1003  *
1004  * Return: None
1005  */
1006 void dp_peer_ast_set_type(struct dp_soc *soc,
1007 				struct dp_ast_entry *ast_entry,
1008 				enum cdp_txrx_ast_entry_type type)
1009 {
1010 	ast_entry->type = type;
1011 }
1012 
1013 #else
1014 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
1015 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
1016 		uint32_t flags)
1017 {
1018 	return 1;
1019 }
1020 
1021 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1022 {
1023 }
1024 
1025 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1026 			struct dp_ast_entry *ast_entry, uint32_t flags)
1027 {
1028 	return 1;
1029 }
1030 
1031 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1032 					       uint8_t *ast_mac_addr)
1033 {
1034 	return NULL;
1035 }
1036 
1037 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1038 						     uint8_t *ast_mac_addr,
1039 						     uint8_t pdev_id)
1040 {
1041 	return NULL;
1042 }
1043 
1044 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1045 {
1046 	return 0;
1047 }
1048 
1049 static inline void dp_peer_map_ast(struct dp_soc *soc,
1050 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1051 	uint8_t vdev_id, uint16_t ast_hash)
1052 {
1053 	return;
1054 }
1055 
1056 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1057 {
1058 }
1059 
1060 void dp_peer_ast_set_type(struct dp_soc *soc,
1061 				struct dp_ast_entry *ast_entry,
1062 				enum cdp_txrx_ast_entry_type type)
1063 {
1064 }
1065 
1066 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1067 				struct dp_ast_entry *ast_entry)
1068 {
1069 	return 0xff;
1070 }
1071 
1072 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1073 				struct dp_ast_entry *ast_entry)
1074 {
1075 	return 0xff;
1076 }
1077 
1078 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1079 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1080 {
1081 	return 1;
1082 }
1083 
1084 #endif
1085 
1086 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1087 			      struct dp_ast_entry *ast_entry)
1088 {
1089 	struct dp_peer *peer = ast_entry->peer;
1090 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1091 
1092 	if (ast_entry->delete_in_progress)
1093 		return;
1094 
1095 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1096 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1097 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1098 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1099 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1100 
1101 	if (ast_entry->next_hop) {
1102 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
1103 						    ast_entry->mac_addr.raw,
1104 						    ast_entry->type);
1105 	}
1106 
1107 	/* Remove SELF and STATIC entries in teardown itself */
1108 	if (!ast_entry->next_hop) {
1109 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1110 		peer->self_ast_entry = NULL;
1111 		ast_entry->peer = NULL;
1112 	}
1113 
1114 	ast_entry->delete_in_progress = true;
1115 }
1116 
1117 /**
1118  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1119  * @soc: soc handle
1120  * @peer: peer handle
1121  * @mac_addr: mac address of the AST entry to search and delete
1122  *
1123  * find the ast entry from the peer list using the mac address and free
1124  * the entry.
1125  *
1126  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1127  */
1128 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1129 					 struct dp_peer *peer,
1130 					 uint8_t *mac_addr)
1131 {
1132 	struct dp_ast_entry *ast_entry;
1133 	void *cookie = NULL;
1134 	txrx_ast_free_cb cb = NULL;
1135 
1136 	/*
1137 	 * release the reference only if it is mapped
1138 	 * to ast_table
1139 	 */
1140 
1141 	qdf_spin_lock_bh(&soc->ast_lock);
1142 
1143 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1144 	if (!ast_entry) {
1145 		qdf_spin_unlock_bh(&soc->ast_lock);
1146 		return QDF_STATUS_E_NOENT;
1147 	} else if (ast_entry->is_mapped) {
1148 		soc->ast_table[ast_entry->ast_idx] = NULL;
1149 	}
1150 
1151 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1152 	DP_STATS_INC(soc, ast.deleted, 1);
1153 	dp_peer_ast_hash_remove(soc, ast_entry);
1154 
1155 	cb = ast_entry->callback;
1156 	cookie = ast_entry->cookie;
1157 	ast_entry->callback = NULL;
1158 	ast_entry->cookie = NULL;
1159 
1160 	if (ast_entry == peer->self_ast_entry)
1161 		peer->self_ast_entry = NULL;
1162 
1163 	soc->num_ast_entries--;
1164 	qdf_spin_unlock_bh(&soc->ast_lock);
1165 
1166 	if (cb) {
1167 		cb(soc->ctrl_psoc,
1168 		   dp_soc_to_cdp_soc(soc),
1169 		   cookie,
1170 		   CDP_TXRX_AST_DELETED);
1171 	}
1172 	qdf_mem_free(ast_entry);
1173 
1174 	return QDF_STATUS_SUCCESS;
1175 }
1176 
1177 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1178 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1179 {
1180 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1181 	unsigned index;
1182 	struct dp_peer *peer;
1183 
1184 	if (mac_addr_is_aligned) {
1185 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1186 	} else {
1187 		qdf_mem_copy(
1188 			&local_mac_addr_aligned.raw[0],
1189 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1190 		mac_addr = &local_mac_addr_aligned;
1191 	}
1192 	index = dp_peer_find_hash_index(soc, mac_addr);
1193 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1194 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1195 #if ATH_SUPPORT_WRAP
1196 		/* ProxySTA may have multiple BSS peers with the same MAC address;
1197 		 * the modified find will take care of finding the correct BSS peer.
1198 		 */
1199 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1200 			((peer->vdev->vdev_id == vdev_id) ||
1201 			 (vdev_id == DP_VDEV_ALL))) {
1202 #else
1203 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1204 #endif
1205 			/* found it - increment the ref count before releasing
1206 			 * the lock
1207 			 */
1208 			qdf_atomic_inc(&peer->ref_cnt);
1209 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1210 			return peer;
1211 		}
1212 	}
1213 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1214 	return NULL; /* failure */
1215 }
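
/*
 * Illustrative caller sketch (not part of the original file):
 * dp_peer_find_hash_find() returns the peer with its reference count already
 * incremented, so every successful lookup must be balanced with
 * dp_peer_unref_delete(), as done throughout this file. The helper name
 * dp_example_peer_exists() is hypothetical.
 */
#if 0	/* example only */
static bool dp_example_peer_exists(struct dp_soc *soc, uint8_t *mac,
				   uint8_t vdev_id)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
	if (!peer)
		return false;

	/* ... use the peer here ... */

	dp_peer_unref_delete(peer);	/* drop the reference taken above */
	return true;
}
#endif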
1216 
1217 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1218 {
1219 	unsigned index;
1220 	struct dp_peer *tmppeer = NULL;
1221 	int found = 0;
1222 
1223 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1224 	/* Check that the hash bin is not empty before delete */
1225 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1226 	/*
1227 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1228 	 * by the caller.
1229 	 * The caller needs to hold the lock from the time the peer object's
1230 	 * reference count is decremented and tested up through the time the
1231 	 * reference to the peer object is removed from the hash table, by
1232 	 * this function.
1233 	 * Holding the lock only while removing the peer object reference
1234 	 * from the hash table keeps the hash table consistent, but does not
1235 	 * protect against a new HL tx context starting to use the peer object
1236 	 * if it looks up the peer object from its MAC address just after the
1237 	 * peer ref count is decremented to zero, but just before the peer
1238 	 * object reference is removed from the hash table.
1239 	 */
1240 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1241 		if (tmppeer == peer) {
1242 			found = 1;
1243 			break;
1244 		}
1245 	}
1246 	QDF_ASSERT(found);
1247 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1248 }
1249 
1250 void dp_peer_find_hash_erase(struct dp_soc *soc)
1251 {
1252 	int i;
1253 
1254 	/*
1255 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1256 	 * it's known that the soc is no longer in use.
1257 	 */
1258 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1259 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1260 			struct dp_peer *peer, *peer_next;
1261 
1262 			/*
1263 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1264 			 * memory access violation after peer is freed
1265 			 */
1266 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1267 				hash_list_elem, peer_next) {
1268 				/*
1269 				 * Don't remove the peer from the hash table -
1270 				 * that would modify the list we are currently
1271 				 * traversing, and it's not necessary anyway.
1272 				 */
1273 				/*
1274 				 * Artificially adjust the peer's ref count to
1275 				 * 1, so it will get deleted by
1276 				 * dp_peer_unref_delete.
1277 				 */
1278 				/* set to zero */
1279 				qdf_atomic_init(&peer->ref_cnt);
1280 				/* incr to one */
1281 				qdf_atomic_inc(&peer->ref_cnt);
1282 				dp_peer_unref_delete(peer);
1283 			}
1284 		}
1285 	}
1286 }
1287 
1288 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1289 {
1290 	if (soc->ast_table) {
1291 		qdf_mem_free(soc->ast_table);
1292 		soc->ast_table = NULL;
1293 	}
1294 }
1295 
1296 static void dp_peer_find_map_detach(struct dp_soc *soc)
1297 {
1298 	if (soc->peer_id_to_obj_map) {
1299 		qdf_mem_free(soc->peer_id_to_obj_map);
1300 		soc->peer_id_to_obj_map = NULL;
1301 	}
1302 }
1303 
1304 int dp_peer_find_attach(struct dp_soc *soc)
1305 {
1306 	if (dp_peer_find_map_attach(soc))
1307 		return 1;
1308 
1309 	if (dp_peer_find_hash_attach(soc)) {
1310 		dp_peer_find_map_detach(soc);
1311 		return 1;
1312 	}
1313 
1314 	if (dp_peer_ast_table_attach(soc)) {
1315 		dp_peer_find_hash_detach(soc);
1316 		dp_peer_find_map_detach(soc);
1317 		return 1;
1318 	}
1319 
1320 	if (dp_peer_ast_hash_attach(soc)) {
1321 		dp_peer_ast_table_detach(soc);
1322 		dp_peer_find_hash_detach(soc);
1323 		dp_peer_find_map_detach(soc);
1324 		return 1;
1325 	}
1326 
1327 	return 0; /* success */
1328 }
1329 
1330 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1331 	union hal_reo_status *reo_status)
1332 {
1333 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1334 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1335 
1336 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1337 		return;
1338 
1339 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1340 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1341 			       queue_status->header.status, rx_tid->tid);
1342 		return;
1343 	}
1344 
1345 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1346 		       "ssn: %d\n"
1347 		       "curr_idx  : %d\n"
1348 		       "pn_31_0   : %08x\n"
1349 		       "pn_63_32  : %08x\n"
1350 		       "pn_95_64  : %08x\n"
1351 		       "pn_127_96 : %08x\n"
1352 		       "last_rx_enq_tstamp : %08x\n"
1353 		       "last_rx_deq_tstamp : %08x\n"
1354 		       "rx_bitmap_31_0     : %08x\n"
1355 		       "rx_bitmap_63_32    : %08x\n"
1356 		       "rx_bitmap_95_64    : %08x\n"
1357 		       "rx_bitmap_127_96   : %08x\n"
1358 		       "rx_bitmap_159_128  : %08x\n"
1359 		       "rx_bitmap_191_160  : %08x\n"
1360 		       "rx_bitmap_223_192  : %08x\n"
1361 		       "rx_bitmap_255_224  : %08x\n",
1362 		       rx_tid->tid,
1363 		       queue_status->ssn, queue_status->curr_idx,
1364 		       queue_status->pn_31_0, queue_status->pn_63_32,
1365 		       queue_status->pn_95_64, queue_status->pn_127_96,
1366 		       queue_status->last_rx_enq_tstamp,
1367 		       queue_status->last_rx_deq_tstamp,
1368 		       queue_status->rx_bitmap_31_0,
1369 		       queue_status->rx_bitmap_63_32,
1370 		       queue_status->rx_bitmap_95_64,
1371 		       queue_status->rx_bitmap_127_96,
1372 		       queue_status->rx_bitmap_159_128,
1373 		       queue_status->rx_bitmap_191_160,
1374 		       queue_status->rx_bitmap_223_192,
1375 		       queue_status->rx_bitmap_255_224);
1376 
1377 	DP_PRINT_STATS(
1378 		       "curr_mpdu_cnt      : %d\n"
1379 		       "curr_msdu_cnt      : %d\n"
1380 		       "fwd_timeout_cnt    : %d\n"
1381 		       "fwd_bar_cnt        : %d\n"
1382 		       "dup_cnt            : %d\n"
1383 		       "frms_in_order_cnt  : %d\n"
1384 		       "bar_rcvd_cnt       : %d\n"
1385 		       "mpdu_frms_cnt      : %d\n"
1386 		       "msdu_frms_cnt      : %d\n"
1387 		       "total_byte_cnt     : %d\n"
1388 		       "late_recv_mpdu_cnt : %d\n"
1389 		       "win_jump_2k        : %d\n"
1390 		       "hole_cnt           : %d\n",
1391 		       queue_status->curr_mpdu_cnt,
1392 		       queue_status->curr_msdu_cnt,
1393 		       queue_status->fwd_timeout_cnt,
1394 		       queue_status->fwd_bar_cnt,
1395 		       queue_status->dup_cnt,
1396 		       queue_status->frms_in_order_cnt,
1397 		       queue_status->bar_rcvd_cnt,
1398 		       queue_status->mpdu_frms_cnt,
1399 		       queue_status->msdu_frms_cnt,
1400 		       queue_status->total_cnt,
1401 		       queue_status->late_recv_mpdu_cnt,
1402 		       queue_status->win_jump_2k,
1403 		       queue_status->hole_cnt);
1404 
1405 	DP_PRINT_STATS("Addba Req          : %d\n"
1406 			"Addba Resp         : %d\n"
1407 			"Addba Resp success : %d\n"
1408 			"Addba Resp failed  : %d\n"
1409 			"Delba Req received : %d\n"
1410 			"Delba Tx success   : %d\n"
1411 			"Delba Tx Fail      : %d\n"
1412 			"BA window size     : %d\n"
1413 			"Pn size            : %d\n",
1414 			rx_tid->num_of_addba_req,
1415 			rx_tid->num_of_addba_resp,
1416 			rx_tid->num_addba_rsp_success,
1417 			rx_tid->num_addba_rsp_failed,
1418 			rx_tid->num_of_delba_req,
1419 			rx_tid->delba_tx_success_cnt,
1420 			rx_tid->delba_tx_fail_cnt,
1421 			rx_tid->ba_win_size,
1422 			rx_tid->pn_size);
1423 }
1424 
1425 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1426 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1427 	uint8_t vdev_id)
1428 {
1429 	struct dp_peer *peer;
1430 
1431 	QDF_ASSERT(peer_id <= soc->max_peers);
1432 	/* check if there's already a peer object with this MAC address */
1433 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1434 		0 /* is aligned */, vdev_id);
1435 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1436 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1437 		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1438 		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1439 		  peer_mac_addr[4], peer_mac_addr[5]);
1440 
1441 	if (peer) {
1442 		/* peer's ref count was already incremented by
1443 		 * peer_find_hash_find
1444 		 */
1445 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1446 			  "%s: ref_cnt: %d", __func__,
1447 			   qdf_atomic_read(&peer->ref_cnt));
1448 		if (!soc->peer_id_to_obj_map[peer_id])
1449 			soc->peer_id_to_obj_map[peer_id] = peer;
1450 		else {
1451 			/* Peer map event came for peer_id which
1452 			 * is already mapped, this is not expected
1453 			 */
1454 			QDF_ASSERT(0);
1455 		}
1456 
1457 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1458 			/* TBDXXX: assert for now */
1459 			QDF_ASSERT(0);
1460 		}
1461 
1462 		return peer;
1463 	}
1464 
1465 	return NULL;
1466 }
1467 
1468 /**
1469  * dp_rx_peer_map_handler() - handle peer map event from firmware
1470  * @soc - generic soc handle
1471  * @peer_id - peer_id from firmware
1472  * @hw_peer_id - ast index for this peer
1473  * @vdev_id - vdev ID
1474  * @peer_mac_addr - mac address of the peer
1475  * @ast_hash - ast hash value
1476  * @is_wds - flag to indicate peer map event for WDS ast entry
1477  *
1478  * associate the peer_id that firmware provided with peer entry
1479  * and update the ast table in the host with the hw_peer_id.
1480  *
1481  * Return: none
1482  */
1483 
1484 void
1485 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1486 		       uint16_t hw_peer_id, uint8_t vdev_id,
1487 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1488 		       uint8_t is_wds)
1489 {
1490 	struct dp_peer *peer = NULL;
1491 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1492 
1493 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1494 		soc, peer_id, hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1495 		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1496 		  peer_mac_addr[5], vdev_id);
1497 
1498 	/* For a peer map event for a WDS ast entry, get the peer from
1499 	 * the obj map
1500 	 */
1501 	if (is_wds) {
1502 		peer = soc->peer_id_to_obj_map[peer_id];
1503 		/*
1504 		 * In certain cases like Auth attack on a repeater
1505 		 * can result in the number of ast_entries falling
1506 		 * in the same hash bucket to exceed the max_skid
1507 		 * length supported by HW in root AP. In these cases
1508 		 * the FW will return the hw_peer_id (ast_index) as
1509 		 * 0xffff indicating HW could not add the entry in
1510 		 * its table. Host has to delete the entry from its
1511 		 * table in these cases.
1512 		 */
1513 		if (hw_peer_id == HTT_INVALID_PEER) {
1514 			DP_STATS_INC(soc, ast.map_err, 1);
1515 			if (!dp_peer_ast_free_entry_by_mac(soc,
1516 							   peer,
1517 							   peer_mac_addr))
1518 				return;
1519 
1520 			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1521 				 peer, peer->peer_ids[0],
1522 				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1523 				 is_wds);
1524 
1525 			return;
1526 		}
1527 
1528 	} else {
1529 		/*
1530 		 * It's the responsibility of the CP and FW to ensure
1531 		 * that peer is created successfully. Ideally DP should
1532 		 * not hit the below condition for directly associated
1533 		 * peers.
1534 		 */
1535 		if ((hw_peer_id < 0) ||
1536 		    (hw_peer_id >=
1537 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1538 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1539 				  "invalid hw_peer_id: %d", hw_peer_id);
1540 			qdf_assert_always(0);
1541 		}
1542 
1543 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1544 					   hw_peer_id, vdev_id);
1545 
1546 		if (peer) {
1547 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1548 			    qdf_mem_cmp(peer->mac_addr.raw,
1549 					peer->vdev->mac_addr.raw,
1550 					QDF_MAC_ADDR_SIZE) != 0) {
1551 				dp_info("STA vdev bss_peer!!!!");
1552 				peer->bss_peer = 1;
1553 				peer->vdev->vap_bss_peer = peer;
1554 			}
1555 
1556 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1557 				peer->vdev->bss_ast_hash = ast_hash;
1558 				peer->vdev->bss_ast_idx = hw_peer_id;
1559 			}
1560 
1561 			/* Add an ast entry in case the self ast entry was
1562 			 * deleted due to a DP/CP sync issue
1563 			 *
1564 			 * self_ast_entry is modified in the peer create
1565 			 * and peer unmap paths, which cannot run in
1566 			 * parallel with peer map, so no lock is needed
1567 			 * before referring to it
1568 			 */
1569 			if (!peer->self_ast_entry) {
1570 				dp_info("Add self ast from map %pM",
1571 					peer_mac_addr);
1572 				dp_peer_add_ast(soc, peer,
1573 						peer_mac_addr,
1574 						type, 0);
1575 			}
1576 
1577 		}
1578 	}
1579 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1580 			hw_peer_id, vdev_id, ast_hash);
1581 }
1582 
1583 /**
1584  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1585  * @soc - generic soc handle
1586  * @peer_id - peer_id from firmware
1587  * @vdev_id - vdev ID
1588  * @mac_addr - mac address of the peer or wds entry
1589  * @is_wds - flag to indicate peer unmap event for WDS ast entry
1590  *
1591  * Return: none
1592  */
1593 void
1594 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1595 			 uint8_t vdev_id, uint8_t *mac_addr,
1596 			 uint8_t is_wds)
1597 {
1598 	struct dp_peer *peer;
1599 	uint8_t i;
1600 
1601 	peer = __dp_peer_find_by_id(soc, peer_id);
1602 
1603 	/*
1604 	 * Currently peer IDs are assigned for vdevs as well as peers.
1605 	 * If the peer ID is for a vdev, then the peer pointer stored
1606 	 * in peer_id_to_obj_map will be NULL.
1607 	 */
1608 	if (!peer) {
1609 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1610 		return;
1611 	}
1612 
1613 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1614 	 */
1615 	if (soc->is_peer_map_unmap_v2 && is_wds) {
1616 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1617 			return;
1618 
1619 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1620 			 peer, peer->peer_ids[0],
1621 			 peer->mac_addr.raw, mac_addr, vdev_id,
1622 			 is_wds);
1623 
1624 		return;
1625 	}
1626 
1627 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1628 		soc, peer_id, peer);
1629 
1630 	soc->peer_id_to_obj_map[peer_id] = NULL;
1631 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1632 		if (peer->peer_ids[i] == peer_id) {
1633 			peer->peer_ids[i] = HTT_INVALID_PEER;
1634 			break;
1635 		}
1636 	}
1637 
1638 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1639 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1640 				peer_id, vdev_id);
1641 	}
1642 
1643 	/*
1644 	 * Remove a reference to the peer.
1645 	 * If there are no more references, delete the peer object.
1646 	 */
1647 	dp_peer_unref_delete(peer);
1648 }
1649 
1650 void
1651 dp_peer_find_detach(struct dp_soc *soc)
1652 {
1653 	dp_peer_find_map_detach(soc);
1654 	dp_peer_find_hash_detach(soc);
1655 	dp_peer_ast_hash_detach(soc);
1656 	dp_peer_ast_table_detach(soc);
1657 }
1658 
1659 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1660 	union hal_reo_status *reo_status)
1661 {
1662 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1663 
1664 	if ((reo_status->rx_queue_status.header.status !=
1665 		HAL_REO_CMD_SUCCESS) &&
1666 		(reo_status->rx_queue_status.header.status !=
1667 		HAL_REO_CMD_DRAIN)) {
1668 		/* Should not happen normally. Just print error for now */
1669 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1670 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1671 			  __func__,
1672 			  reo_status->rx_queue_status.header.status,
1673 			  rx_tid->tid);
1674 	}
1675 }
1676 
1677 /*
1678  * dp_find_peer_by_addr - find peer instance by mac address
1679  * @dev: physical device instance
1680  * @peer_mac_addr: peer mac address
1681  * @local_id: local id for the peer
1682  *
1683  * Return: peer instance pointer
1684  */
1685 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1686 		uint8_t *local_id)
1687 {
1688 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1689 	struct dp_peer *peer;
1690 
1691 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1692 
1693 	if (!peer)
1694 		return NULL;
1695 
1696 	/* Multiple peer ids? How can we know which peer id? */
1697 	*local_id = peer->local_id;
1698 	dp_verbose_debug("peer %pK id %d", peer, *local_id);
1699 
1700 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1701 	 * Decrement it here.
1702 	 */
1703 	dp_peer_unref_delete(peer);
1704 
1705 	return peer;
1706 }
1707 
1708 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1709 {
1710 	struct ol_if_ops *ol_ops = NULL;
1711 	bool is_roaming = false;
1712 	uint8_t vdev_id = -1;
1713 
1714 	if (!peer) {
1715 		dp_info("Peer is NULL. No roaming possible");
1716 		return false;
1717 	}
1718 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1719 
1720 	if (ol_ops && ol_ops->is_roam_inprogress) {
1721 		dp_get_vdevid(peer, &vdev_id);
1722 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1723 	}
1724 
1725 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1726 		peer->mac_addr.raw, vdev_id, is_roaming);
1727 
1728 	return is_roaming;
1729 }
1730 
1731 /*
1732  * dp_rx_tid_update_wifi3() - Update receive TID state
1733  * @peer: Datapath peer handle
1734  * @tid: TID
1735  * @ba_window_size: BlockAck window size
1736  * @start_seq: Starting sequence number
1737  *
1738  * Return: QDF_STATUS code
1739  */
1740 static QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1741 					 ba_window_size, uint32_t start_seq)
1742 {
1743 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1744 	struct dp_soc *soc = peer->vdev->pdev->soc;
1745 	struct hal_reo_cmd_params params;
1746 
1747 	qdf_mem_zero(&params, sizeof(params));
1748 
1749 	params.std.need_status = 1;
1750 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1751 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1752 	params.u.upd_queue_params.update_ba_window_size = 1;
1753 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1754 
1755 	if (start_seq < IEEE80211_SEQ_MAX) {
1756 		params.u.upd_queue_params.update_ssn = 1;
1757 		params.u.upd_queue_params.ssn = start_seq;
1758 	} else {
1759 		dp_set_ssn_valid_flag(&params, 0);
1760 	}
1761 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1762 			dp_rx_tid_update_cb, rx_tid);
1763 
1764 	rx_tid->ba_win_size = ba_window_size;
1765 
1766 	if (dp_get_peer_vdev_roaming_in_progress(peer))
1767 		return QDF_STATUS_E_PERM;
1768 
1769 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
1770 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1771 			peer->vdev->pdev->ctrl_pdev,
1772 			peer->vdev->vdev_id, peer->mac_addr.raw,
1773 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1774 
1775 	return QDF_STATUS_SUCCESS;
1776 }
1777 
1778 /*
1779  * dp_reo_desc_free() - Callback free reo descriptor memory after
1780  * HW cache flush
1781  *
1782  * @soc: DP SOC handle
1783  * @cb_ctxt: Callback context
1784  * @reo_status: REO command status
1785  */
1786 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1787 	union hal_reo_status *reo_status)
1788 {
1789 	struct reo_desc_list_node *freedesc =
1790 		(struct reo_desc_list_node *)cb_ctxt;
1791 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1792 
1793 	if ((reo_status->fl_cache_status.header.status !=
1794 		HAL_REO_CMD_SUCCESS) &&
1795 		(reo_status->fl_cache_status.header.status !=
1796 		HAL_REO_CMD_DRAIN)) {
1797 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1798 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1799 			  __func__,
1800 			  reo_status->rx_queue_status.header.status,
1801 			  freedesc->rx_tid.tid);
1802 	}
1803 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1804 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1805 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1806 	qdf_mem_unmap_nbytes_single(soc->osdev,
1807 		rx_tid->hw_qdesc_paddr,
1808 		QDF_DMA_BIDIRECTIONAL,
1809 		rx_tid->hw_qdesc_alloc_size);
1810 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1811 	qdf_mem_free(freedesc);
1812 }
1813 
1814 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
1815 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1816 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1817 {
1818 	if (dma_addr < 0x50000000)
1819 		return QDF_STATUS_E_FAILURE;
1820 	else
1821 		return QDF_STATUS_SUCCESS;
1822 }
1823 #else
1824 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1825 {
1826 	return QDF_STATUS_SUCCESS;
1827 }
1828 #endif
1829 
1830 
1831 /*
1832  * dp_rx_tid_setup_wifi3() - Setup receive TID state
1833  * @peer: Datapath peer handle
1834  * @tid: TID
1835  * @ba_window_size: BlockAck window size
1836  * @start_seq: Starting sequence number
1837  *
1838  * Return: QDF_STATUS code
1839  */
1840 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1841 				 uint32_t ba_window_size, uint32_t start_seq)
1842 {
1843 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1844 	struct dp_vdev *vdev = peer->vdev;
1845 	struct dp_soc *soc = vdev->pdev->soc;
1846 	uint32_t hw_qdesc_size;
1847 	uint32_t hw_qdesc_align;
1848 	int hal_pn_type;
1849 	void *hw_qdesc_vaddr;
1850 	uint32_t alloc_tries = 0;
1851 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1852 
1853 	if (peer->delete_in_progress ||
1854 	    !qdf_atomic_read(&peer->is_default_route_set))
1855 		return QDF_STATUS_E_FAILURE;
1856 
1857 	rx_tid->ba_win_size = ba_window_size;
1858 	if (rx_tid->hw_qdesc_vaddr_unaligned)
1859 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1860 			start_seq);
1861 	rx_tid->delba_tx_status = 0;
1862 	rx_tid->ppdu_id_2k = 0;
1863 	rx_tid->num_of_addba_req = 0;
1864 	rx_tid->num_of_delba_req = 0;
1865 	rx_tid->num_of_addba_resp = 0;
1866 	rx_tid->num_addba_rsp_failed = 0;
1867 	rx_tid->num_addba_rsp_success = 0;
1868 	rx_tid->delba_tx_success_cnt = 0;
1869 	rx_tid->delba_tx_fail_cnt = 0;
1870 	rx_tid->statuscode = 0;
1871 
1872 	/* TODO: Allocating HW queue descriptors based on max BA window size
1873 	 * for all QOS TIDs so that same descriptor can be used later when
1874 	 * ADDBA request is received. This should be changed to allocate HW
1875 	 * queue descriptors based on BA window size being negotiated (0 for
1876 	 * non BA cases), and reallocate when BA window size changes and also
1877 	 * send WMI message to FW to change the REO queue descriptor in Rx
1878 	 * peer entry as part of dp_rx_tid_update.
1879 	 */
1880 	if (tid != DP_NON_QOS_TID)
1881 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1882 			HAL_RX_MAX_BA_WINDOW, tid);
1883 	else
1884 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1885 			ba_window_size, tid);
1886 
1887 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1888 	/* To avoid unnecessary extra allocation for alignment, try allocating
1889 	 * exact size and see if we already have aligned address.
1890 	 */
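	/*
	 * Note (illustrative): if the exact-size allocation below is not on a
	 * hw_qdesc_align boundary, it is freed and re-allocated with
	 * (hw_qdesc_align - 1) extra bytes, and qdf_align() rounds the start
	 * address up to the next aligned boundary.
	 */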
1891 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1892 
1893 try_desc_alloc:
1894 	rx_tid->hw_qdesc_vaddr_unaligned =
1895 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1896 
1897 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1898 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1899 			  "%s: Rx tid HW desc alloc failed: tid %d",
1900 			  __func__, tid);
1901 		return QDF_STATUS_E_NOMEM;
1902 	}
1903 
1904 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1905 		hw_qdesc_align) {
1906 		/* Address allocated above is not aligned. Allocate extra
1907 		 * memory for alignment
1908 		 */
1909 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1910 		rx_tid->hw_qdesc_vaddr_unaligned =
1911 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1912 					hw_qdesc_align - 1);
1913 
1914 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1915 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1916 				  "%s: Rx tid HW desc alloc failed: tid %d",
1917 				  __func__, tid);
1918 			return QDF_STATUS_E_NOMEM;
1919 		}
1920 
1921 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1922 			rx_tid->hw_qdesc_vaddr_unaligned,
1923 			hw_qdesc_align);
1924 
1925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1926 			  "%s: Total Size %d Aligned Addr %pK",
1927 			  __func__, rx_tid->hw_qdesc_alloc_size,
1928 			  hw_qdesc_vaddr);
1929 
1930 	} else {
1931 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1932 	}
1933 
1934 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1935 	 * Currently this is set based on htt indication
1936 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1937 	 */
1938 	switch (peer->security[dp_sec_ucast].sec_type) {
1939 	case cdp_sec_type_tkip_nomic:
1940 	case cdp_sec_type_aes_ccmp:
1941 	case cdp_sec_type_aes_ccmp_256:
1942 	case cdp_sec_type_aes_gcmp:
1943 	case cdp_sec_type_aes_gcmp_256:
1944 		hal_pn_type = HAL_PN_WPA;
1945 		break;
1946 	case cdp_sec_type_wapi:
1947 		if (vdev->opmode == wlan_op_mode_ap)
1948 			hal_pn_type = HAL_PN_WAPI_EVEN;
1949 		else
1950 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1951 		break;
1952 	default:
1953 		hal_pn_type = HAL_PN_NONE;
1954 		break;
1955 	}
1956 
1957 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1958 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1959 
1960 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1961 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1962 		&(rx_tid->hw_qdesc_paddr));
1963 
1964 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1965 			QDF_STATUS_SUCCESS) {
1966 		if (alloc_tries++ < 10) {
1967 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1968 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1969 			goto try_desc_alloc;
1970 		} else {
1971 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1972 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1973 				  __func__, tid);
1974 			err = QDF_STATUS_E_NOMEM;
1975 			goto error;
1976 		}
1977 	}
1978 
1979 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
1980 		err = QDF_STATUS_E_PERM;
1981 		goto error;
1982 	}
1983 
1984 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1985 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1986 		    vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
1987 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1988 		    1, ba_window_size)) {
1989 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1990 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
1991 				  __func__, tid);
1992 			err = QDF_STATUS_E_FAILURE;
1993 			goto error;
1994 		}
1995 	}
1996 	return QDF_STATUS_SUCCESS;
1997 error:
1998 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1999 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2000 		    QDF_STATUS_SUCCESS)
2001 			qdf_mem_unmap_nbytes_single(
2002 				soc->osdev,
2003 				rx_tid->hw_qdesc_paddr,
2004 				QDF_DMA_BIDIRECTIONAL,
2005 				rx_tid->hw_qdesc_alloc_size);
2006 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2007 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2008 	}
2009 	return err;
2010 }
2011 
2012 /*
2013  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2014  * after deleting the entries (i.e., setting valid=0)
2015  *
2016  * @soc: DP SOC handle
2017  * @cb_ctxt: Callback context
2018  * @reo_status: REO command status
2019  */
2020 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2021 	union hal_reo_status *reo_status)
2022 {
2023 	struct reo_desc_list_node *freedesc =
2024 		(struct reo_desc_list_node *)cb_ctxt;
2025 	uint32_t list_size;
2026 	struct reo_desc_list_node *desc;
2027 	unsigned long curr_ts = qdf_get_system_timestamp();
2028 	uint32_t desc_size, tot_desc_size;
2029 	struct hal_reo_cmd_params params;
2030 
2031 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2032 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2033 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2034 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2035 		return;
2036 	} else if (reo_status->rx_queue_status.header.status !=
2037 		HAL_REO_CMD_SUCCESS) {
2038 		/* Should not happen normally. Just print error for now */
2039 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2040 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
2041 			  __func__,
2042 			  reo_status->rx_queue_status.header.status,
2043 			  freedesc->rx_tid.tid);
2044 	}
2045 
2046 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2047 		"%s: rx_tid: %d status: %d", __func__,
2048 		freedesc->rx_tid.tid,
2049 		reo_status->rx_queue_status.header.status);
2050 
2051 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2052 	freedesc->free_ts = curr_ts;
2053 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2054 		(qdf_list_node_t *)freedesc, &list_size);
2055 
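	/*
	 * Deferred free: descriptors are parked on reo_desc_freelist and only
	 * flushed/freed once the list grows beyond REO_DESC_FREELIST_SIZE or
	 * an entry has aged past REO_DESC_FREE_DEFER_MS.
	 */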
2056 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2057 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2058 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2059 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
2060 		struct dp_rx_tid *rx_tid;
2061 
2062 		qdf_list_remove_front(&soc->reo_desc_freelist,
2063 				(qdf_list_node_t **)&desc);
2064 		list_size--;
2065 		rx_tid = &desc->rx_tid;
2066 
2067 		/* Flush and invalidate REO descriptor from HW cache: Base and
2068 		 * extension descriptors should be flushed separately */
2069 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2070 		/* Get base descriptor size by passing non-qos TID */
2071 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2072 						   DP_NON_QOS_TID);
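		/*
		 * tot_desc_size covers the base descriptor plus all extension
		 * descriptors; stepping backwards in desc_size chunks gives
		 * the offset of each extension descriptor, which is flushed
		 * individually below before the base descriptor.
		 */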
2073 
2074 		/* Flush reo extension descriptors */
2075 		while ((tot_desc_size -= desc_size) > 0) {
2076 			qdf_mem_zero(&params, sizeof(params));
2077 			params.std.addr_lo =
2078 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2079 				tot_desc_size) & 0xffffffff;
2080 			params.std.addr_hi =
2081 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2082 
2083 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2084 							CMD_FLUSH_CACHE,
2085 							&params,
2086 							NULL,
2087 							NULL)) {
2088 				QDF_TRACE(QDF_MODULE_ID_DP,
2089 					QDF_TRACE_LEVEL_ERROR,
2090 					"%s: fail to send CMD_FLUSH_CACHE:"
2091 					"tid %d desc %pK", __func__,
2092 					rx_tid->tid,
2093 					(void *)(rx_tid->hw_qdesc_paddr));
2094 			}
2095 		}
2096 
2097 		/* Flush base descriptor */
2098 		qdf_mem_zero(&params, sizeof(params));
2099 		params.std.need_status = 1;
2100 		params.std.addr_lo =
2101 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2102 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2103 
2104 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2105 							  CMD_FLUSH_CACHE,
2106 							  &params,
2107 							  dp_reo_desc_free,
2108 							  (void *)desc)) {
2109 			union hal_reo_status reo_status;
2110 			/*
2111 			 * If dp_reo_send_cmd return failure, related TID queue desc
2112 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
2113 			 * should be unmapped. The local reo_desc, together with the
2114 			 * TID queue desc, also needs to be freed accordingly.
2115 			 * Here invoke desc_free function directly to do clean up.
2116 			 */
2117 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2118 				  "%s: fail to send REO cmd to flush cache: tid %d",
2119 				  __func__, rx_tid->tid);
2120 			qdf_mem_zero(&reo_status, sizeof(reo_status));
2121 			reo_status.fl_cache_status.header.status = 0;
2122 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
2123 		}
2124 	}
2125 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2126 }
2127 
2128 /*
2129  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
2130  * @peer: Datapath peer handle
2131  * @tid: TID
2132  *
2133  * Return: 0 on success, error code on failure
2134  */
2135 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2136 {
2137 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2138 	struct dp_soc *soc = peer->vdev->pdev->soc;
2139 	struct hal_reo_cmd_params params;
2140 	struct reo_desc_list_node *freedesc =
2141 		qdf_mem_malloc(sizeof(*freedesc));
2142 
2143 	if (!freedesc) {
2144 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2145 			  "%s: malloc failed for freedesc: tid %d",
2146 			  __func__, tid);
2147 		return -ENOMEM;
2148 	}
2149 
2150 	freedesc->rx_tid = *rx_tid;
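	/*
	 * freedesc keeps its own copy of the rx_tid state; the HW queue
	 * descriptor itself is unmapped and freed later from
	 * dp_rx_tid_delete_cb()/dp_reo_desc_free() once the VLD=0 update and
	 * cache flush commands complete.
	 */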
2151 
2152 	qdf_mem_zero(&params, sizeof(params));
2153 
2154 	params.std.need_status = 1;
2155 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2156 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2157 	params.u.upd_queue_params.update_vld = 1;
2158 	params.u.upd_queue_params.vld = 0;
2159 
2160 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2161 		dp_rx_tid_delete_cb, (void *)freedesc);
2162 
2163 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2164 	rx_tid->hw_qdesc_alloc_size = 0;
2165 	rx_tid->hw_qdesc_paddr = 0;
2166 
2167 	return 0;
2168 }
2169 
2170 #ifdef DP_LFR
2171 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2172 {
2173 	int tid;
2174 
2175 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2176 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2177 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2178 			  "Setting up TID %d for peer %pK peer->local_id %d",
2179 			  tid, peer, peer->local_id);
2180 	}
2181 }
2182 #else
2183 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2184 #endif
2185 
2186 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2187 /*
2188  * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
2189  * @peer: Datapath peer
2190  *
2191  */
2192 static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2193 {
2194 }
2195 
2196 /*
2197  * dp_peer_tid_queue_cleanup() - Remove ppdu stats queue per TID
2198  * @peer: Datapath peer
2199  *
2200  */
2201 static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2202 {
2203 }
2204 
2205 /*
2206  * dp_peer_update_80211_hdr() - Update the 802.11 header for the peer
2207  * @vdev: Datapath vdev
2208  * @peer: Datapath peer
2209  *
2210  */
2211 static inline void
2212 dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2213 {
2214 }
2215 #endif
2216 
2217 /*
2218  * dp_peer_tx_init() - Initialize peer TX state
2219  * @pdev: Datapath pdev
2220  * @peer: Datapath peer
2221  *
2222  */
2223 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2224 {
2225 	dp_peer_tid_queue_init(peer);
2226 	dp_peer_update_80211_hdr(peer->vdev, peer);
2227 }
2228 
2229 /*
2230  * dp_peer_tx_cleanup() - Deinitialize peer TX state
2231  * @vdev: Datapath vdev
2232  * @peer: Datapath peer
2233  *
2234  */
2235 static inline void
2236 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2237 {
2238 	dp_peer_tid_queue_cleanup(peer);
2239 }
2240 
2241 /*
2242  * dp_peer_rx_init() - Initialize receive TID state
2243  * @pdev: Datapath pdev
2244  * @peer: Datapath peer
2245  *
2246  */
2247 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2248 {
2249 	int tid;
2250 	struct dp_rx_tid *rx_tid;
2251 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2252 		rx_tid = &peer->rx_tid[tid];
2253 		rx_tid->array = &rx_tid->base;
2254 		rx_tid->base.head = rx_tid->base.tail = NULL;
2255 		rx_tid->tid = tid;
2256 		rx_tid->defrag_timeout_ms = 0;
2257 		rx_tid->ba_win_size = 0;
2258 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2259 
2260 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2261 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2262 	}
2263 
2264 	peer->active_ba_session_cnt = 0;
2265 	peer->hw_buffer_size = 0;
2266 	peer->kill_256_sessions = 0;
2267 
2268 	/* Setup default (non-qos) rx tid queue */
2269 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2270 
2271 	/* Setup rx tid queue for TID 0.
2272 	 * Other queues will be set up on receiving the first packet, which
2273 	 * will cause a NULL REO queue error
2274 	 */
2275 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2276 
2277 	/*
2278 	 * Setup the rest of TID's to handle LFR
2279 	 */
2280 	dp_peer_setup_remaining_tids(peer);
2281 
2282 	/*
2283 	 * Set security defaults: no PN check, no security. The target may
2284 	 * send a HTT SEC_IND message to overwrite these defaults.
2285 	 */
2286 	peer->security[dp_sec_ucast].sec_type =
2287 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2288 }
2289 
2290 /*
2291  * dp_peer_rx_cleanup() - Cleanup receive TID state
2292  * @vdev: Datapath vdev
2293  * @peer: Datapath peer
2294  * @reuse: Peer reference reuse
2295  *
2296  */
2297 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2298 {
2299 	int tid;
2300 	uint32_t tid_delete_mask = 0;
2301 
2302 	dp_info("Remove tids for peer: %pK", peer);
2303 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2304 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2305 
2306 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2307 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2308 			/* Cleanup defrag related resource */
2309 			dp_rx_defrag_waitlist_remove(peer, tid);
2310 			dp_rx_reorder_flush_frag(peer, tid);
2311 		}
2312 
2313 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2314 			dp_rx_tid_delete_wifi3(peer, tid);
2315 
2316 			tid_delete_mask |= (1 << tid);
2317 		}
2318 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2319 	}
2320 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2321 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2322 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
2323 			peer->vdev->vdev_id, peer->mac_addr.raw,
2324 			tid_delete_mask);
2325 	}
2326 #endif
2327 	if (!reuse)
2328 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2329 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2330 }
2331 
2332 #ifdef FEATURE_PERPKT_INFO
2333 /*
2334  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA PPDU stats in peer
2335  * @peer: Datapath peer
2336  *
2337  * Return: void
2338  */
2339 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2340 {
2341 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2342 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2343 	peer->last_delayed_ba = false;
2344 	peer->last_delayed_ba_ppduid = 0;
2345 }
2346 #else
2347 /*
2348  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA PPDU stats in peer
2349  * @peer: Datapath peer
2350  *
2351  * Return: void
2352  */
2353 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2354 {
2355 }
2356 #endif
2357 
2358 /*
2359  * dp_peer_cleanup() - Cleanup peer information
2360  * @vdev: Datapath vdev
2361  * @peer: Datapath peer
2362  * @reuse: Peer reference reuse
2363  *
2364  */
2365 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2366 {
2367 	dp_peer_tx_cleanup(vdev, peer);
2368 
2369 	/* cleanup the Rx reorder queues for this peer */
2370 	dp_peer_rx_cleanup(vdev, peer, reuse);
2371 }
2372 
2373 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2374  *                                window size when a request with
2375  *                                64 window size is received.
2376  *                                This is done as a WAR since HW can
2377  *                                have only one setting per peer (64 or 256).
2378  *                                For HKv2, we use per tid buffersize setting
2379  *                                for 0 to per_tid_basize_max_tid. For tid
2380  *                                more than per_tid_basize_max_tid we use HKv1
2381  *                                method.
2382  * @peer: Datapath peer
2383  *
2384  * Return: void
2385  */
2386 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2387 {
2388 	uint8_t delba_rcode = 0;
2389 	int tid;
2390 	struct dp_rx_tid *rx_tid = NULL;
2391 
2392 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2393 	for (; tid < DP_MAX_TIDS; tid++) {
2394 		rx_tid = &peer->rx_tid[tid];
2395 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2396 
2397 		if (rx_tid->ba_win_size <= 64) {
2398 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2399 			continue;
2400 		} else {
2401 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2402 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2403 				/* send delba */
2404 				if (!rx_tid->delba_tx_status) {
2405 					rx_tid->delba_tx_retry++;
2406 					rx_tid->delba_tx_status = 1;
2407 					rx_tid->delba_rcode =
2408 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2409 					delba_rcode = rx_tid->delba_rcode;
2410 
2411 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2412 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2413 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2414 								peer->vdev->pdev->ctrl_pdev,
2415 								peer->ctrl_peer,
2416 								peer->mac_addr.raw,
2417 								tid, peer->vdev->ctrl_vdev,
2418 								delba_rcode);
2419 				} else {
2420 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2421 				}
2422 			} else {
2423 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2424 			}
2425 		}
2426 	}
2427 }
2428 
2429 /*
2430 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state
2431 *
2432 * @peer: Datapath peer handle
2433 * @tid: TID number
2434 * @status: tx completion status
2435 * Return: 0 on success, error code on failure
2436 */
2437 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
2438 				      uint8_t tid, int status)
2439 {
2440 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2441 	struct dp_rx_tid *rx_tid = NULL;
2442 	QDF_STATUS qdf_status;
2443 
2444 	if (!peer || peer->delete_in_progress) {
2445 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2446 			  "%s: Peer is NULL!\n", __func__);
2447 		return QDF_STATUS_E_FAILURE;
2448 	}
2449 	rx_tid = &peer->rx_tid[tid];
2450 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2451 	if (status) {
2452 		rx_tid->num_addba_rsp_failed++;
2453 		qdf_status = dp_rx_tid_update_wifi3(peer, tid, 1,
2454 						    IEEE80211_SEQ_MAX);
2455 		if (qdf_status == QDF_STATUS_SUCCESS)
2456 			rx_tid->ba_status = DP_RX_BA_INACTIVE;
2457 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2458 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2459 		return QDF_STATUS_SUCCESS;
2460 	}
2461 
2462 	rx_tid->num_addba_rsp_success++;
2463 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2464 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2465 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2466 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2467 			__func__, tid);
2468 		return QDF_STATUS_E_FAILURE;
2469 	}
2470 
2471 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2472 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2473 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2474 			  "%s: default route is not set for peer: %pM",
2475 			  __func__, peer->mac_addr.raw);
2476 		return QDF_STATUS_E_FAILURE;
2477 	}
2478 
2479 	/* First BA session: latch the HW reorder buffer size (64 or 256) */
2480 	if (peer->active_ba_session_cnt == 0) {
2481 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2482 			peer->hw_buffer_size = 256;
2483 		else
2484 			peer->hw_buffer_size = 64;
2485 	}
2486 
2487 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2488 
2489 	peer->active_ba_session_cnt++;
2490 
2491 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2492 
2493 	/* Kill any session having 256 buffer size
2494 	 * when 64 buffer size request is received.
2495 	 * Also, latch on to 64 as new buffer size.
2496 	 */
2497 	if (peer->kill_256_sessions) {
2498 		dp_teardown_256_ba_sessions(peer);
2499 		peer->kill_256_sessions = 0;
2500 	}
2501 	return QDF_STATUS_SUCCESS;
2502 }
2503 
2504 /*
2505 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
2506 *
2507 * @peer: Datapath peer handle
2508 * @tid: TID number
2509 * @dialogtoken: output dialogtoken
2510 * @statuscode: output status code
2511 * @buffersize: Output BA window size
2512 * @batimeout: Output BA timeout
2513 */
2514 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2515 	uint8_t *dialogtoken, uint16_t *statuscode,
2516 	uint16_t *buffersize, uint16_t *batimeout)
2517 {
2518 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2519 	struct dp_rx_tid *rx_tid = NULL;
2520 
2521 	if (!peer || peer->delete_in_progress) {
2522 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2523 			  "%s: Peer is NULL!\n", __func__);
2524 		return;
2525 	}
2526 	rx_tid = &peer->rx_tid[tid];
2527 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2528 	rx_tid->num_of_addba_resp++;
2529 	/* setup ADDBA response parameters */
2530 	*dialogtoken = rx_tid->dialogtoken;
2531 	*statuscode = rx_tid->statuscode;
2532 	*buffersize = rx_tid->ba_win_size;
2533 	*batimeout  = 0;
2534 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2535 }
2536 
2537 /* dp_check_ba_buffersize() - Check buffer size in request
2538  *                            and latch onto this size based on
2539  *                            size used in first active session.
2540  * @peer: Datapath peer
2541  * @tid: Tid
2542  * @buffersize: Block ack window size
2543  *
2544  * Return: void
2545  */
2546 static void dp_check_ba_buffersize(struct dp_peer *peer,
2547 				   uint16_t tid,
2548 				   uint16_t buffersize)
2549 {
2550 	struct dp_rx_tid *rx_tid = NULL;
2551 
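	/*
	 * Worked example (illustrative): if the first active session latched
	 * hw_buffer_size to 64, a later 256 request is clamped to 64; if it
	 * latched 256 and a 64 request arrives, the window drops to 64,
	 * hw_buffer_size is re-latched to 64 and kill_256_sessions is set so
	 * the existing 256-sized sessions get torn down.
	 */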
2552 	rx_tid = &peer->rx_tid[tid];
2553 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2554 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2555 		rx_tid->ba_win_size = buffersize;
2556 		return;
2557 	} else {
2558 		if (peer->active_ba_session_cnt == 0) {
2559 			rx_tid->ba_win_size = buffersize;
2560 		} else {
2561 			if (peer->hw_buffer_size == 64) {
2562 				if (buffersize <= 64)
2563 					rx_tid->ba_win_size = buffersize;
2564 				else
2565 					rx_tid->ba_win_size = peer->hw_buffer_size;
2566 			} else if (peer->hw_buffer_size == 256) {
2567 				if (buffersize > 64) {
2568 					rx_tid->ba_win_size = buffersize;
2569 				} else {
2570 					rx_tid->ba_win_size = buffersize;
2571 					peer->hw_buffer_size = 64;
2572 					peer->kill_256_sessions = 1;
2573 				}
2574 			}
2575 		}
2576 	}
2577 }
2578 
2579 /*
2580  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2581  *
2582  * @peer: Datapath peer handle
2583  * @dialogtoken: dialogtoken from ADDBA frame
2584  * @tid: TID number
2585  * @batimeout: BA timeout
2586  * @buffersize: BA window size
2587  * @startseqnum: Start seq. number received in BA sequence control
2588  *
2589  * Return: 0 on success, error code on failure
2590  */
2591 int dp_addba_requestprocess_wifi3(void *peer_handle,
2592 				  uint8_t dialogtoken,
2593 				  uint16_t tid, uint16_t batimeout,
2594 				  uint16_t buffersize,
2595 				  uint16_t startseqnum)
2596 {
2597 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2598 	struct dp_rx_tid *rx_tid = NULL;
2599 
2600 	if (!peer || peer->delete_in_progress) {
2601 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2602 			  "%s: Peer is NULL!\n", __func__);
2603 		return QDF_STATUS_E_FAILURE;
2604 	}
2605 	rx_tid = &peer->rx_tid[tid];
2606 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2607 	rx_tid->num_of_addba_req++;
2608 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2609 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
2610 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2611 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2612 		peer->active_ba_session_cnt--;
2613 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2614 			  "%s: Addba recvd for Rx Tid-%d hw qdesc is already setup",
2615 			  __func__, tid);
2616 	}
2617 
2618 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2619 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2620 		return QDF_STATUS_E_FAILURE;
2621 	}
2622 	dp_check_ba_buffersize(peer, tid, buffersize);
2623 
2624 	if (dp_rx_tid_setup_wifi3(peer, tid,
2625 	    rx_tid->ba_win_size, startseqnum)) {
2626 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2627 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2628 		return QDF_STATUS_E_FAILURE;
2629 	}
2630 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2631 
2632 	rx_tid->dialogtoken = dialogtoken;
2633 	rx_tid->startseqnum = startseqnum;
2634 
2635 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2636 		rx_tid->statuscode = rx_tid->userstatuscode;
2637 	else
2638 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2639 
2640 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2641 
2642 	return QDF_STATUS_SUCCESS;
2643 }
2644 
2645 /*
2646 * dp_set_addba_response() - Set a user defined ADDBA response status code
2647 *
2648 * @peer: Datapath peer handle
2649 * @tid: TID number
2650 * @statuscode: response status code to be set
2651 */
2652 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2653 	uint16_t statuscode)
2654 {
2655 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2656 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2657 
2658 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2659 	rx_tid->userstatuscode = statuscode;
2660 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2661 }
2662 
2663 /*
2664 * dp_delba_process_wifi3() - Process DELBA from peer
2665 * @peer: Datapath peer handle
2666 * @tid: TID number
2667 * @reasoncode: Reason code received in DELBA frame
2668 *
2669 * Return: 0 on success, error code on failure
2670 */
2671 int dp_delba_process_wifi3(void *peer_handle,
2672 	int tid, uint16_t reasoncode)
2673 {
2674 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2675 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2676 
2677 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2678 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2679 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2680 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2681 		return QDF_STATUS_E_FAILURE;
2682 	}
2683 	/* TODO: See if we can delete the existing REO queue descriptor and
2684 	 * replace it with a new one without a queue extension descriptor to save
2685 	 * memory
2686 	 */
2687 	rx_tid->delba_rcode = reasoncode;
2688 	rx_tid->num_of_delba_req++;
2689 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2690 
2691 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2692 	peer->active_ba_session_cnt--;
2693 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2694 	return 0;
2695 }
2696 
2697 /*
2698  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2699  *
2700  * @peer: Datapath peer handle
2701  * @tid: TID number
2702  * @status: tx completion status
2703  * Return: 0 on success, error code on failure
2704  */
2705 
2706 int dp_delba_tx_completion_wifi3(void *peer_handle,
2707 				 uint8_t tid, int status)
2708 {
2709 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2710 	struct dp_rx_tid *rx_tid = NULL;
2711 
2712 	if (!peer || peer->delete_in_progress) {
2713 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2714 			  "%s: Peer is NULL!", __func__);
2715 		return QDF_STATUS_E_FAILURE;
2716 	}
2717 	rx_tid = &peer->rx_tid[tid];
2718 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2719 	if (status) {
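		/*
		 * DELBA frame tx failed: re-send the DELBA until
		 * DP_MAX_DELBA_RETRY attempts have been made, after which the
		 * retry state is cleared and no further attempt is made.
		 */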
2720 		rx_tid->delba_tx_fail_cnt++;
2721 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2722 			rx_tid->delba_tx_retry = 0;
2723 			rx_tid->delba_tx_status = 0;
2724 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2725 		} else {
2726 			rx_tid->delba_tx_retry++;
2727 			rx_tid->delba_tx_status = 1;
2728 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2729 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2730 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2731 					peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2732 					peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2733 					rx_tid->delba_rcode);
2734 		}
2735 		return QDF_STATUS_SUCCESS;
2736 	} else {
2737 		rx_tid->delba_tx_success_cnt++;
2738 		rx_tid->delba_tx_retry = 0;
2739 		rx_tid->delba_tx_status = 0;
2740 	}
2741 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2742 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2743 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2744 		peer->active_ba_session_cnt--;
2745 	}
2746 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2747 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2748 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2749 	}
2750 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2751 
2752 	return QDF_STATUS_SUCCESS;
2753 }
2754 
2755 /**
2756  * dp_set_pn_check_wifi3() - Enable PN check in REO for security
2757  * @vdev_handle: Datapath vdev handle
2758  * @peer_handle: Datapath peer handle
2759  * @sec_type: security type
2760  * @rx_pn: Receive PN starting number
2762  *
2763  */
2764 
2765 void
2766 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
2767 {
2768 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2769 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2770 	struct dp_pdev *pdev;
2771 	struct dp_soc *soc;
2772 	int i;
2773 	uint8_t pn_size;
2774 	struct hal_reo_cmd_params params;
2775 
2776 	/* preconditions */
2777 	qdf_assert(vdev);
2778 
2779 	pdev = vdev->pdev;
2780 	soc = pdev->soc;
2781 
2782 
2783 	qdf_mem_zero(&params, sizeof(params));
2784 
2785 	params.std.need_status = 1;
2786 	params.u.upd_queue_params.update_pn_valid = 1;
2787 	params.u.upd_queue_params.update_pn_size = 1;
2788 	params.u.upd_queue_params.update_pn = 1;
2789 	params.u.upd_queue_params.update_pn_check_needed = 1;
2790 	params.u.upd_queue_params.update_svld = 1;
2791 	params.u.upd_queue_params.svld = 0;
2792 
2793 	peer->security[dp_sec_ucast].sec_type = sec_type;
2794 
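	/*
	 * PN size per cipher (see switch below): 48-bit PN for TKIP/CCMP/GCMP
	 * variants, 128-bit for WAPI (even/uneven PN depending on AP vs
	 * non-AP role), and no PN check for other security types.
	 */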
2795 	switch (sec_type) {
2796 	case cdp_sec_type_tkip_nomic:
2797 	case cdp_sec_type_aes_ccmp:
2798 	case cdp_sec_type_aes_ccmp_256:
2799 	case cdp_sec_type_aes_gcmp:
2800 	case cdp_sec_type_aes_gcmp_256:
2801 		params.u.upd_queue_params.pn_check_needed = 1;
2802 		params.u.upd_queue_params.pn_size = 48;
2803 		pn_size = 48;
2804 		break;
2805 	case cdp_sec_type_wapi:
2806 		params.u.upd_queue_params.pn_check_needed = 1;
2807 		params.u.upd_queue_params.pn_size = 128;
2808 		pn_size = 128;
2809 		if (vdev->opmode == wlan_op_mode_ap) {
2810 			params.u.upd_queue_params.pn_even = 1;
2811 			params.u.upd_queue_params.update_pn_even = 1;
2812 		} else {
2813 			params.u.upd_queue_params.pn_uneven = 1;
2814 			params.u.upd_queue_params.update_pn_uneven = 1;
2815 		}
2816 		break;
2817 	default:
2818 		params.u.upd_queue_params.pn_check_needed = 0;
2819 		pn_size = 0;
2820 		break;
2821 	}
2822 
2823 
2824 	for (i = 0; i < DP_MAX_TIDS; i++) {
2825 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2826 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2827 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2828 			params.std.addr_lo =
2829 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2830 			params.std.addr_hi =
2831 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2832 
2833 			if (pn_size) {
2834 				QDF_TRACE(QDF_MODULE_ID_DP,
2835 					  QDF_TRACE_LEVEL_INFO_HIGH,
2836 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
2837 					  __func__, i, rx_pn[3], rx_pn[2],
2838 					  rx_pn[1], rx_pn[0]);
2839 				params.u.upd_queue_params.update_pn_valid = 1;
2840 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2841 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2842 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2843 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2844 			}
2845 			rx_tid->pn_size = pn_size;
2846 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2847 				dp_rx_tid_update_cb, rx_tid);
2848 		} else {
2849 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2850 				  "PN Check not setup for TID :%d ", i);
2851 		}
2852 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2853 	}
2854 }
2855 
2856 
2857 void
2858 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
2859 		      enum cdp_sec_type sec_type, int is_unicast,
2860 		      u_int32_t *michael_key,
2861 		      u_int32_t *rx_pn)
2862 {
2863 	struct dp_peer *peer;
2864 	int sec_index;
2865 
2866 	peer = dp_peer_find_by_id(soc, peer_id);
2867 	if (!peer) {
2868 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2869 			  "Couldn't find peer from ID %d - skipping security inits",
2870 			  peer_id);
2871 		return;
2872 	}
2873 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2874 		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
2875 		  peer,
2876 		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2877 		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2878 		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2879 		  is_unicast ? "ucast" : "mcast",
2880 		  sec_type);
2881 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2882 	peer->security[sec_index].sec_type = sec_type;
2883 #ifdef notyet /* TODO: See if this is required for defrag support */
2884 	/* michael key only valid for TKIP, but for simplicity,
2885 	 * copy it anyway
2886 	 */
2887 	qdf_mem_copy(
2888 		&peer->security[sec_index].michael_key[0],
2889 		michael_key,
2890 		sizeof(peer->security[sec_index].michael_key));
2891 #ifdef BIG_ENDIAN_HOST
2892 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2893 				 sizeof(peer->security[sec_index].michael_key));
2894 #endif /* BIG_ENDIAN_HOST */
2895 #endif
2896 
2897 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2898 	if (sec_type != cdp_sec_type_wapi) {
2899 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
2900 	} else {
2901 		for (i = 0; i < DP_MAX_TIDS; i++) {
2902 			/*
2903 			 * Setting PN valid bit for WAPI sec_type,
2904 			 * since WAPI PN has to be started with predefined value
2905 			 */
2906 			peer->tids_last_pn_valid[i] = 1;
2907 			qdf_mem_copy(
2908 				(u_int8_t *) &peer->tids_last_pn[i],
2909 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2910 			peer->tids_last_pn[i].pn128[1] =
2911 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2912 			peer->tids_last_pn[i].pn128[0] =
2913 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2914 		}
2915 	}
2916 #endif
2917 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2918 	 * all security types and last pn for WAPI) once REO command API
2919 	 * is available
2920 	 */
2921 
2922 	dp_peer_unref_del_find_by_id(peer);
2923 }
2924 
2925 #ifdef DP_PEER_EXTENDED_API
2926 /**
2927  * dp_register_peer() - Register peer into physical device
2928  * @pdev - data path device instance
2929  * @sta_desc - peer description
2930  *
2931  * Register peer into physical device
2932  *
2933  * Return: QDF_STATUS_SUCCESS registration success
2934  *         QDF_STATUS_E_FAULT peer not found
2935  */
2936 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2937 		struct ol_txrx_desc_type *sta_desc)
2938 {
2939 	struct dp_peer *peer;
2940 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2941 	uint8_t peer_id;
2942 
2943 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
2944 				    sta_desc->peer_addr.bytes,
2945 				    &peer_id);
2946 
2947 	if (!peer)
2948 		return QDF_STATUS_E_FAULT;
2949 
2950 	qdf_spin_lock_bh(&peer->peer_info_lock);
2951 	peer->state = OL_TXRX_PEER_STATE_CONN;
2952 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2953 
2954 	dp_rx_flush_rx_cached(peer, false);
2955 
2956 	return QDF_STATUS_SUCCESS;
2957 }
2958 
2959 /**
2960  * dp_clear_peer() - remove peer from physical device
2961  * @pdev - data path device instance
2962  * @peer_addr - peer mac address
2963  *
2964  * remove peer from physical device
2965  *
2966  * Return: QDF_STATUS_SUCCESS registration success
2967  *         QDF_STATUS_E_FAULT peer not found
2968  */
2969 QDF_STATUS
2970 dp_clear_peer(struct cdp_pdev *pdev_handle, struct qdf_mac_addr peer_addr)
2971 {
2972 	struct dp_peer *peer;
2973 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2974 	/* peer_id to be removed */
2975 	uint8_t peer_id;
2976 
2977 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes,
2978 				    &peer_id);
2979 	if (!peer)
2980 		return QDF_STATUS_E_FAULT;
2981 
2982 	qdf_spin_lock_bh(&peer->peer_info_lock);
2983 	peer->state = OL_TXRX_PEER_STATE_DISC;
2984 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2985 
2986 	dp_rx_flush_rx_cached(peer, true);
2987 
2988 	return QDF_STATUS_SUCCESS;
2989 }
2990 
2991 /**
2992  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2993  * @pdev - data path device instance
2994  * @vdev - virtual interface instance
2995  * @peer_addr - peer mac address
2996  * @peer_id - local peer id with target mac address
2997  *
2998  * Find peer by peer mac address within vdev
2999  *
3000  * Return: peer instance void pointer
3001  *         NULL cannot find target peer
3002  */
3003 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3004 		struct cdp_vdev *vdev_handle,
3005 		uint8_t *peer_addr, uint8_t *local_id)
3006 {
3007 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3008 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3009 	struct dp_peer *peer;
3010 
3011 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
3012 
3013 	if (!peer)
3014 		return NULL;
3015 
3016 	if (peer->vdev != vdev) {
3017 		dp_peer_unref_delete(peer);
3018 		return NULL;
3019 	}
3020 
3021 	*local_id = peer->local_id;
3022 
3023 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3024 	 * Decrement it here.
3025 	 */
3026 	dp_peer_unref_delete(peer);
3027 
3028 	return peer;
3029 }
3030 
3031 /**
3032  * dp_local_peer_id() - Find local peer id within peer instance
3033  * @peer - peer instance
3034  *
3035  * Find local peer id within peer instance
3036  *
3037  * Return: local peer id
3038  */
3039 uint16_t dp_local_peer_id(void *peer)
3040 {
3041 	return ((struct dp_peer *)peer)->local_id;
3042 }
3043 
3044 /**
3045  * dp_peer_find_by_local_id() - Find peer by local peer id
3046  * @pdev - data path device instance
3047  * @local_peer_id - local peer id to find
3048  *
3049  * Find peer by local peer id within physical device
3050  *
3051  * Return: peer instance void pointer
3052  *         NULL cannot find target peer
3053  */
3054 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
3055 {
3056 	struct dp_peer *peer;
3057 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3058 
3059 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
3060 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
3061 				   "Incorrect local id %u", local_id);
3062 		return NULL;
3063 	}
3064 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3065 	peer = pdev->local_peer_ids.map[local_id];
3066 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3067 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
3068 	return peer;
3069 }
3070 
3071 /**
3072  * dp_peer_state_update() - update peer local state
3073  * @pdev - data path device instance
3074  * @peer_addr - peer mac address
3075  * @state - new peer local state
3076  *
3077  * update peer local state
3078  *
3079  * Return: QDF_STATUS_SUCCESS registration success
3080  */
3081 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
3082 		enum ol_txrx_peer_state state)
3083 {
3084 	struct dp_peer *peer;
3085 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3086 
3087 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
3088 	if (!peer) {
3089 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3090 			  "Failed to find peer for: [%pM]", peer_mac);
3091 		return QDF_STATUS_E_FAILURE;
3092 	}
3093 	peer->state = state;
3094 
3095 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
3096 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3097 	 * Decrement it here.
3098 	 */
3099 	dp_peer_unref_delete(peer);
3100 
3101 	return QDF_STATUS_SUCCESS;
3102 }
3103 
3104 /**
3105  * dp_get_vdevid() - Get virtual interface id with which the peer is registered
3106  * @peer - peer instance
3107  * @vdev_id - virtual interface id with which the peer is registered
3108  *
3109  * Get the virtual interface id with which the peer is registered
3110  *
3111  * Return: QDF_STATUS_SUCCESS registration success
3112  */
3113 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
3114 {
3115 	struct dp_peer *peer = peer_handle;
3116 
3117 	dp_info("peer %pK vdev %pK vdev id %d",
3118 		peer, peer->vdev, peer->vdev->vdev_id);
3119 	*vdev_id = peer->vdev->vdev_id;
3120 	return QDF_STATUS_SUCCESS;
3121 }
3122 
3123 struct cdp_vdev *
3124 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3125 			 struct qdf_mac_addr peer_addr)
3126 {
3127 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3128 	struct dp_peer *peer = NULL;
3129 	/* peer_id to be removed PEER_ID_CLEANUP */
3130 	uint8_t peer_id;
3131 
3132 	if (!pdev) {
3133 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3134 			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_STR,
3135 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3136 		return NULL;
3137 	}
3138 
3139 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes,
3140 				    &peer_id);
3141 	if (!peer) {
3142 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3143 			  "Peer not found for peer_addr:" QDF_MAC_ADDR_STR,
3144 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3145 		return NULL;
3146 	}
3147 
3148 	return (struct cdp_vdev *)peer->vdev;
3149 }
3150 
3151 /**
3152  * dp_get_vdev_for_peer() - Get virtual interface instance to which the peer belongs
3153  * @peer - peer instance
3154  *
3155  * Get the virtual interface instance to which the peer belongs
3156  *
3157  * Return: virtual interface instance pointer
3158  *         NULL in case cannot find
3159  */
3160 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3161 {
3162 	struct dp_peer *peer = peer_handle;
3163 
3164 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3165 	return (struct cdp_vdev *)peer->vdev;
3166 }
3167 
3168 /**
3169  * dp_peer_get_peer_mac_addr() - Get peer mac address
3170  * @peer - peer instance
3171  *
3172  * Get peer mac address
3173  *
3174  * Return: peer mac address pointer
3175  *         NULL in case cannot find
3176  */
3177 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3178 {
3179 	struct dp_peer *peer = peer_handle;
3180 	uint8_t *mac;
3181 
3182 	mac = peer->mac_addr.raw;
3183 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3184 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3185 	return peer->mac_addr.raw;
3186 }
3187 
3188 /**
3189  * dp_get_peer_state() - Get local peer state
3190  * @peer - peer instance
3191  *
3192  * Get local peer state
3193  *
3194  * Return: peer status
3195  */
3196 int dp_get_peer_state(void *peer_handle)
3197 {
3198 	struct dp_peer *peer = peer_handle;
3199 
3200 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3201 	return peer->state;
3202 }
3203 
3204 /**
3205  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3206  * @pdev - data path device instance
3207  *
3208  * local peer id pool alloc for physical device
3209  *
3210  * Return: none
3211  */
3212 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3213 {
3214 	int i;
3215 
3216 	/* point the freelist to the first ID */
3217 	pdev->local_peer_ids.freelist = 0;
3218 
3219 	/* link each ID to the next one */
3220 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3221 		pdev->local_peer_ids.pool[i] = i + 1;
3222 		pdev->local_peer_ids.map[i] = NULL;
3223 	}
3224 
3225 	/* link the last ID to itself, to mark the end of the list */
3226 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3227 	pdev->local_peer_ids.pool[i] = i;
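	/*
	 * Illustration: with N = OL_TXRX_NUM_LOCAL_PEER_IDS, pool[0..N-1]
	 * holds {1, 2, ..., N} and pool[N] == N is the end-of-list marker, so
	 * a head ID whose pool entry equals itself means the freelist is
	 * empty.
	 */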
3228 
3229 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3230 	DP_TRACE(INFO, "Peer pool init");
3231 }
3232 
3233 /**
3234  * dp_local_peer_id_alloc() - allocate local peer id
3235  * @pdev - data path device instance
3236  * @peer - new peer instance
3237  *
3238  * allocate local peer id
3239  *
3240  * Return: none
3241  */
3242 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3243 {
3244 	int i;
3245 
3246 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3247 	i = pdev->local_peer_ids.freelist;
3248 	if (pdev->local_peer_ids.pool[i] == i) {
3249 		/* the list is empty, except for the list-end marker */
3250 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3251 	} else {
3252 		/* take the head ID and advance the freelist */
3253 		peer->local_id = i;
3254 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3255 		pdev->local_peer_ids.map[i] = peer;
3256 	}
3257 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3258 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
3259 }
3260 
3261 /**
3262  * dp_local_peer_id_free() - remove local peer id
3263  * @pdev - data path device instance
3264  * @peer - peer instance should be removed
3265  *
3266  * remove local peer id
3267  *
3268  * Return: none
3269  */
3270 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3271 {
3272 	int i = peer->local_id;
3273 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3274 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3275 		return;
3276 	}
3277 
3278 	/* put this ID on the head of the freelist */
3279 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3280 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3281 	pdev->local_peer_ids.freelist = i;
3282 	pdev->local_peer_ids.map[i] = NULL;
3283 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3284 }
3285 #endif
3286 
3287 /**
3288  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3289  * @soc_handle: DP SOC handle
3290  * @peer_id:peer_id of the peer
3291  *
3292  * return: vdev_id of the vap
3293  */
3294 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3295 		uint16_t peer_id, uint8_t *peer_mac)
3296 {
3297 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3298 	struct dp_peer *peer;
3299 	uint8_t vdev_id;
3300 
3301 	peer = dp_peer_find_by_id(soc, peer_id);
3302 
3303 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3304 		  "soc %pK peer_id %d", soc, peer_id);
3305 
3306 	if (!peer) {
3307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3308 			  "peer not found ");
3309 		return CDP_INVALID_VDEV_ID;
3310 	}
3311 
3312 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3313 	vdev_id = peer->vdev->vdev_id;
3314 
3315 	dp_peer_unref_del_find_by_id(peer);
3316 
3317 	return vdev_id;
3318 }
3319 
3320 /**
3321  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3322  * @peer: DP peer handle
3323  * @dp_stats_cmd_cb: REO command callback function
3324  * @cb_ctxt: Callback context
3325  *
3326  * Return: none
3327  */
3328 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3329 			void *cb_ctxt)
3330 {
3331 	struct dp_soc *soc = peer->vdev->pdev->soc;
3332 	struct hal_reo_cmd_params params;
3333 	int i;
3334 
3335 	if (!dp_stats_cmd_cb)
3336 		return;
3337 
3338 	qdf_mem_zero(&params, sizeof(params));
3339 	for (i = 0; i < DP_MAX_TIDS; i++) {
3340 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3341 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3342 			params.std.need_status = 1;
3343 			params.std.addr_lo =
3344 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3345 			params.std.addr_hi =
3346 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3347 
3348 			if (cb_ctxt) {
3349 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3350 					&params, dp_stats_cmd_cb, cb_ctxt);
3351 			} else {
3352 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3353 					&params, dp_stats_cmd_cb, rx_tid);
3354 			}
3355 
3356 			/* Flush REO descriptor from HW cache to update stats
3357 			 * in descriptor memory. This is to help debugging */
3358 			qdf_mem_zero(&params, sizeof(params));
3359 			params.std.need_status = 0;
3360 			params.std.addr_lo =
3361 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3362 			params.std.addr_hi =
3363 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3364 			params.u.fl_cache_params.flush_no_inval = 1;
3365 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3366 				NULL);
3367 		}
3368 	}
3369 }
3370 
3371 void dp_set_michael_key(struct cdp_peer *peer_handle,
3372 			bool is_unicast, uint32_t *key)
3373 {
3374 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3375 	uint8_t sec_index = is_unicast ? 1 : 0;
3376 
3377 	if (!peer) {
3378 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3379 			  "peer not found ");
3380 		return;
3381 	}
3382 
3383 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3384 		     key, IEEE80211_WEP_MICLEN);
3385 }
3386 
3387 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3388 {
3389 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3390 
3391 	if (peer) {
3392 		/*
3393 		 * Decrement the peer ref which is taken as part of
3394 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3395 		 */
3396 		dp_peer_unref_del_find_by_id(peer);
3397 
3398 		return true;
3399 	}
3400 
3401 	return false;
3402 }
3403