1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 
33 #ifdef WLAN_TX_PKT_CAPTURE_ENH
34 #include "dp_tx_capture.h"
35 #endif
36 
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
44 		  "%s: Setting SSN valid bit to %d",
45 		  __func__, valid);
46 }
47 
48 static inline int dp_peer_find_mac_addr_cmp(
49 	union dp_align_mac_addr *mac_addr1,
50 	union dp_align_mac_addr *mac_addr2)
51 {
52 		/*
53 		 * Intentionally use & rather than &&.
54 		 * Because the operands are binary rather than generic boolean,
55 		 * the functionality is equivalent.
56 		 * Using && has the advantage of short-circuited evaluation,
57 		 * but using & has the advantage of no conditional branching,
58 		 * which is a more significant benefit.
59 		 */
60 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
61 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
62 }
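
/*
 * Note: like memcmp(), this comparator returns 0 on a match and non-zero
 * otherwise, so callers test for equality with
 * !dp_peer_find_mac_addr_cmp(addr1, addr2) or by comparing the result to 0.
 */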
63 
64 static int dp_peer_ast_table_attach(struct dp_soc *soc)
65 {
66 	uint32_t max_ast_index;
67 
68 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
69 	/* allocate ast_table for ast entry to ast_index map */
70 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
71 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
72 	soc->ast_table = qdf_mem_malloc(max_ast_index *
73 					sizeof(struct dp_ast_entry *));
74 	if (!soc->ast_table) {
75 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
76 			  "%s: ast_table memory allocation failed", __func__);
77 		return QDF_STATUS_E_NOMEM;
78 	}
79 	return 0; /* success */
80 }
81 
82 static int dp_peer_find_map_attach(struct dp_soc *soc)
83 {
84 	uint32_t max_peers, peer_map_size;
85 
86 	max_peers = soc->max_peers;
87 	/* allocate the peer ID -> peer object map */
88 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
89 		  "\n<=== cfg max peer id %d ====>", max_peers);
90 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
91 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
92 	if (!soc->peer_id_to_obj_map) {
93 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
94 			  "%s: peer map memory allocation failed", __func__);
95 		return QDF_STATUS_E_NOMEM;
96 	}
97 
98 	/*
99 	 * The peer_id_to_obj_map doesn't really need to be initialized,
100 	 * since elements are only used after they have been individually
101 	 * initialized.
102 	 * However, it is convenient for debugging to have all elements
103 	 * that are not in use set to 0.
104 	 */
105 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
106 	return 0; /* success */
107 }
108 
109 static int dp_log2_ceil(unsigned int value)
110 {
111 	unsigned int tmp = value;
112 	int log2 = -1;
113 
114 	while (tmp) {
115 		log2++;
116 		tmp >>= 1;
117 	}
118 	if (1 << log2 != value)
119 		log2++;
120 	return log2;
121 }
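
/*
 * Worked examples (illustrative values, not from the original source):
 * dp_log2_ceil() returns ceil(log2(value)), i.e. the number of index bits
 * needed to cover 'value' entries:
 *   dp_log2_ceil(1)  == 0
 *   dp_log2_ceil(5)  == 3   (1 << 2 == 4 is too small, 1 << 3 == 8 covers 5)
 *   dp_log2_ceil(64) == 6
 */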
122 
123 static int dp_peer_find_add_id_to_obj(
124 	struct dp_peer *peer,
125 	uint16_t peer_id)
126 {
127 	int i;
128 
129 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
130 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
131 			peer->peer_ids[i] = peer_id;
132 			return 0; /* success */
133 		}
134 	}
135 	return QDF_STATUS_E_FAILURE; /* failure */
136 }
137 
138 #define DP_PEER_HASH_LOAD_MULT  2
139 #define DP_PEER_HASH_LOAD_SHIFT 0
140 
141 #define DP_AST_HASH_LOAD_MULT  2
142 #define DP_AST_HASH_LOAD_SHIFT 0
143 
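/*
 * Sizing sketch (illustrative numbers, not taken from the source): the hash
 * bin count below is max_peers scaled by the load multiplier/shift and then
 * rounded up to a power of two, so a simple mask can be used as the bin
 * index. For example, assuming max_peers = 1030:
 *   1030 * DP_PEER_HASH_LOAD_MULT         = 2060
 *   2060 >> DP_PEER_HASH_LOAD_SHIFT       = 2060
 *   hash_elems = 1 << dp_log2_ceil(2060)  = 4096 bins, mask = 0xfff
 */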
144 static int dp_peer_find_hash_attach(struct dp_soc *soc)
145 {
146 	int i, hash_elems, log2;
147 
148 	/* allocate the peer MAC address -> peer object hash table */
149 	hash_elems = soc->max_peers;
150 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
151 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
152 	log2 = dp_log2_ceil(hash_elems);
153 	hash_elems = 1 << log2;
154 
155 	soc->peer_hash.mask = hash_elems - 1;
156 	soc->peer_hash.idx_bits = log2;
157 	/* allocate an array of TAILQ peer object lists */
158 	soc->peer_hash.bins = qdf_mem_malloc(
159 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
160 	if (!soc->peer_hash.bins)
161 		return QDF_STATUS_E_NOMEM;
162 
163 	for (i = 0; i < hash_elems; i++)
164 		TAILQ_INIT(&soc->peer_hash.bins[i]);
165 
166 	return 0;
167 }
168 
169 static void dp_peer_find_hash_detach(struct dp_soc *soc)
170 {
171 	if (soc->peer_hash.bins) {
172 		qdf_mem_free(soc->peer_hash.bins);
173 		soc->peer_hash.bins = NULL;
174 	}
175 }
176 
177 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
178 	union dp_align_mac_addr *mac_addr)
179 {
180 	unsigned index;
181 
182 	index =
183 		mac_addr->align2.bytes_ab ^
184 		mac_addr->align2.bytes_cd ^
185 		mac_addr->align2.bytes_ef;
186 	index ^= index >> soc->peer_hash.idx_bits;
187 	index &= soc->peer_hash.mask;
188 	return index;
189 }
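
/*
 * Note: the index is an XOR fold of the three 16-bit halves of the MAC
 * address, with the upper bits folded down once more before masking, so all
 * six bytes of the address contribute to the bin selection.
 */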
190 
191 
192 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
193 {
194 	unsigned index;
195 
196 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
197 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
198 	/*
199 	 * It is important to add the new peer at the tail of the peer list
200 	 * with the bin index.  Together with having the hash_find function
201 	 * search from head to tail, this ensures that if two entries with
202 	 * the same MAC address are stored, the one added first will be
203 	 * found first.
204 	 */
205 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
206 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
207 }
208 
209 #ifdef FEATURE_AST
210 /*
211  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
212  * @soc: SoC handle
213  *
214  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
215  */
216 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
217 {
218 	int i, hash_elems, log2;
219 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
220 
221 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
222 		DP_AST_HASH_LOAD_SHIFT);
223 
224 	log2 = dp_log2_ceil(hash_elems);
225 	hash_elems = 1 << log2;
226 
227 	soc->ast_hash.mask = hash_elems - 1;
228 	soc->ast_hash.idx_bits = log2;
229 
230 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
231 		  "ast hash_elems: %d, max_ast_idx: %d",
232 		  hash_elems, max_ast_idx);
233 
234 	/* allocate an array of TAILQ peer object lists */
235 	soc->ast_hash.bins = qdf_mem_malloc(
236 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
237 				dp_ast_entry)));
238 
239 	if (!soc->ast_hash.bins)
240 		return QDF_STATUS_E_NOMEM;
241 
242 	for (i = 0; i < hash_elems; i++)
243 		TAILQ_INIT(&soc->ast_hash.bins[i]);
244 
245 	return 0;
246 }
247 
248 /*
249  * dp_peer_ast_cleanup() - cleanup the references
250  * @soc: SoC handle
251  * @ast: ast entry
252  *
253  * Return: None
254  */
255 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
256 				       struct dp_ast_entry *ast)
257 {
258 	txrx_ast_free_cb cb = ast->callback;
259 	void *cookie = ast->cookie;
260 
261 	/* Call the callbacks to free up the cookie */
262 	if (cb) {
263 		ast->callback = NULL;
264 		ast->cookie = NULL;
265 		cb(soc->ctrl_psoc,
266 		   dp_soc_to_cdp_soc(soc),
267 		   cookie,
268 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
269 	}
270 }
271 
272 /*
273  * dp_peer_ast_hash_detach() - Free AST Hash table
274  * @soc: SoC handle
275  *
276  * Return: None
277  */
278 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
279 {
280 	unsigned int index;
281 	struct dp_ast_entry *ast, *ast_next;
282 
283 	if (!soc->ast_hash.mask)
284 		return;
285 
286 	if (!soc->ast_hash.bins)
287 		return;
288 
289 	qdf_spin_lock_bh(&soc->ast_lock);
290 	for (index = 0; index <= soc->ast_hash.mask; index++) {
291 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
292 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
293 					   hash_list_elem, ast_next) {
294 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
295 					     hash_list_elem);
296 				dp_peer_ast_cleanup(soc, ast);
297 				qdf_mem_free(ast);
298 			}
299 		}
300 	}
301 	qdf_spin_unlock_bh(&soc->ast_lock);
302 
303 	qdf_mem_free(soc->ast_hash.bins);
304 	soc->ast_hash.bins = NULL;
305 }
306 
307 /*
308  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
309  * @soc: SoC handle
310  *
311  * Return: AST hash
312  */
313 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
314 	union dp_align_mac_addr *mac_addr)
315 {
316 	uint32_t index;
317 
318 	index =
319 		mac_addr->align2.bytes_ab ^
320 		mac_addr->align2.bytes_cd ^
321 		mac_addr->align2.bytes_ef;
322 	index ^= index >> soc->ast_hash.idx_bits;
323 	index &= soc->ast_hash.mask;
324 	return index;
325 }
326 
327 /*
328  * dp_peer_ast_hash_add() - Add AST entry into hash table
329  * @soc: SoC handle
330  *
331  * This function adds the AST entry into SoC AST hash table
332  * It assumes caller has taken the ast lock to protect the access to this table
333  *
334  * Return: None
335  */
336 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
337 		struct dp_ast_entry *ase)
338 {
339 	uint32_t index;
340 
341 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
342 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
343 }
344 
345 /*
346  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
347  * @soc: SoC handle
348  *
349  * This function removes the AST entry from soc AST hash table
350  * It assumes caller has taken the ast lock to protect the access to this table
351  *
352  * Return: None
353  */
354 void dp_peer_ast_hash_remove(struct dp_soc *soc,
355 			     struct dp_ast_entry *ase)
356 {
357 	unsigned index;
358 	struct dp_ast_entry *tmpase;
359 	int found = 0;
360 
361 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
362 	/* Check that the tail queue is not empty before delete */
363 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
364 
365 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
366 		if (tmpase == ase) {
367 			found = 1;
368 			break;
369 		}
370 	}
371 
372 	QDF_ASSERT(found);
373 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
374 }
375 
376 /*
377  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
378  * @soc: SoC handle
379  * @peer: peer handle
380  * @ast_mac_addr: mac address
381  *
382  * It assumes caller has taken the ast lock to protect the access to ast list
383  *
384  * Return: AST entry
385  */
386 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
387 					   struct dp_peer *peer,
388 					   uint8_t *ast_mac_addr)
389 {
390 	struct dp_ast_entry *ast_entry = NULL;
391 	union dp_align_mac_addr *mac_addr =
392 		(union dp_align_mac_addr *)ast_mac_addr;
393 
394 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
395 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
396 					       &ast_entry->mac_addr)) {
397 			return ast_entry;
398 		}
399 	}
400 
401 	return NULL;
402 }
403 
404 /*
405  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
406  * @soc: SoC handle
407  *
408  * It assumes caller has taken the ast lock to protect the access to
409  * AST hash table
410  *
411  * Return: AST entry
412  */
413 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
414 						     uint8_t *ast_mac_addr,
415 						     uint8_t pdev_id)
416 {
417 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
418 	uint32_t index;
419 	struct dp_ast_entry *ase;
420 
421 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
422 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
423 	mac_addr = &local_mac_addr_aligned;
424 
425 	index = dp_peer_ast_hash_index(soc, mac_addr);
426 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
427 		if ((pdev_id == ase->pdev_id) &&
428 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
429 			return ase;
430 		}
431 	}
432 
433 	return NULL;
434 }
435 
436 /*
437  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
438  * @soc: SoC handle
439  *
440  * It assumes caller has taken the ast lock to protect the access to
441  * AST hash table
442  *
443  * Return: AST entry
444  */
445 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
446 					       uint8_t *ast_mac_addr)
447 {
448 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
449 	unsigned index;
450 	struct dp_ast_entry *ase;
451 
452 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
453 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
454 	mac_addr = &local_mac_addr_aligned;
455 
456 	index = dp_peer_ast_hash_index(soc, mac_addr);
457 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
458 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
459 			return ase;
460 		}
461 	}
462 
463 	return NULL;
464 }
465 
466 /*
467  * dp_peer_map_ast() - Map the ast entry with HW AST Index
468  * @soc: SoC handle
469  * @peer: peer to which ast node belongs
470  * @mac_addr: MAC address of ast node
471  * @hw_peer_id: HW AST Index returned by target in peer map event
472  * @vdev_id: vdev id for VAP to which the peer belongs to
473  * @ast_hash: ast hash value in HW
474  *
475  * Return: None
476  */
477 static inline void dp_peer_map_ast(struct dp_soc *soc,
478 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
479 	uint8_t vdev_id, uint16_t ast_hash)
480 {
481 	struct dp_ast_entry *ast_entry = NULL;
482 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
483 
484 	if (!peer) {
485 		return;
486 	}
487 
488 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
489 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
490 		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
491 		  mac_addr[1], mac_addr[2], mac_addr[3],
492 		  mac_addr[4], mac_addr[5]);
493 
494 	qdf_spin_lock_bh(&soc->ast_lock);
495 
496 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
497 
498 	if (ast_entry) {
499 		ast_entry->ast_idx = hw_peer_id;
500 		soc->ast_table[hw_peer_id] = ast_entry;
501 		ast_entry->is_active = TRUE;
502 		peer_type = ast_entry->type;
503 		ast_entry->ast_hash_value = ast_hash;
504 		ast_entry->is_mapped = TRUE;
505 	}
506 
507 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
508 		if (soc->cdp_soc.ol_ops->peer_map_event) {
509 			soc->cdp_soc.ol_ops->peer_map_event(
510 			soc->ctrl_psoc, peer->peer_ids[0],
511 			hw_peer_id, vdev_id,
512 			mac_addr, peer_type, ast_hash);
513 		}
514 	} else {
515 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
516 			  "AST entry not found");
517 	}
518 
519 	qdf_spin_unlock_bh(&soc->ast_lock);
520 	return;
521 }
522 
523 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
524 			   struct cdp_soc *dp_soc,
525 			   void *cookie,
526 			   enum cdp_ast_free_status status)
527 {
528 	struct dp_ast_free_cb_params *param =
529 		(struct dp_ast_free_cb_params *)cookie;
530 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
531 	struct dp_peer *peer = NULL;
532 
533 	if (status != CDP_TXRX_AST_DELETED) {
534 		qdf_mem_free(cookie);
535 		return;
536 	}
537 
538 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
539 				      0, param->vdev_id);
540 	if (peer) {
541 		dp_peer_add_ast(soc, peer,
542 				&param->mac_addr.raw[0],
543 				param->type,
544 				param->flags);
545 		dp_peer_unref_delete(peer);
546 	}
547 	qdf_mem_free(cookie);
548 }
549 
550 /*
551  * dp_peer_add_ast() - Allocate and add AST entry into peer list
552  * @soc: SoC handle
553  * @peer: peer to which ast node belongs
554  * @mac_addr: MAC address of ast node
555  * @type: AST entry type
556  * @flags: AST entry flags
557  * This API is used by WDS source port learning function to
558  * add a new AST entry into peer AST list
559  *
560  * Return: 0 if new entry is allocated,
561  *        -1 if entry add failed
562  */
563 int dp_peer_add_ast(struct dp_soc *soc,
564 			struct dp_peer *peer,
565 			uint8_t *mac_addr,
566 			enum cdp_txrx_ast_entry_type type,
567 			uint32_t flags)
568 {
569 	struct dp_ast_entry *ast_entry = NULL;
570 	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
571 	struct dp_pdev *pdev = NULL;
572 	uint8_t next_node_mac[6];
573 	int  ret = -1;
574 	txrx_ast_free_cb cb = NULL;
575 	void *cookie = NULL;
576 	struct dp_peer *tmp_peer = NULL;
577 	bool is_peer_found = false;
578 
579 	vdev = peer->vdev;
580 	if (!vdev) {
581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
582 			  FL("Peers vdev is NULL"));
583 		QDF_ASSERT(0);
584 		return ret;
585 	}
586 
587 	pdev = vdev->pdev;
588 
589 	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
590 					  DP_VDEV_ALL);
591 	if (tmp_peer) {
592 		tmp_vdev = tmp_peer->vdev;
593 		if (!tmp_vdev) {
594 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
595 				  FL("Peers vdev is NULL"));
596 			QDF_ASSERT(0);
597 			dp_peer_unref_delete(tmp_peer);
598 			return ret;
599 		}
600 		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
601 			is_peer_found = true;
602 
603 		dp_peer_unref_delete(tmp_peer);
604 	}
605 
606 	qdf_spin_lock_bh(&soc->ast_lock);
607 	if (peer->delete_in_progress) {
608 		qdf_spin_unlock_bh(&soc->ast_lock);
609 		return ret;
610 	}
611 
612 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
613 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
614 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
615 		  peer->mac_addr.raw, peer, mac_addr);
616 
617 
618 	/* fw supports only 2 times the max_peers ast entries */
619 	if (soc->num_ast_entries >=
620 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
621 		qdf_spin_unlock_bh(&soc->ast_lock);
622 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
623 			  FL("Max ast entries reached"));
624 		return ret;
625 	}
626 
627 	/* If the AST entry already exists, just return from here.
628 	 * An AST entry with the same MAC address can exist on different
629 	 * radios; if ast_override support is enabled, use search by pdev
630 	 * in this case.
631 	 */
632 	if (soc->ast_override_support) {
633 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
634 							    pdev->pdev_id);
635 		if (ast_entry) {
636 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
637 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
638 				ast_entry->is_active = TRUE;
639 
640 			qdf_spin_unlock_bh(&soc->ast_lock);
641 			return 0;
642 		}
643 		if (is_peer_found) {
644 			/* During WDS to static roaming, peer is added
645 			 * to the list before static AST entry create.
646 			 * So, allow AST entry for STATIC type
647 			 * even if peer is present
648 			 */
649 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
650 				qdf_spin_unlock_bh(&soc->ast_lock);
651 				return 0;
652 			}
653 		}
654 	} else {
655 		/* For WDS_HM_SEC type, entries can be added for the same MAC
656 		 * address, so do not check for an existing entry.
657 		 */
658 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
659 			goto add_ast_entry;
660 
661 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
662 
663 		if (ast_entry) {
664 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
665 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
666 				ast_entry->is_active = TRUE;
667 
668 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
669 			    !ast_entry->delete_in_progress) {
670 				qdf_spin_unlock_bh(&soc->ast_lock);
671 				return 0;
672 			}
673 
674 			/* An add for an HMWDS entry cannot be ignored if an
675 			 * AST entry with the same MAC address already exists.
676 			 *
677 			 * If an AST entry exists with the requested MAC
678 			 * address, send a delete command and register a
679 			 * callback which can take care of adding the HMWDS AST
680 			 * entry on delete confirmation from the target.
681 			 */
682 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
683 			    soc->is_peer_map_unmap_v2) {
684 				struct dp_ast_free_cb_params *param = NULL;
685 
686 				if (ast_entry->type ==
687 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
688 					goto add_ast_entry;
689 
690 				/* save existing callback */
691 				if (ast_entry->callback) {
692 					cb = ast_entry->callback;
693 					cookie = ast_entry->cookie;
694 				}
695 
696 				param = qdf_mem_malloc(sizeof(*param));
697 				if (!param) {
698 					QDF_TRACE(QDF_MODULE_ID_TXRX,
699 						  QDF_TRACE_LEVEL_ERROR,
700 						  "Allocation failed");
701 					qdf_spin_unlock_bh(&soc->ast_lock);
702 					return ret;
703 				}
704 
705 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
706 					     QDF_MAC_ADDR_SIZE);
707 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
708 					     &peer->mac_addr.raw[0],
709 					     QDF_MAC_ADDR_SIZE);
710 				param->type = type;
711 				param->flags = flags;
712 				param->vdev_id = vdev->vdev_id;
713 				ast_entry->callback = dp_peer_free_hmwds_cb;
714 				ast_entry->pdev_id = vdev->pdev->pdev_id;
715 				ast_entry->type = type;
716 				ast_entry->cookie = (void *)param;
717 				if (!ast_entry->delete_in_progress)
718 					dp_peer_del_ast(soc, ast_entry);
719 			}
720 
721 			/* Modify an already existing AST entry from type
722 			 * WDS to MEC on promotion. This serves as a fix when
723 			 * the backbone interfaces are interchanged, wherein a
724 			 * WDS entry becomes its own MEC. The entry should be
725 			 * replaced only when the ast_entry peer matches the
726 			 * peer received in mec event. This additional check
727 			 * is needed in wds repeater cases where a multicast
728 			 * packet from station to the root via the repeater
729 			 * should not remove the wds entry.
730 			 */
731 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
732 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
733 			    (ast_entry->peer == peer)) {
734 				ast_entry->is_active = FALSE;
735 				dp_peer_del_ast(soc, ast_entry);
736 			}
737 			qdf_spin_unlock_bh(&soc->ast_lock);
738 
739 			/* Call the saved callback*/
740 			if (cb) {
741 				cb(soc->ctrl_psoc,
742 				   dp_soc_to_cdp_soc(soc),
743 				   cookie,
744 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
745 			}
746 			return 0;
747 		}
748 	}
749 
750 add_ast_entry:
751 	ast_entry = (struct dp_ast_entry *)
752 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
753 
754 	if (!ast_entry) {
755 		qdf_spin_unlock_bh(&soc->ast_lock);
756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
757 			  FL("fail to allocate ast_entry"));
758 		QDF_ASSERT(0);
759 		return ret;
760 	}
761 
762 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
763 	ast_entry->pdev_id = vdev->pdev->pdev_id;
764 	ast_entry->is_mapped = false;
765 	ast_entry->delete_in_progress = false;
766 
767 	switch (type) {
768 	case CDP_TXRX_AST_TYPE_STATIC:
769 		peer->self_ast_entry = ast_entry;
770 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
771 		if (peer->vdev->opmode == wlan_op_mode_sta)
772 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
773 		break;
774 	case CDP_TXRX_AST_TYPE_SELF:
775 		peer->self_ast_entry = ast_entry;
776 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
777 		break;
778 	case CDP_TXRX_AST_TYPE_WDS:
779 		ast_entry->next_hop = 1;
780 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
781 		break;
782 	case CDP_TXRX_AST_TYPE_WDS_HM:
783 		ast_entry->next_hop = 1;
784 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
785 		break;
786 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
787 		ast_entry->next_hop = 1;
788 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
789 		break;
790 	case CDP_TXRX_AST_TYPE_MEC:
791 		ast_entry->next_hop = 1;
792 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
793 		break;
794 	case CDP_TXRX_AST_TYPE_DA:
795 		peer = peer->vdev->vap_bss_peer;
796 		ast_entry->next_hop = 1;
797 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
798 		break;
799 	default:
800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
801 			FL("Incorrect AST entry type"));
802 	}
803 
804 	ast_entry->is_active = TRUE;
805 	DP_STATS_INC(soc, ast.added, 1);
806 	soc->num_ast_entries++;
807 	dp_peer_ast_hash_add(soc, ast_entry);
808 
809 	ast_entry->peer = peer;
810 
811 	if (type == CDP_TXRX_AST_TYPE_MEC)
812 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
813 	else
814 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
815 
816 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
817 
818 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
819 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
820 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
821 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
822 		if (QDF_STATUS_SUCCESS ==
823 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
824 				soc->ctrl_psoc,
825 				peer->vdev->vdev_id,
826 				peer->mac_addr.raw,
827 				mac_addr,
828 				next_node_mac,
829 				flags)) {
830 			qdf_spin_unlock_bh(&soc->ast_lock);
831 			return 0;
832 		}
833 	}
834 
835 	qdf_spin_unlock_bh(&soc->ast_lock);
836 	return ret;
837 }
838 
839 /*
840  * dp_peer_del_ast() - Delete and free AST entry
841  * @soc: SoC handle
842  * @ast_entry: AST entry of the node
843  *
844  * This function removes the AST entry from peer and soc tables
845  * It assumes caller has taken the ast lock to protect the access to these
846  * tables
847  *
848  * Return: None
849  */
850 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
851 {
852 	struct dp_peer *peer;
853 
854 	if (!ast_entry)
855 		return;
856 
857 	peer =  ast_entry->peer;
858 
859 	dp_peer_ast_send_wds_del(soc, ast_entry);
860 
861 	/*
862 	 * release the reference only if it is mapped
863 	 * to ast_table
864 	 */
865 	if (ast_entry->is_mapped)
866 		soc->ast_table[ast_entry->ast_idx] = NULL;
867 
868 	/*
869 	 * If peer map v2 is enabled, we do not free the AST entry
870 	 * here; it is supposed to be freed in the unmap event (after
871 	 * we receive delete confirmation from the target).
872 	 *
873 	 * If the peer_id is invalid, we did not get the peer map event
874 	 * for the peer; only in that case free the AST entry from here.
875 	 */
876 	if (soc->is_peer_map_unmap_v2) {
877 
878 		/*
879 		 * For HM_SEC and SELF type we do not receive an unmap event,
880 		 * so free the ast_entry from here itself.
881 		 */
882 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
883 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
884 			return;
885 	}
886 
887 	/* SELF and STATIC entries are removed in teardown itself */
888 	if (ast_entry->next_hop)
889 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
890 
891 	DP_STATS_INC(soc, ast.deleted, 1);
892 	dp_peer_ast_hash_remove(soc, ast_entry);
893 	dp_peer_ast_cleanup(soc, ast_entry);
894 	qdf_mem_free(ast_entry);
895 	soc->num_ast_entries--;
896 }
897 
898 /*
899  * dp_peer_update_ast() - Update AST entry to the roamed peer
900  * @soc: SoC handle
901  * @peer: peer to which ast node belongs
902  * @ast_entry: AST entry of the node
903  * @flags: wds or hmwds
904  *
905  * This function updates the AST entry to the roamed peer and soc tables
906  * It assumes caller has taken the ast lock to protect the access to these
907  * tables
908  *
909  * Return: 0 if ast entry is updated successfully
910  *         -1 failure
911  */
912 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
913 		       struct dp_ast_entry *ast_entry, uint32_t flags)
914 {
915 	int ret = -1;
916 	struct dp_peer *old_peer;
917 
918 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
919 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
920 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
921 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
922 		  peer->mac_addr.raw);
923 
924 	/* Do not send AST update in below cases
925 	 *  1) Ast entry delete has already triggered
926 	 *  2) Peer delete is already triggered
927 	 *  3) We did not get the HTT map for create event
928 	 */
929 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
930 	    !ast_entry->is_mapped)
931 		return ret;
932 
933 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
934 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
935 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
936 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
937 		return 0;
938 
939 	/*
940 	 * Avoids flood of WMI update messages sent to FW for same peer.
941 	 */
942 	if (qdf_unlikely(ast_entry->peer == peer) &&
943 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
944 	    (ast_entry->peer->vdev == peer->vdev) &&
945 	    (ast_entry->is_active))
946 		return 0;
947 
948 	old_peer = ast_entry->peer;
949 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
950 
951 	ast_entry->peer = peer;
952 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
953 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
954 	ast_entry->is_active = TRUE;
955 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
956 
957 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
958 				soc->ctrl_psoc,
959 				peer->vdev->vdev_id,
960 				ast_entry->mac_addr.raw,
961 				peer->mac_addr.raw,
962 				flags);
963 
964 	return ret;
965 }
966 
967 /*
968  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
969  * @soc: SoC handle
970  * @ast_entry: AST entry of the node
971  *
972  * This function gets the pdev_id from the ast entry.
973  *
974  * Return: (uint8_t) pdev_id
975  */
976 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
977 				struct dp_ast_entry *ast_entry)
978 {
979 	return ast_entry->pdev_id;
980 }
981 
982 /*
983  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
984  * @soc: SoC handle
985  * @ast_entry: AST entry of the node
986  *
987  * This function gets the next hop from the ast entry.
988  *
989  * Return: (uint8_t) next_hop
990  */
991 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
992 				struct dp_ast_entry *ast_entry)
993 {
994 	return ast_entry->next_hop;
995 }
996 
997 /*
998  * dp_peer_ast_set_type() - set type in the ast entry
999  * @soc: SoC handle
1000  * @ast_entry: AST entry of the node
1001  *
1002  * This function sets the type in the ast entry.
1003  *
1004  * Return: None
1005  */
1006 void dp_peer_ast_set_type(struct dp_soc *soc,
1007 				struct dp_ast_entry *ast_entry,
1008 				enum cdp_txrx_ast_entry_type type)
1009 {
1010 	ast_entry->type = type;
1011 }
1012 
1013 #else
1014 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
1015 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
1016 		uint32_t flags)
1017 {
1018 	return 1;
1019 }
1020 
1021 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1022 {
1023 }
1024 
1025 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1026 			struct dp_ast_entry *ast_entry, uint32_t flags)
1027 {
1028 	return 1;
1029 }
1030 
1031 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1032 					       uint8_t *ast_mac_addr)
1033 {
1034 	return NULL;
1035 }
1036 
1037 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1038 						     uint8_t *ast_mac_addr,
1039 						     uint8_t pdev_id)
1040 {
1041 	return NULL;
1042 }
1043 
1044 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1045 {
1046 	return 0;
1047 }
1048 
1049 static inline void dp_peer_map_ast(struct dp_soc *soc,
1050 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1051 	uint8_t vdev_id, uint16_t ast_hash)
1052 {
1053 	return;
1054 }
1055 
1056 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1057 {
1058 }
1059 
1060 void dp_peer_ast_set_type(struct dp_soc *soc,
1061 				struct dp_ast_entry *ast_entry,
1062 				enum cdp_txrx_ast_entry_type type)
1063 {
1064 }
1065 
1066 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1067 				struct dp_ast_entry *ast_entry)
1068 {
1069 	return 0xff;
1070 }
1071 
1072 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1073 				struct dp_ast_entry *ast_entry)
1074 {
1075 	return 0xff;
1076 }
1077 
1078 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1079 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1080 {
1081 	return 1;
1082 }
1083 
1084 #endif
1085 
1086 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1087 			      struct dp_ast_entry *ast_entry)
1088 {
1089 	struct dp_peer *peer = ast_entry->peer;
1090 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1091 
1092 	if (ast_entry->delete_in_progress)
1093 		return;
1094 
1095 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1096 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1097 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1098 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1099 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1100 
1101 	if (ast_entry->next_hop) {
1102 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1103 						    peer->vdev->vdev_id,
1104 						    ast_entry->mac_addr.raw,
1105 						    ast_entry->type);
1106 	}
1107 
1108 	/* Remove SELF and STATIC entries in teardown itself */
1109 	if (!ast_entry->next_hop) {
1110 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1111 		peer->self_ast_entry = NULL;
1112 		ast_entry->peer = NULL;
1113 	}
1114 
1115 	ast_entry->delete_in_progress = true;
1116 }
1117 
1118 /**
1119  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1120  * @soc: soc handle
1121  * @peer: peer handle
1122  * @mac_addr: mac address of the AST entry to search and delete
1123  *
1124  * find the ast entry from the peer list using the mac address and free
1125  * the entry.
1126  *
1127  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1128  */
1129 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1130 					 struct dp_peer *peer,
1131 					 uint8_t *mac_addr)
1132 {
1133 	struct dp_ast_entry *ast_entry;
1134 	void *cookie = NULL;
1135 	txrx_ast_free_cb cb = NULL;
1136 
1137 	/*
1138 	 * release the reference only if it is mapped
1139 	 * to ast_table
1140 	 */
1141 
1142 	qdf_spin_lock_bh(&soc->ast_lock);
1143 
1144 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1145 	if (!ast_entry) {
1146 		qdf_spin_unlock_bh(&soc->ast_lock);
1147 		return QDF_STATUS_E_NOENT;
1148 	} else if (ast_entry->is_mapped) {
1149 		soc->ast_table[ast_entry->ast_idx] = NULL;
1150 	}
1151 
1152 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1153 	DP_STATS_INC(soc, ast.deleted, 1);
1154 	dp_peer_ast_hash_remove(soc, ast_entry);
1155 
1156 	cb = ast_entry->callback;
1157 	cookie = ast_entry->cookie;
1158 	ast_entry->callback = NULL;
1159 	ast_entry->cookie = NULL;
1160 
1161 	if (ast_entry == peer->self_ast_entry)
1162 		peer->self_ast_entry = NULL;
1163 
1164 	soc->num_ast_entries--;
1165 	qdf_spin_unlock_bh(&soc->ast_lock);
1166 
1167 	if (cb) {
1168 		cb(soc->ctrl_psoc,
1169 		   dp_soc_to_cdp_soc(soc),
1170 		   cookie,
1171 		   CDP_TXRX_AST_DELETED);
1172 	}
1173 	qdf_mem_free(ast_entry);
1174 
1175 	return QDF_STATUS_SUCCESS;
1176 }
1177 
1178 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1179 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1180 {
1181 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1182 	unsigned index;
1183 	struct dp_peer *peer;
1184 
1185 	if (mac_addr_is_aligned) {
1186 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1187 	} else {
1188 		qdf_mem_copy(
1189 			&local_mac_addr_aligned.raw[0],
1190 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1191 		mac_addr = &local_mac_addr_aligned;
1192 	}
1193 	index = dp_peer_find_hash_index(soc, mac_addr);
1194 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1195 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1196 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1197 			((peer->vdev->vdev_id == vdev_id) ||
1198 			 (vdev_id == DP_VDEV_ALL))) {
1199 			/* found it - increment the ref count before releasing
1200 			 * the lock
1201 			 */
1202 			qdf_atomic_inc(&peer->ref_cnt);
1203 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1204 			return peer;
1205 		}
1206 	}
1207 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1208 	return NULL; /* failure */
1209 }
1210 
1211 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1212 {
1213 	unsigned index;
1214 	struct dp_peer *tmppeer = NULL;
1215 	int found = 0;
1216 
1217 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1218 	/* Check if tail is not empty before delete*/
1219 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1220 	/*
1221 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1222 	 * by the caller.
1223 	 * The caller needs to hold the lock from the time the peer object's
1224 	 * reference count is decremented and tested up through the time the
1225 	 * reference to the peer object is removed from the hash table, by
1226 	 * this function.
1227 	 * Holding the lock only while removing the peer object reference
1228 	 * from the hash table keeps the hash table consistent, but does not
1229 	 * protect against a new HL tx context starting to use the peer object
1230 	 * if it looks up the peer object from its MAC address just after the
1231 	 * peer ref count is decremented to zero, but just before the peer
1232 	 * object reference is removed from the hash table.
1233 	 */
1234 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1235 		if (tmppeer == peer) {
1236 			found = 1;
1237 			break;
1238 		}
1239 	}
1240 	QDF_ASSERT(found);
1241 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1242 }
1243 
1244 void dp_peer_find_hash_erase(struct dp_soc *soc)
1245 {
1246 	int i;
1247 
1248 	/*
1249 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1250 	 * it's known that the soc is no longer in use.
1251 	 */
1252 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1253 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1254 			struct dp_peer *peer, *peer_next;
1255 
1256 			/*
1257 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1258 			 * memory access violation after peer is freed
1259 			 */
1260 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1261 				hash_list_elem, peer_next) {
1262 				/*
1263 				 * Don't remove the peer from the hash table -
1264 				 * that would modify the list we are currently
1265 				 * traversing, and it's not necessary anyway.
1266 				 */
1267 				/*
1268 				 * Artificially adjust the peer's ref count to
1269 				 * 1, so it will get deleted by
1270 				 * dp_peer_unref_delete.
1271 				 */
1272 				/* set to zero */
1273 				qdf_atomic_init(&peer->ref_cnt);
1274 				/* incr to one */
1275 				qdf_atomic_inc(&peer->ref_cnt);
1276 				dp_peer_unref_delete(peer);
1277 			}
1278 		}
1279 	}
1280 }
1281 
1282 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1283 {
1284 	if (soc->ast_table) {
1285 		qdf_mem_free(soc->ast_table);
1286 		soc->ast_table = NULL;
1287 	}
1288 }
1289 
1290 static void dp_peer_find_map_detach(struct dp_soc *soc)
1291 {
1292 	if (soc->peer_id_to_obj_map) {
1293 		qdf_mem_free(soc->peer_id_to_obj_map);
1294 		soc->peer_id_to_obj_map = NULL;
1295 	}
1296 }
1297 
1298 int dp_peer_find_attach(struct dp_soc *soc)
1299 {
1300 	if (dp_peer_find_map_attach(soc))
1301 		return 1;
1302 
1303 	if (dp_peer_find_hash_attach(soc)) {
1304 		dp_peer_find_map_detach(soc);
1305 		return 1;
1306 	}
1307 
1308 	if (dp_peer_ast_table_attach(soc)) {
1309 		dp_peer_find_hash_detach(soc);
1310 		dp_peer_find_map_detach(soc);
1311 		return 1;
1312 	}
1313 
1314 	if (dp_peer_ast_hash_attach(soc)) {
1315 		dp_peer_ast_table_detach(soc);
1316 		dp_peer_find_hash_detach(soc);
1317 		dp_peer_find_map_detach(soc);
1318 		return 1;
1319 	}
1320 
1321 	return 0; /* success */
1322 }
1323 
1324 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1325 	union hal_reo_status *reo_status)
1326 {
1327 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1328 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1329 
1330 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1331 		return;
1332 
1333 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1334 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1335 			       queue_status->header.status, rx_tid->tid);
1336 		return;
1337 	}
1338 
1339 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1340 		       "ssn: %d\n"
1341 		       "curr_idx  : %d\n"
1342 		       "pn_31_0   : %08x\n"
1343 		       "pn_63_32  : %08x\n"
1344 		       "pn_95_64  : %08x\n"
1345 		       "pn_127_96 : %08x\n"
1346 		       "last_rx_enq_tstamp : %08x\n"
1347 		       "last_rx_deq_tstamp : %08x\n"
1348 		       "rx_bitmap_31_0     : %08x\n"
1349 		       "rx_bitmap_63_32    : %08x\n"
1350 		       "rx_bitmap_95_64    : %08x\n"
1351 		       "rx_bitmap_127_96   : %08x\n"
1352 		       "rx_bitmap_159_128  : %08x\n"
1353 		       "rx_bitmap_191_160  : %08x\n"
1354 		       "rx_bitmap_223_192  : %08x\n"
1355 		       "rx_bitmap_255_224  : %08x\n",
1356 		       rx_tid->tid,
1357 		       queue_status->ssn, queue_status->curr_idx,
1358 		       queue_status->pn_31_0, queue_status->pn_63_32,
1359 		       queue_status->pn_95_64, queue_status->pn_127_96,
1360 		       queue_status->last_rx_enq_tstamp,
1361 		       queue_status->last_rx_deq_tstamp,
1362 		       queue_status->rx_bitmap_31_0,
1363 		       queue_status->rx_bitmap_63_32,
1364 		       queue_status->rx_bitmap_95_64,
1365 		       queue_status->rx_bitmap_127_96,
1366 		       queue_status->rx_bitmap_159_128,
1367 		       queue_status->rx_bitmap_191_160,
1368 		       queue_status->rx_bitmap_223_192,
1369 		       queue_status->rx_bitmap_255_224);
1370 
1371 	DP_PRINT_STATS(
1372 		       "curr_mpdu_cnt      : %d\n"
1373 		       "curr_msdu_cnt      : %d\n"
1374 		       "fwd_timeout_cnt    : %d\n"
1375 		       "fwd_bar_cnt        : %d\n"
1376 		       "dup_cnt            : %d\n"
1377 		       "frms_in_order_cnt  : %d\n"
1378 		       "bar_rcvd_cnt       : %d\n"
1379 		       "mpdu_frms_cnt      : %d\n"
1380 		       "msdu_frms_cnt      : %d\n"
1381 		       "total_byte_cnt     : %d\n"
1382 		       "late_recv_mpdu_cnt : %d\n"
1383 		       "win_jump_2k        : %d\n"
1384 		       "hole_cnt           : %d\n",
1385 		       queue_status->curr_mpdu_cnt,
1386 		       queue_status->curr_msdu_cnt,
1387 		       queue_status->fwd_timeout_cnt,
1388 		       queue_status->fwd_bar_cnt,
1389 		       queue_status->dup_cnt,
1390 		       queue_status->frms_in_order_cnt,
1391 		       queue_status->bar_rcvd_cnt,
1392 		       queue_status->mpdu_frms_cnt,
1393 		       queue_status->msdu_frms_cnt,
1394 		       queue_status->total_cnt,
1395 		       queue_status->late_recv_mpdu_cnt,
1396 		       queue_status->win_jump_2k,
1397 		       queue_status->hole_cnt);
1398 
1399 	DP_PRINT_STATS("Addba Req          : %d\n"
1400 			"Addba Resp         : %d\n"
1401 			"Addba Resp success : %d\n"
1402 			"Addba Resp failed  : %d\n"
1403 			"Delba Req received : %d\n"
1404 			"Delba Tx success   : %d\n"
1405 			"Delba Tx Fail      : %d\n"
1406 			"BA window size     : %d\n"
1407 			"Pn size            : %d\n",
1408 			rx_tid->num_of_addba_req,
1409 			rx_tid->num_of_addba_resp,
1410 			rx_tid->num_addba_rsp_success,
1411 			rx_tid->num_addba_rsp_failed,
1412 			rx_tid->num_of_delba_req,
1413 			rx_tid->delba_tx_success_cnt,
1414 			rx_tid->delba_tx_fail_cnt,
1415 			rx_tid->ba_win_size,
1416 			rx_tid->pn_size);
1417 }
1418 
1419 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1420 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1421 	uint8_t vdev_id)
1422 {
1423 	struct dp_peer *peer;
1424 
1425 	QDF_ASSERT(peer_id <= soc->max_peers);
1426 	/* check if there's already a peer object with this MAC address */
1427 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1428 		0 /* is aligned */, vdev_id);
1429 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1430 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1431 		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1432 		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1433 		  peer_mac_addr[4], peer_mac_addr[5]);
1434 
1435 	if (peer) {
1436 		/* peer's ref count was already incremented by
1437 		 * peer_find_hash_find
1438 		 */
1439 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1440 			  "%s: ref_cnt: %d", __func__,
1441 			   qdf_atomic_read(&peer->ref_cnt));
1442 		if (!soc->peer_id_to_obj_map[peer_id])
1443 			soc->peer_id_to_obj_map[peer_id] = peer;
1444 		else {
1445 			/* Peer map event came for peer_id which
1446 			 * is already mapped, this is not expected
1447 			 */
1448 			QDF_ASSERT(0);
1449 		}
1450 
1451 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1452 			/* TBDXXX: assert for now */
1453 			QDF_ASSERT(0);
1454 		}
1455 
1456 		return peer;
1457 	}
1458 
1459 	return NULL;
1460 }
1461 
1462 /**
1463  * dp_rx_peer_map_handler() - handle peer map event from firmware
1464  * @soc - generic soc handle
1465  * @peer_id - peer_id from firmware
1466  * @hw_peer_id - ast index for this peer
1467  * @vdev_id - vdev ID
1468  * @peer_mac_addr - mac address of the peer
1469  * @ast_hash - ast hash value
1470  * @is_wds - flag to indicate peer map event for WDS ast entry
1471  *
1472  * associate the peer_id that firmware provided with peer entry
1473  * and update the ast table in the host with the hw_peer_id.
1474  *
1475  * Return: none
1476  */
1477 
1478 void
1479 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1480 		       uint16_t hw_peer_id, uint8_t vdev_id,
1481 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1482 		       uint8_t is_wds)
1483 {
1484 	struct dp_peer *peer = NULL;
1485 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1486 
1487 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1488 		soc, peer_id, hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1489 		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1490 		  peer_mac_addr[5], vdev_id);
1491 
1492 	/* Peer map event for WDS ast entry get the peer from
1493 	 * obj map
1494 	 */
1495 	if (is_wds) {
1496 		peer = soc->peer_id_to_obj_map[peer_id];
1497 		/*
1498 		 * In certain cases, such as an auth attack on a repeater,
1499 		 * the number of ast_entries falling in the same hash
1500 		 * bucket can exceed the max_skid
1501 		 * length supported by HW in the root AP. In these cases
1502 		 * the FW will return the hw_peer_id (ast_index) as
1503 		 * 0xffff indicating HW could not add the entry in
1504 		 * its table. Host has to delete the entry from its
1505 		 * table in these cases.
1506 		 */
1507 		if (hw_peer_id == HTT_INVALID_PEER) {
1508 			DP_STATS_INC(soc, ast.map_err, 1);
1509 			if (!dp_peer_ast_free_entry_by_mac(soc,
1510 							   peer,
1511 							   peer_mac_addr))
1512 				return;
1513 
1514 			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1515 				 peer, peer->peer_ids[0],
1516 				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1517 				 is_wds);
1518 
1519 			return;
1520 		}
1521 
1522 	} else {
1523 		/*
1524 		 * It's the responsibility of the CP and FW to ensure
1525 		 * that peer is created successfully. Ideally DP should
1526 		 * not hit the below condition for directly associated
1527 		 * peers.
1528 		 */
1529 		if ((hw_peer_id < 0) ||
1530 		    (hw_peer_id >=
1531 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1532 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1533 				  "invalid hw_peer_id: %d", hw_peer_id);
1534 			qdf_assert_always(0);
1535 		}
1536 
1537 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1538 					   hw_peer_id, vdev_id);
1539 
1540 		if (peer) {
1541 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1542 			    qdf_mem_cmp(peer->mac_addr.raw,
1543 					peer->vdev->mac_addr.raw,
1544 					QDF_MAC_ADDR_SIZE) != 0) {
1545 				dp_info("STA vdev bss_peer!!!!");
1546 				peer->bss_peer = 1;
1547 				peer->vdev->vap_bss_peer = peer;
1548 			}
1549 
1550 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1551 				peer->vdev->bss_ast_hash = ast_hash;
1552 				peer->vdev->bss_ast_idx = hw_peer_id;
1553 			}
1554 
1555 			/* Add the AST entry in case the self AST entry
1556 			 * was deleted due to a DP/CP sync issue.
1557 			 *
1558 			 * self_ast_entry is modified in the peer create
1559 			 * and peer unmap paths, which cannot run in
1560 			 * parallel with peer map, so no lock is needed
1561 			 * before referring to it.
1562 			 */
1563 			if (!peer->self_ast_entry) {
1564 				dp_info("Add self ast from map %pM",
1565 					peer_mac_addr);
1566 				dp_peer_add_ast(soc, peer,
1567 						peer_mac_addr,
1568 						type, 0);
1569 			}
1570 
1571 		}
1572 	}
1573 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1574 			hw_peer_id, vdev_id, ast_hash);
1575 }
1576 
1577 /**
1578  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1579  * @soc_handle - genereic soc handle
1580  * @peeri_id - peer_id from firmware
1581  * @vdev_id - vdev ID
1582  * @mac_addr - mac address of the peer or wds entry
1583  * @is_wds - flag to indicate peer map event for WDS ast entry
1584  *
1585  * Return: none
1586  */
1587 void
1588 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1589 			 uint8_t vdev_id, uint8_t *mac_addr,
1590 			 uint8_t is_wds)
1591 {
1592 	struct dp_peer *peer;
1593 	uint8_t i;
1594 
1595 	peer = __dp_peer_find_by_id(soc, peer_id);
1596 
1597 	/*
1598 	 * Currently peer IDs are assigned for vdevs as well as peers.
1599 	 * If the peer ID is for a vdev, then the peer pointer stored
1600 	 * in peer_id_to_obj_map will be NULL.
1601 	 */
1602 	if (!peer) {
1603 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1604 		return;
1605 	}
1606 
1607 	/* If V2 peer map messages are enabled, the AST entry has to be freed
1608 	 */
1609 	if (soc->is_peer_map_unmap_v2 && is_wds) {
1610 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1611 			return;
1612 
1613 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1614 			 peer, peer->peer_ids[0],
1615 			 peer->mac_addr.raw, mac_addr, vdev_id,
1616 			 is_wds);
1617 
1618 		return;
1619 	}
1620 
1621 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1622 		soc, peer_id, peer);
1623 
1624 	soc->peer_id_to_obj_map[peer_id] = NULL;
1625 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1626 		if (peer->peer_ids[i] == peer_id) {
1627 			peer->peer_ids[i] = HTT_INVALID_PEER;
1628 			break;
1629 		}
1630 	}
1631 
1632 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1633 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1634 				peer_id, vdev_id);
1635 	}
1636 
1637 	/*
1638 	 * Remove a reference to the peer.
1639 	 * If there are no more references, delete the peer object.
1640 	 */
1641 	dp_peer_unref_delete(peer);
1642 }
1643 
1644 void
1645 dp_peer_find_detach(struct dp_soc *soc)
1646 {
1647 	dp_peer_find_map_detach(soc);
1648 	dp_peer_find_hash_detach(soc);
1649 	dp_peer_ast_hash_detach(soc);
1650 	dp_peer_ast_table_detach(soc);
1651 }
1652 
1653 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1654 	union hal_reo_status *reo_status)
1655 {
1656 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1657 
1658 	if ((reo_status->rx_queue_status.header.status !=
1659 		HAL_REO_CMD_SUCCESS) &&
1660 		(reo_status->rx_queue_status.header.status !=
1661 		HAL_REO_CMD_DRAIN)) {
1662 		/* Should not happen normally. Just print error for now */
1663 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1664 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1665 			  __func__,
1666 			  reo_status->rx_queue_status.header.status,
1667 			  rx_tid->tid);
1668 	}
1669 }
1670 
1671 /*
1672  * dp_find_peer_by_addr - find peer instance by mac address
1673  * @dev: physical device instance
1674  * @peer_mac_addr: peer mac address
1675  *
1676  * Return: peer instance pointer
1677  */
1678 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
1679 {
1680 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1681 	struct dp_peer *peer;
1682 
1683 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1684 
1685 	if (!peer)
1686 		return NULL;
1687 
1688 	dp_verbose_debug("peer %pK mac: %pM", peer,
1689 			 peer->mac_addr.raw);
1690 
1691 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1692 	 * Decrement it here.
1693 	 */
1694 	dp_peer_unref_delete(peer);
1695 
1696 	return peer;
1697 }
1698 
1699 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1700 {
1701 	struct ol_if_ops *ol_ops = NULL;
1702 	bool is_roaming = false;
1703 	uint8_t vdev_id = -1;
1704 
1705 	if (!peer) {
1706 		dp_info("Peer is NULL. No roaming possible");
1707 		return false;
1708 	}
1709 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1710 
1711 	if (ol_ops && ol_ops->is_roam_inprogress) {
1712 		dp_get_vdevid(peer, &vdev_id);
1713 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1714 	}
1715 
1716 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1717 		peer->mac_addr.raw, vdev_id, is_roaming);
1718 
1719 	return is_roaming;
1720 }
1721 
1722 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1723 					 ba_window_size, uint32_t start_seq)
1724 {
1725 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1726 	struct dp_soc *soc = peer->vdev->pdev->soc;
1727 	struct hal_reo_cmd_params params;
1728 
1729 	qdf_mem_zero(&params, sizeof(params));
1730 
1731 	params.std.need_status = 1;
1732 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1733 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1734 	params.u.upd_queue_params.update_ba_window_size = 1;
1735 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1736 
1737 	if (start_seq < IEEE80211_SEQ_MAX) {
1738 		params.u.upd_queue_params.update_ssn = 1;
1739 		params.u.upd_queue_params.ssn = start_seq;
1740 	} else {
1741 	    dp_set_ssn_valid_flag(&params, 0);
1742 	}
1743 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1744 			dp_rx_tid_update_cb, rx_tid);
1745 
1746 	rx_tid->ba_win_size = ba_window_size;
1747 
1748 	if (dp_get_peer_vdev_roaming_in_progress(peer))
1749 		return QDF_STATUS_E_PERM;
1750 
1751 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
1752 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1753 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
1754 			peer->vdev->vdev_id, peer->mac_addr.raw,
1755 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1756 
1757 	return QDF_STATUS_SUCCESS;
1758 }
1759 
1760 /*
1761  * dp_reo_desc_free() - Callback to free reo descriptor memory after
1762  * HW cache flush
1763  *
1764  * @soc: DP SOC handle
1765  * @cb_ctxt: Callback context
1766  * @reo_status: REO command status
1767  */
1768 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1769 	union hal_reo_status *reo_status)
1770 {
1771 	struct reo_desc_list_node *freedesc =
1772 		(struct reo_desc_list_node *)cb_ctxt;
1773 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1774 
1775 	if ((reo_status->fl_cache_status.header.status !=
1776 		HAL_REO_CMD_SUCCESS) &&
1777 		(reo_status->fl_cache_status.header.status !=
1778 		HAL_REO_CMD_DRAIN)) {
1779 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1780 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1781 			  __func__,
1782 			  reo_status->rx_queue_status.header.status,
1783 			  freedesc->rx_tid.tid);
1784 	}
1785 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1786 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1787 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1788 	qdf_mem_unmap_nbytes_single(soc->osdev,
1789 		rx_tid->hw_qdesc_paddr,
1790 		QDF_DMA_BIDIRECTIONAL,
1791 		rx_tid->hw_qdesc_alloc_size);
1792 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1793 	qdf_mem_free(freedesc);
1794 }
1795 
1796 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
1797 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1798 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1799 {
1800 	if (dma_addr < 0x50000000)
1801 		return QDF_STATUS_E_FAILURE;
1802 	else
1803 		return QDF_STATUS_SUCCESS;
1804 }
1805 #else
1806 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1807 {
1808 	return QDF_STATUS_SUCCESS;
1809 }
1810 #endif
1811 
1812 
1813 /*
1814  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1815  * @peer: Datapath peer handle
1816  * @tid: TID
1817  * @ba_window_size: BlockAck window size
1818  * @start_seq: Starting sequence number
1819  *
1820  * Return: QDF_STATUS code
1821  */
1822 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1823 				 uint32_t ba_window_size, uint32_t start_seq)
1824 {
1825 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1826 	struct dp_vdev *vdev = peer->vdev;
1827 	struct dp_soc *soc = vdev->pdev->soc;
1828 	uint32_t hw_qdesc_size;
1829 	uint32_t hw_qdesc_align;
1830 	int hal_pn_type;
1831 	void *hw_qdesc_vaddr;
1832 	uint32_t alloc_tries = 0;
1833 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1834 
1835 	if (peer->delete_in_progress ||
1836 	    !qdf_atomic_read(&peer->is_default_route_set))
1837 		return QDF_STATUS_E_FAILURE;
1838 
1839 	rx_tid->ba_win_size = ba_window_size;
1840 	if (rx_tid->hw_qdesc_vaddr_unaligned)
1841 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1842 			start_seq);
1843 	rx_tid->delba_tx_status = 0;
1844 	rx_tid->ppdu_id_2k = 0;
1845 	rx_tid->num_of_addba_req = 0;
1846 	rx_tid->num_of_delba_req = 0;
1847 	rx_tid->num_of_addba_resp = 0;
1848 	rx_tid->num_addba_rsp_failed = 0;
1849 	rx_tid->num_addba_rsp_success = 0;
1850 	rx_tid->delba_tx_success_cnt = 0;
1851 	rx_tid->delba_tx_fail_cnt = 0;
1852 	rx_tid->statuscode = 0;
1853 
1854 	/* TODO: Allocating HW queue descriptors based on max BA window size
1855 	 * for all QOS TIDs so that same descriptor can be used later when
1856 	 * ADDBA request is received. This should be changed to allocate HW
1857 	 * queue descriptors based on BA window size being negotiated (0 for
1858 	 * non BA cases), and reallocate when BA window size changes and also
1859 	 * send WMI message to FW to change the REO queue descriptor in Rx
1860 	 * peer entry as part of dp_rx_tid_update.
1861 	 */
1862 	if (tid != DP_NON_QOS_TID)
1863 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1864 			HAL_RX_MAX_BA_WINDOW, tid);
1865 	else
1866 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1867 			ba_window_size, tid);
1868 
1869 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1870 	/* To avoid unnecessary extra allocation for alignment, try allocating
1871 	 * exact size and see if we already have aligned address.
1872 	 */
1873 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1874 
1875 try_desc_alloc:
1876 	rx_tid->hw_qdesc_vaddr_unaligned =
1877 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1878 
1879 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1880 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1881 			  "%s: Rx tid HW desc alloc failed: tid %d",
1882 			  __func__, tid);
1883 		return QDF_STATUS_E_NOMEM;
1884 	}
1885 
1886 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1887 		hw_qdesc_align) {
1888 		/* Address allocated above is not aligned. Allocate extra
1889 		 * memory for alignment
1890 		 */
1891 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1892 		rx_tid->hw_qdesc_vaddr_unaligned =
1893 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1894 					hw_qdesc_align - 1);
1895 
1896 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1897 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1898 				  "%s: Rx tid HW desc alloc failed: tid %d",
1899 				  __func__, tid);
1900 			return QDF_STATUS_E_NOMEM;
1901 		}
1902 
1903 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1904 			rx_tid->hw_qdesc_vaddr_unaligned,
1905 			hw_qdesc_align);
1906 
1907 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1908 			  "%s: Total Size %d Aligned Addr %pK",
1909 			  __func__, rx_tid->hw_qdesc_alloc_size,
1910 			  hw_qdesc_vaddr);
1911 
1912 	} else {
1913 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1914 	}
1915 
1916 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1917 	 * Currently this is set based on htt indication
1918 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1919 	 */
1920 	switch (peer->security[dp_sec_ucast].sec_type) {
1921 	case cdp_sec_type_tkip_nomic:
1922 	case cdp_sec_type_aes_ccmp:
1923 	case cdp_sec_type_aes_ccmp_256:
1924 	case cdp_sec_type_aes_gcmp:
1925 	case cdp_sec_type_aes_gcmp_256:
1926 		hal_pn_type = HAL_PN_WPA;
1927 		break;
1928 	case cdp_sec_type_wapi:
1929 		if (vdev->opmode == wlan_op_mode_ap)
1930 			hal_pn_type = HAL_PN_WAPI_EVEN;
1931 		else
1932 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1933 		break;
1934 	default:
1935 		hal_pn_type = HAL_PN_NONE;
1936 		break;
1937 	}
1938 
1939 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1940 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1941 
1942 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1943 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1944 		&(rx_tid->hw_qdesc_paddr));
1945 
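	/* On platforms where dp_reo_desc_addr_chk() enforces a bus address
	 * constraint (Hawkeye emulation on x86), retry the allocation a
	 * bounded number of times until a usable address is obtained.
	 */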
1946 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1947 			QDF_STATUS_SUCCESS) {
1948 		if (alloc_tries++ < 10) {
1949 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1950 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1951 			goto try_desc_alloc;
1952 		} else {
1953 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1954 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1955 				  __func__, tid);
1956 			err = QDF_STATUS_E_NOMEM;
1957 			goto error;
1958 		}
1959 	}
1960 
1961 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
1962 		err = QDF_STATUS_E_PERM;
1963 		goto error;
1964 	}
1965 
1966 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1967 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1968 		    soc->ctrl_psoc,
1969 		    peer->vdev->pdev->pdev_id,
1970 		    peer->vdev->vdev_id,
1971 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1972 		    1, ba_window_size)) {
1973 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1974 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
1975 				  __func__, tid);
1976 			err = QDF_STATUS_E_FAILURE;
1977 			goto error;
1978 		}
1979 	}
1980 	return QDF_STATUS_SUCCESS;
1981 error:
1982 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1983 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
1984 		    QDF_STATUS_SUCCESS)
1985 			qdf_mem_unmap_nbytes_single(
1986 				soc->osdev,
1987 				rx_tid->hw_qdesc_paddr,
1988 				QDF_DMA_BIDIRECTIONAL,
1989 				rx_tid->hw_qdesc_alloc_size);
1990 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1991 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1992 	}
1993 	return err;
1994 }
1995 
1996 #ifdef REO_DESC_DEFER_FREE
1997 /*
1998  * dp_reo_desc_clean_up() - If the command to flush the base descriptor
1999  * fails, add the descriptor back to the freelist and defer the deletion
2000  *
2001  * @soc: DP SOC handle
2002  * @desc: Base descriptor to be freed
2003  * @reo_status: REO command status
2004  */
2005 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2006 				 struct reo_desc_list_node *desc,
2007 				 union hal_reo_status *reo_status)
2008 {
2009 	desc->free_ts = qdf_get_system_timestamp();
2010 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2011 	qdf_list_insert_back(&soc->reo_desc_freelist,
2012 			     (qdf_list_node_t *)desc);
2013 }
2014 
2015 #else
2016 /*
2017  * dp_reo_desc_clean_up() - If sending the REO command to flush the
2018  * cache fails, free the base REO descriptor anyway
2019  *
2020  * @soc: DP SOC handle
2021  * @desc: Base descriptor to be freed
2022  * @reo_status: REO command status
2023  */
2024 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2025 				 struct reo_desc_list_node *desc,
2026 				 union hal_reo_status *reo_status)
2027 {
2028 	if (reo_status) {
2029 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2030 		reo_status->fl_cache_status.header.status = 0;
2031 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2032 	}
2033 }
2034 #endif
2035 
2036 /*
2037  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2038  * cmd and re-insert desc into free list if send fails.
2039  *
2040  * @soc: DP SOC handle
2041  * @desc: desc with resend update cmd flag set
2042  * @rx_tid: RX TID of the descriptor, for which the update cmd resets the
2043  * valid field to 0 in HW
2044  */
2045 static void dp_resend_update_reo_cmd(struct dp_soc *soc,
2046 				     struct reo_desc_list_node *desc,
2047 				     struct dp_rx_tid *rx_tid)
2048 {
2049 	struct hal_reo_cmd_params params;
2050 
2051 	qdf_mem_zero(&params, sizeof(params));
2052 	params.std.need_status = 1;
2053 	params.std.addr_lo =
2054 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2055 	params.std.addr_hi =
2056 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2057 	params.u.upd_queue_params.update_vld = 1;
2058 	params.u.upd_queue_params.vld = 0;
2059 	desc->resend_update_reo_cmd = false;
2060 	/*
2061 	 * If the cmd send fails then set resend_update_reo_cmd flag
2062 	 * and insert the desc at the end of the free list to retry.
2063 	 */
2064 	if (dp_reo_send_cmd(soc,
2065 			    CMD_UPDATE_RX_REO_QUEUE,
2066 			    &params,
2067 			    dp_rx_tid_delete_cb,
2068 			    (void *)desc)
2069 	    != QDF_STATUS_SUCCESS) {
2070 		desc->resend_update_reo_cmd = true;
2071 		desc->free_ts = qdf_get_system_timestamp();
2072 		qdf_list_insert_back(&soc->reo_desc_freelist,
2073 				     (qdf_list_node_t *)desc);
2074 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2075 	}
2076 }
2077 
2078 /*
2079  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2080  * after deleting the entries (i.e., setting valid=0)
2081  *
2082  * @soc: DP SOC handle
2083  * @cb_ctxt: Callback context
2084  * @reo_status: REO command status
2085  */
2086 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2087 			 union hal_reo_status *reo_status)
2088 {
2089 	struct reo_desc_list_node *freedesc =
2090 		(struct reo_desc_list_node *)cb_ctxt;
2091 	uint32_t list_size;
2092 	struct reo_desc_list_node *desc;
2093 	unsigned long curr_ts = qdf_get_system_timestamp();
2094 	uint32_t desc_size, tot_desc_size;
2095 	struct hal_reo_cmd_params params;
2096 
2097 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2098 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2099 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2100 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2101 		return;
2102 	} else if (reo_status->rx_queue_status.header.status !=
2103 		HAL_REO_CMD_SUCCESS) {
2104 		/* Should not happen normally. Just print error for now */
2105 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2106 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
2107 			  __func__,
2108 			  reo_status->rx_queue_status.header.status,
2109 			  freedesc->rx_tid.tid);
2110 	}
2111 
2112 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2113 		"%s: rx_tid: %d status: %d", __func__,
2114 		freedesc->rx_tid.tid,
2115 		reo_status->rx_queue_status.header.status);
2116 
2117 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2118 	freedesc->free_ts = curr_ts;
2119 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2120 		(qdf_list_node_t *)freedesc, &list_size);
2121 
2122 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
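	/* Drain the freelist: a descriptor is flushed once the list has grown
	 * to REO_DESC_FREELIST_SIZE entries, the descriptor has aged past
	 * REO_DESC_FREE_DEFER_MS, or it is pending a resend of the
	 * UPDATE_RX_REO_QUEUE command.
	 */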
2123 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2124 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2125 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2126 		(desc->resend_update_reo_cmd && list_size))) {
2127 		struct dp_rx_tid *rx_tid;
2128 
2129 		qdf_list_remove_front(&soc->reo_desc_freelist,
2130 				(qdf_list_node_t **)&desc);
2131 		list_size--;
2132 		rx_tid = &desc->rx_tid;
2133 
2134 		/* First process descs with resend_update_reo_cmd set */
2135 		if (desc->resend_update_reo_cmd) {
2136 			dp_resend_update_reo_cmd(soc, desc, rx_tid);
2137 			continue;
2138 		}
2139 
2140 		/* Flush and invalidate REO descriptor from HW cache: Base and
2141 		 * extension descriptors should be flushed separately */
2142 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2143 		/* Get base descriptor size by passing non-qos TID */
2144 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2145 						   DP_NON_QOS_TID);
2146 
2147 		/* Flush reo extension descriptors */
2148 		while ((tot_desc_size -= desc_size) > 0) {
2149 			qdf_mem_zero(&params, sizeof(params));
2150 			params.std.addr_lo =
2151 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2152 				tot_desc_size) & 0xffffffff;
2153 			params.std.addr_hi =
2154 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2155 
2156 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2157 							CMD_FLUSH_CACHE,
2158 							&params,
2159 							NULL,
2160 							NULL)) {
2161 				dp_err_log("fail to send CMD_FLUSH_CACHE:"
2162 					   "tid %d desc %pK", rx_tid->tid,
2163 					   (void *)(rx_tid->hw_qdesc_paddr));
2164 			}
2165 		}
2166 
2167 		/* Flush base descriptor */
2168 		qdf_mem_zero(&params, sizeof(params));
2169 		params.std.need_status = 1;
2170 		params.std.addr_lo =
2171 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2172 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2173 
2174 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2175 							  CMD_FLUSH_CACHE,
2176 							  &params,
2177 							  dp_reo_desc_free,
2178 							  (void *)desc)) {
2179 			union hal_reo_status reo_status;
2180 			/*
2181 			 * If dp_reo_send_cmd return failure, related TID queue desc
2182 			 * should be unmapped. Also locally reo_desc, together with
2183 			 * TID queue desc also need to be freed accordingly.
2184 			 *
2185 			 * Here invoke desc_free function directly to do clean up.
2186 			 *
2187 			 * In case of MCL path add the desc back to the free
2188 			 * desc list and defer deletion.
2189 			 */
2190 			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
2191 				   __func__, rx_tid->tid);
2192 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2193 		}
2194 	}
2195 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2196 }
2197 
2198 /*
2199  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2200  * @peer: Datapath peer handle
2201  * @tid: TID
2202  *
2203  * Return: 0 on success, error code on failure
2204  */
2205 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2206 {
2207 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2208 	struct dp_soc *soc = peer->vdev->pdev->soc;
2209 	struct hal_reo_cmd_params params;
2210 	struct reo_desc_list_node *freedesc =
2211 		qdf_mem_malloc(sizeof(*freedesc));
2212 
2213 	if (!freedesc) {
2214 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2215 			  "%s: malloc failed for freedesc: tid %d",
2216 			  __func__, tid);
2217 		return -ENOMEM;
2218 	}
2219 
2220 	freedesc->rx_tid = *rx_tid;
2221 	freedesc->resend_update_reo_cmd = false;
2222 
2223 	qdf_mem_zero(&params, sizeof(params));
2224 
2225 	params.std.need_status = 1;
2226 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2227 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2228 	params.u.upd_queue_params.update_vld = 1;
2229 	params.u.upd_queue_params.vld = 0;
2230 
2231 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2232 			    dp_rx_tid_delete_cb, (void *)freedesc)
2233 		!= QDF_STATUS_SUCCESS) {
2234 		/* Defer the clean up to the call back context */
2235 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2236 		freedesc->free_ts = qdf_get_system_timestamp();
2237 		freedesc->resend_update_reo_cmd = true;
2238 		qdf_list_insert_front(&soc->reo_desc_freelist,
2239 				      (qdf_list_node_t *)freedesc);
2240 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2241 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2242 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2243 	}
2244 
2245 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2246 	rx_tid->hw_qdesc_alloc_size = 0;
2247 	rx_tid->hw_qdesc_paddr = 0;
2248 
2249 	return 0;
2250 }
2251 
2252 #ifdef DP_LFR
2253 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2254 {
2255 	int tid;
2256 
2257 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2258 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2259 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2260 			  "Setting up TID %d for peer %pK peer->local_id %d",
2261 			  tid, peer, peer->local_id);
2262 	}
2263 }
2264 #else
2265 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2266 #endif
2267 
2268 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2269 /*
2270  * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
2271  * @peer: Datapath peer
2272  *
2273  */
2274 static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2275 {
2276 }
2277 
2278 /*
2279  * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
2280  * @peer: Datapath peer
2281  *
2282  */
2283 static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2284 {
2285 }
2286 
2287 /*
2288  * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
2289  * @vdev: Datapath vdev
2290  * @peer: Datapath peer
2291  *
2292  */
2293 static inline void
2294 dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2295 {
2296 }
2297 #endif
2298 
2299 /*
2300  * dp_peer_tx_init() - Initialize TX state for the peer
2301  * @pdev: Datapath pdev
2302  * @peer: Datapath peer
2303  *
2304  */
2305 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2306 {
2307 	dp_peer_tid_queue_init(peer);
2308 	dp_peer_update_80211_hdr(peer->vdev, peer);
2309 }
2310 
2311 /*
2312  * dp_peer_tx_cleanup() - Clean up TX state for the peer
2313  * @vdev: Datapath vdev
2314  * @peer: Datapath peer
2315  *
2316  */
2317 static inline void
2318 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2319 {
2320 	dp_peer_tid_queue_cleanup(peer);
2321 }
2322 
2323 /*
2324  * dp_peer_rx_init() – Initialize receive TID state
2325  * @pdev: Datapath pdev
2326  * @peer: Datapath peer
2327  *
2328  */
2329 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2330 {
2331 	int tid;
2332 	struct dp_rx_tid *rx_tid;
2333 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2334 		rx_tid = &peer->rx_tid[tid];
2335 		rx_tid->array = &rx_tid->base;
2336 		rx_tid->base.head = rx_tid->base.tail = NULL;
2337 		rx_tid->tid = tid;
2338 		rx_tid->defrag_timeout_ms = 0;
2339 		rx_tid->ba_win_size = 0;
2340 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2341 
2342 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2343 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2344 	}
2345 
2346 	peer->active_ba_session_cnt = 0;
2347 	peer->hw_buffer_size = 0;
2348 	peer->kill_256_sessions = 0;
2349 
2350 	/* Setup default (non-qos) rx tid queue */
2351 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2352 
2353 	/* Setup rx tid queue for TID 0.
2354 	 * Other queues are set up on receiving the first packet for that TID,
2355 	 * which will first surface as a NULL REO queue error.
2356 	 */
2357 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2358 
2359 	/*
2360 	 * Setup the rest of the TIDs to handle LFR
2361 	 */
2362 	dp_peer_setup_remaining_tids(peer);
2363 
2364 	/*
2365 	 * Set security defaults: no PN check, no security. The target may
2366 	 * send a HTT SEC_IND message to overwrite these defaults.
2367 	 */
2368 	peer->security[dp_sec_ucast].sec_type =
2369 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2370 }
2371 
2372 /*
2373  * dp_peer_rx_cleanup() – Cleanup receive TID state
2374  * @vdev: Datapath vdev
2375  * @peer: Datapath peer
2376  * @reuse: Peer reference reuse
2377  *
2378  */
2379 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2380 {
2381 	int tid;
2382 	uint32_t tid_delete_mask = 0;
2383 
2384 	dp_info("Remove tids for peer: %pK", peer);
2385 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2386 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2387 
2388 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2389 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2390 			/* Cleanup defrag related resource */
2391 			dp_rx_defrag_waitlist_remove(peer, tid);
2392 			dp_rx_reorder_flush_frag(peer, tid);
2393 		}
2394 
2395 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2396 			dp_rx_tid_delete_wifi3(peer, tid);
2397 
2398 			tid_delete_mask |= (1 << tid);
2399 		}
2400 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2401 	}
2402 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2403 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2404 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2405 			peer->vdev->pdev->pdev_id,
2406 			peer->vdev->vdev_id, peer->mac_addr.raw,
2407 			tid_delete_mask);
2408 	}
2409 #endif
2410 	if (!reuse)
2411 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2412 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2413 }
2414 
2415 #ifdef FEATURE_PERPKT_INFO
2416 /*
2417  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2418  * @peer: Datapath peer
2419  *
2420  * return: void
2421  */
2422 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2423 {
2424 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2425 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2426 	peer->last_delayed_ba = false;
2427 	peer->last_delayed_ba_ppduid = 0;
2428 }
2429 #else
2430 /*
2431  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2432  * @peer: Datapath peer
2433  *
2434  * return: void
2435  */
2436 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2437 {
2438 }
2439 #endif
2440 
2441 /*
2442  * dp_peer_cleanup() – Cleanup peer information
2443  * @vdev: Datapath vdev
2444  * @peer: Datapath peer
2445  * @reuse: Peer reference reuse
2446  *
2447  */
2448 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2449 {
2450 	dp_peer_tx_cleanup(vdev, peer);
2451 
2452 	/* cleanup the Rx reorder queues for this peer */
2453 	dp_peer_rx_cleanup(vdev, peer, reuse);
2454 }
2455 
2456 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2457  *                                window size when a request with
2458  *                                64 window size is received.
2459  *                                This is done as a WAR since HW can
2460  *                                have only one setting per peer (64 or 256).
2461  *                                For HKv2, a per-TID buffer size setting
2462  *                                is used for TIDs 0 to per_tid_basize_max_tid.
2463  *                                For TIDs at or above per_tid_basize_max_tid,
2464  *                                the HKv1 method is used.
2465  * @peer: Datapath peer
2466  *
2467  * Return: void
2468  */
2469 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2470 {
2471 	uint8_t delba_rcode = 0;
2472 	int tid;
2473 	struct dp_rx_tid *rx_tid = NULL;
2474 
2475 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2476 	for (; tid < DP_MAX_TIDS; tid++) {
2477 		rx_tid = &peer->rx_tid[tid];
2478 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2479 
2480 		if (rx_tid->ba_win_size <= 64) {
2481 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2482 			continue;
2483 		} else {
2484 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2485 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2486 				/* send delba */
2487 				if (!rx_tid->delba_tx_status) {
2488 					rx_tid->delba_tx_retry++;
2489 					rx_tid->delba_tx_status = 1;
2490 					rx_tid->delba_rcode =
2491 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2492 					delba_rcode = rx_tid->delba_rcode;
2493 
2494 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2495 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2496 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2497 							peer->vdev->pdev->soc->ctrl_psoc,
2498 							peer->vdev->vdev_id,
2499 							peer->mac_addr.raw,
2500 							tid, delba_rcode);
2501 				} else {
2502 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2503 				}
2504 			} else {
2505 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2506 			}
2507 		}
2508 	}
2509 }
2510 
2511 /*
2512 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state
2513 *
2514 * @cdp_soc: Datapath soc handle
2515 * @peer_mac: Datapath peer mac address
2516 * @vdev_id: id of datapath vdev
2517 * @tid: TID number
2518 * @status: tx completion status
2519 * Return: 0 on success, error code on failure
2520 */
2521 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
2522 				      uint8_t *peer_mac,
2523 				      uint16_t vdev_id,
2524 				      uint8_t tid, int status)
2525 {
2526 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2527 						       peer_mac, 0, vdev_id);
2528 	struct dp_rx_tid *rx_tid = NULL;
2529 
2530 	if (!peer || peer->delete_in_progress) {
2531 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2532 			  "%s: Peer is NULL!\n", __func__);
2533 		goto fail;
2534 	}
2535 	rx_tid = &peer->rx_tid[tid];
2536 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2537 	if (status) {
2538 		rx_tid->num_addba_rsp_failed++;
2539 		dp_rx_tid_update_wifi3(peer, tid, 1,
2540 				       IEEE80211_SEQ_MAX);
2541 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2542 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2543 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2544 
2545 		goto success;
2546 	}
2547 
2548 	rx_tid->num_addba_rsp_success++;
2549 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2550 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2551 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2552 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2553 			__func__, tid);
2554 		goto fail;
2555 	}
2556 
2557 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2558 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2559 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2560 			  "%s: default route is not set for peer: %pM",
2561 			  __func__, peer->mac_addr.raw);
2562 		goto fail;
2563 	}
2564 
2565 	/* First Session */
2566 	if (peer->active_ba_session_cnt == 0) {
2567 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2568 			peer->hw_buffer_size = 256;
2569 		else
2570 			peer->hw_buffer_size = 64;
2571 	}
2572 
2573 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2574 
2575 	peer->active_ba_session_cnt++;
2576 
2577 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2578 
2579 	/* Kill any session having 256 buffer size
2580 	 * when 64 buffer size request is received.
2581 	 * Also, latch on to 64 as new buffer size.
2582 	 */
2583 	if (peer->kill_256_sessions) {
2584 		dp_teardown_256_ba_sessions(peer);
2585 		peer->kill_256_sessions = 0;
2586 	}
2587 
2588 success:
2589 	dp_peer_unref_delete(peer);
2590 	return QDF_STATUS_SUCCESS;
2591 
2592 fail:
2593 	if (peer)
2594 		dp_peer_unref_delete(peer);
2595 
2596 	return QDF_STATUS_E_FAILURE;
2597 }
2598 
2599 /*
2600 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
2601 *
2602 * @cdp_soc: Datapath soc handle
2603 * @peer_mac: Datapath peer mac address
2604 * @vdev_id: id of datapath vdev
2605 * @tid: TID number
2606 * @dialogtoken: output dialogtoken
2607 * @statuscode: output status code
2608 * @buffersize: Output BA window size
2609 * @batimeout: Output BA timeout
2610 */
2611 QDF_STATUS
2612 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2613 			     uint16_t vdev_id, uint8_t tid,
2614 			     uint8_t *dialogtoken, uint16_t *statuscode,
2615 			     uint16_t *buffersize, uint16_t *batimeout)
2616 {
2617 	struct dp_rx_tid *rx_tid = NULL;
2618 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2619 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2620 						       peer_mac, 0, vdev_id);
2621 
2622 	if (!peer || peer->delete_in_progress) {
2623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2624 			  "%s: Peer is NULL!\n", __func__);
2625 		status = QDF_STATUS_E_FAILURE;
2626 		goto fail;
2627 	}
2628 	rx_tid = &peer->rx_tid[tid];
2629 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2630 	rx_tid->num_of_addba_resp++;
2631 	/* setup ADDBA response parameters */
2632 	*dialogtoken = rx_tid->dialogtoken;
2633 	*statuscode = rx_tid->statuscode;
2634 	*buffersize = rx_tid->ba_win_size;
2635 	*batimeout  = 0;
2636 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2637 
2638 fail:
2639 	if (peer)
2640 		dp_peer_unref_delete(peer);
2641 
2642 	return status;
2643 }
2644 
2645 /* dp_check_ba_buffersize() - Check buffer size in request
2646  *                            and latch onto this size based on
2647  *                            size used in first active session.
2648  * @peer: Datapath peer
2649  * @tid: Tid
2650  * @buffersize: Block ack window size
2651  *
2652  * Return: void
2653  */
2654 static void dp_check_ba_buffersize(struct dp_peer *peer,
2655 				   uint16_t tid,
2656 				   uint16_t buffersize)
2657 {
2658 	struct dp_rx_tid *rx_tid = NULL;
2659 
2660 	rx_tid = &peer->rx_tid[tid];
2661 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2662 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2663 		rx_tid->ba_win_size = buffersize;
2664 		return;
2665 	} else {
2666 		if (peer->active_ba_session_cnt == 0) {
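		/* Remaining TIDs share a single HW buffer size (64 or 256)
		 * latched from the first active BA session; a 64-sized request
		 * while 256 is latched marks the existing 256 sessions for
		 * teardown (see dp_teardown_256_ba_sessions).
		 */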
2667 			rx_tid->ba_win_size = buffersize;
2668 		} else {
2669 			if (peer->hw_buffer_size == 64) {
2670 				if (buffersize <= 64)
2671 					rx_tid->ba_win_size = buffersize;
2672 				else
2673 					rx_tid->ba_win_size = peer->hw_buffer_size;
2674 			} else if (peer->hw_buffer_size == 256) {
2675 				if (buffersize > 64) {
2676 					rx_tid->ba_win_size = buffersize;
2677 				} else {
2678 					rx_tid->ba_win_size = buffersize;
2679 					peer->hw_buffer_size = 64;
2680 					peer->kill_256_sessions = 1;
2681 				}
2682 			}
2683 		}
2684 	}
2685 }
2686 
2687 /*
2688  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2689  *
2690  * @cdp_soc: Datapath soc handle
2691  * @peer_mac: Datapath peer mac address
2692  * @vdev_id: id of datapath vdev
2693  * @dialogtoken: dialogtoken from ADDBA frame
2694  * @tid: TID number
2695  * @batimeout: BA timeout
2696  * @buffersize: BA window size
2697  * @startseqnum: Start seq. number received in BA sequence control
2698  *
2699  * Return: 0 on success, error code on failure
2700  */
2701 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
2702 				  uint8_t *peer_mac,
2703 				  uint16_t vdev_id,
2704 				  uint8_t dialogtoken,
2705 				  uint16_t tid, uint16_t batimeout,
2706 				  uint16_t buffersize,
2707 				  uint16_t startseqnum)
2708 {
2709 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2710 	struct dp_rx_tid *rx_tid = NULL;
2711 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2712 						       peer_mac, 0, vdev_id);
2713 
2714 	if (!peer || peer->delete_in_progress) {
2715 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2716 			  "%s: Peer is NULL!\n", __func__);
2717 		status = QDF_STATUS_E_FAILURE;
2718 		goto fail;
2719 	}
2720 	rx_tid = &peer->rx_tid[tid];
2721 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2722 	rx_tid->num_of_addba_req++;
2723 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2724 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
2725 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2726 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2727 		peer->active_ba_session_cnt--;
2728 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2729 			  "%s: Rx Tid- %d hw qdesc is already setup",
2730 			__func__, tid);
2731 	}
2732 
2733 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2734 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2735 		status = QDF_STATUS_E_FAILURE;
2736 		goto fail;
2737 	}
2738 	dp_check_ba_buffersize(peer, tid, buffersize);
2739 
2740 	if (dp_rx_tid_setup_wifi3(peer, tid,
2741 	    rx_tid->ba_win_size, startseqnum)) {
2742 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2743 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2744 		status = QDF_STATUS_E_FAILURE;
2745 		goto fail;
2746 	}
2747 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2748 
2749 	rx_tid->dialogtoken = dialogtoken;
2750 	rx_tid->startseqnum = startseqnum;
2751 
2752 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2753 		rx_tid->statuscode = rx_tid->userstatuscode;
2754 	else
2755 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2756 
2757 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2758 
2759 fail:
2760 	if (peer)
2761 		dp_peer_unref_delete(peer);
2762 
2763 	return status;
2764 }
2765 
2766 /*
2767 * dp_set_addba_response() - Set a user defined ADDBA response status code
2768 *
2769 * @cdp_soc: Datapath soc handle
2770 * @peer_mac: Datapath peer mac address
2771 * @vdev_id: id of datapath vdev
2772 * @tid: TID number
2773 * @statuscode: response status code to be set
2774 */
2775 QDF_STATUS
2776 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2777 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
2778 {
2779 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2780 						       peer_mac, 0, vdev_id);
2781 	struct dp_rx_tid *rx_tid;
2782 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2783 
2784 	if (!peer || peer->delete_in_progress) {
2785 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2786 			  "%s: Peer is NULL!\n", __func__);
2787 		status = QDF_STATUS_E_FAILURE;
2788 		goto fail;
2789 	}
2790 
2791 	rx_tid = &peer->rx_tid[tid];
2792 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2793 	rx_tid->userstatuscode = statuscode;
2794 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2795 fail:
2796 	if (peer)
2797 		dp_peer_unref_delete(peer);
2798 
2799 	return status;
2800 }
2801 
2802 /*
2803 * dp_delba_process_wifi3() - Process DELBA from peer
2804 * @cdp_soc: Datapath soc handle
2805 * @peer_mac: Datapath peer mac address
2806 * @vdev_id: id of datapath vdev
2807 * @tid: TID number
2808 * @reasoncode: Reason code received in DELBA frame
2809 *
2810 * Return: 0 on success, error code on failure
2811 */
2812 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2813 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
2814 {
2815 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2816 	struct dp_rx_tid *rx_tid;
2817 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2818 						      peer_mac, 0, vdev_id);
2819 
2820 	if (!peer || peer->delete_in_progress) {
2821 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2822 			  "%s: Peer is NULL!\n", __func__);
2823 		status = QDF_STATUS_E_FAILURE;
2824 		goto fail;
2825 	}
2826 	rx_tid = &peer->rx_tid[tid];
2827 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2828 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2829 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2830 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2831 		status = QDF_STATUS_E_FAILURE;
2832 		goto fail;
2833 	}
2834 	/* TODO: See if we can delete the existing REO queue descriptor and
2835 	 * replace with a new one without queue extension descriptors to save
2836 	 * memory
2837 	 */
2838 	rx_tid->delba_rcode = reasoncode;
2839 	rx_tid->num_of_delba_req++;
2840 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2841 
2842 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2843 	peer->active_ba_session_cnt--;
2844 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2845 fail:
2846 	if (peer)
2847 		dp_peer_unref_delete(peer);
2848 
2849 	return status;
2850 }
2851 
2852 /*
2853  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2854  *
2855  * @cdp_soc: Datapath soc handle
2856  * @peer_mac: Datapath peer mac address
2857  * @vdev_id: id of datapath vdev
2858  * @tid: TID number
2859  * @status: tx completion status
2860  * Return: 0 on success, error code on failure
2861  */
2862 
2863 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2864 				 uint16_t vdev_id,
2865 				 uint8_t tid, int status)
2866 {
2867 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
2868 	struct dp_rx_tid *rx_tid = NULL;
2869 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2870 						      peer_mac, 0, vdev_id);
2871 
2872 	if (!peer || peer->delete_in_progress) {
2873 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2874 			  "%s: Peer is NULL!", __func__);
2875 		ret = QDF_STATUS_E_FAILURE;
2876 		goto end;
2877 	}
2878 	rx_tid = &peer->rx_tid[tid];
2879 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2880 	if (status) {
2881 		rx_tid->delba_tx_fail_cnt++;
2882 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2883 			rx_tid->delba_tx_retry = 0;
2884 			rx_tid->delba_tx_status = 0;
2885 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2886 		} else {
2887 			rx_tid->delba_tx_retry++;
2888 			rx_tid->delba_tx_status = 1;
2889 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2890 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2891 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2892 					peer->vdev->pdev->soc->ctrl_psoc,
2893 					peer->vdev->vdev_id,
2894 					peer->mac_addr.raw, tid,
2895 					rx_tid->delba_rcode);
2896 		}
2897 		goto end;
2898 	} else {
2899 		rx_tid->delba_tx_success_cnt++;
2900 		rx_tid->delba_tx_retry = 0;
2901 		rx_tid->delba_tx_status = 0;
2902 	}
2903 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2904 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2905 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2906 		peer->active_ba_session_cnt--;
2907 	}
2908 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2909 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2910 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2911 	}
2912 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2913 
2914 end:
2915 	if (peer)
2916 		dp_peer_unref_delete(peer);
2917 
2918 	return ret;
2919 }
2920 
2921 /**
2922  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2923  * @soc: Datapath soc handle
2924  * @vdev_id: id of datapath vdev
2925  * @peer_mac: Datapath peer mac address
2926  * @sec_type: security type
2927  * @rx_pn: Receive pn starting number
2928  *
2929  * Return: QDF_STATUS code
2931  */
2932 
2933 QDF_STATUS
2934 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
2935 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
2936 		      uint32_t *rx_pn)
2937 {
2938 	struct dp_pdev *pdev;
2939 	int i;
2940 	uint8_t pn_size;
2941 	struct hal_reo_cmd_params params;
2942 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2943 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
2944 				peer_mac, 0, vdev_id);
2945 	struct dp_vdev *vdev =
2946 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2947 						   vdev_id);
2948 
2949 	if (!vdev || !peer || peer->delete_in_progress) {
2950 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2951 			  "%s: Peer is NULL!\n", __func__);
2952 		status = QDF_STATUS_E_FAILURE;
2953 		goto fail;
2954 	}
2955 
2956 	pdev = vdev->pdev;
2957 	qdf_mem_zero(&params, sizeof(params));
2958 
2959 	params.std.need_status = 1;
2960 	params.u.upd_queue_params.update_pn_valid = 1;
2961 	params.u.upd_queue_params.update_pn_size = 1;
2962 	params.u.upd_queue_params.update_pn = 1;
2963 	params.u.upd_queue_params.update_pn_check_needed = 1;
2964 	params.u.upd_queue_params.update_svld = 1;
2965 	params.u.upd_queue_params.svld = 0;
2966 
2967 	peer->security[dp_sec_ucast].sec_type = sec_type;
2968 
2969 	switch (sec_type) {
2970 	case cdp_sec_type_tkip_nomic:
2971 	case cdp_sec_type_aes_ccmp:
2972 	case cdp_sec_type_aes_ccmp_256:
2973 	case cdp_sec_type_aes_gcmp:
2974 	case cdp_sec_type_aes_gcmp_256:
2975 		params.u.upd_queue_params.pn_check_needed = 1;
2976 		params.u.upd_queue_params.pn_size = 48;
2977 		pn_size = 48;
2978 		break;
2979 	case cdp_sec_type_wapi:
2980 		params.u.upd_queue_params.pn_check_needed = 1;
2981 		params.u.upd_queue_params.pn_size = 128;
2982 		pn_size = 128;
2983 		if (vdev->opmode == wlan_op_mode_ap) {
2984 			params.u.upd_queue_params.pn_even = 1;
2985 			params.u.upd_queue_params.update_pn_even = 1;
2986 		} else {
2987 			params.u.upd_queue_params.pn_uneven = 1;
2988 			params.u.upd_queue_params.update_pn_uneven = 1;
2989 		}
2990 		break;
2991 	default:
2992 		params.u.upd_queue_params.pn_check_needed = 0;
2993 		pn_size = 0;
2994 		break;
2995 	}
2996 
2997 
2998 	for (i = 0; i < DP_MAX_TIDS; i++) {
2999 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3000 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3001 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3002 			params.std.addr_lo =
3003 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3004 			params.std.addr_hi =
3005 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3006 
3007 			if (pn_size) {
3008 				QDF_TRACE(QDF_MODULE_ID_DP,
3009 					  QDF_TRACE_LEVEL_INFO_HIGH,
3010 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3011 					  __func__, i, rx_pn[3], rx_pn[2],
3012 					  rx_pn[1], rx_pn[0]);
3013 				params.u.upd_queue_params.update_pn_valid = 1;
3014 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3015 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3016 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3017 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3018 			}
3019 			rx_tid->pn_size = pn_size;
3020 			dp_reo_send_cmd((struct dp_soc *)soc,
3021 					CMD_UPDATE_RX_REO_QUEUE, &params,
3022 					dp_rx_tid_update_cb, rx_tid);
3023 		} else {
3024 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3025 				  "PN Check not setup for TID :%d ", i);
3026 		}
3027 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3028 	}
3029 fail:
3030 	if (peer)
3031 		dp_peer_unref_delete(peer);
3032 
3033 	return status;
3034 }
3035 
3036 
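/*
 * dp_rx_sec_ind_handler() - Handle HTT SEC_IND security indication from target
 * @soc: Datapath soc handle
 * @peer_id: peer id of the peer for which security is being set
 * @sec_type: security type
 * @is_unicast: flag for unicast (vs. multicast) key
 * @michael_key: TKIP Michael key (only referenced under the notyet blocks)
 * @rx_pn: Receive pn starting number (only referenced under the notyet blocks)
 *
 * Records the negotiated security type in the peer's unicast or multicast
 * security context.
 */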
3037 void
3038 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3039 		      enum cdp_sec_type sec_type, int is_unicast,
3040 		      u_int32_t *michael_key,
3041 		      u_int32_t *rx_pn)
3042 {
3043 	struct dp_peer *peer;
3044 	int sec_index;
3045 
3046 	peer = dp_peer_find_by_id(soc, peer_id);
3047 	if (!peer) {
3048 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3049 			  "Couldn't find peer from ID %d - skipping security inits",
3050 			  peer_id);
3051 		return;
3052 	}
3053 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3054 		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
3055 		  peer,
3056 		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3057 		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3058 		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
3059 		  is_unicast ? "ucast" : "mcast",
3060 		  sec_type);
3061 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3062 	peer->security[sec_index].sec_type = sec_type;
3063 #ifdef notyet /* TODO: See if this is required for defrag support */
3064 	/* michael key only valid for TKIP, but for simplicity,
3065 	 * copy it anyway
3066 	 */
3067 	qdf_mem_copy(
3068 		&peer->security[sec_index].michael_key[0],
3069 		michael_key,
3070 		sizeof(peer->security[sec_index].michael_key));
3071 #ifdef BIG_ENDIAN_HOST
3072 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3073 				 sizeof(peer->security[sec_index].michael_key));
3074 #endif /* BIG_ENDIAN_HOST */
3075 #endif
3076 
3077 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3078 	if (sec_type != cdp_sec_type_wapi) {
3079 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3080 	} else {
3081 		for (i = 0; i < DP_MAX_TIDS; i++) {
3082 			/*
3083 			 * Setting PN valid bit for WAPI sec_type,
3084 			 * since WAPI PN has to be started with predefined value
3085 			 */
3086 			peer->tids_last_pn_valid[i] = 1;
3087 			qdf_mem_copy(
3088 				(u_int8_t *) &peer->tids_last_pn[i],
3089 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3090 			peer->tids_last_pn[i].pn128[1] =
3091 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3092 			peer->tids_last_pn[i].pn128[0] =
3093 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3094 		}
3095 	}
3096 #endif
3097 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3098 	 * all security types and last pn for WAPI) once REO command API
3099 	 * is available
3100 	 */
3101 
3102 	dp_peer_unref_del_find_by_id(peer);
3103 }
3104 
3105 #ifdef DP_PEER_EXTENDED_API
3106 /**
3107  * dp_register_peer() - Register peer into physical device
3108  * @pdev - data path device instance
3109  * @sta_desc - peer description
3110  *
3111  * Register peer into physical device
3112  *
3113  * Return: QDF_STATUS_SUCCESS registration success
3114  *         QDF_STATUS_E_FAULT peer not found
3115  */
3116 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
3117 		struct ol_txrx_desc_type *sta_desc)
3118 {
3119 	struct dp_peer *peer;
3120 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3121 
3122 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
3123 				    sta_desc->peer_addr.bytes);
3124 
3125 	if (!peer)
3126 		return QDF_STATUS_E_FAULT;
3127 
3128 	qdf_spin_lock_bh(&peer->peer_info_lock);
3129 	peer->state = OL_TXRX_PEER_STATE_CONN;
3130 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3131 
3132 	dp_rx_flush_rx_cached(peer, false);
3133 
3134 	return QDF_STATUS_SUCCESS;
3135 }
3136 
3137 /**
3138  * dp_clear_peer() - remove peer from physical device
3139  * @pdev - data path device instance
3140  * @peer_addr - peer mac address
3141  *
3142  * remove peer from physical device
3143  *
3144  * Return: QDF_STATUS_SUCCESS peer removed successfully
3145  *         QDF_STATUS_E_FAULT peer not found
3146  */
3147 QDF_STATUS
3148 dp_clear_peer(struct cdp_pdev *pdev_handle, struct qdf_mac_addr peer_addr)
3149 {
3150 	struct dp_peer *peer;
3151 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3152 
3153 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3154 	if (!peer)
3155 		return QDF_STATUS_E_FAULT;
3156 
3157 	qdf_spin_lock_bh(&peer->peer_info_lock);
3158 	peer->state = OL_TXRX_PEER_STATE_DISC;
3159 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3160 
3161 	dp_rx_flush_rx_cached(peer, true);
3162 
3163 	return QDF_STATUS_SUCCESS;
3164 }
3165 
3166 /**
3167  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
3168  * @pdev - data path device instance
3169  * @vdev - virtual interface instance
3170  * @peer_addr - peer mac address
3171  *
3172  * Find peer by peer mac address within vdev
3173  *
3174  * Return: peer instance void pointer
3175  *         NULL cannot find target peer
3176  */
3177 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3178 		struct cdp_vdev *vdev_handle,
3179 		uint8_t *peer_addr)
3180 {
3181 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3182 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3183 	struct dp_peer *peer;
3184 
3185 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);
3186 
3187 	if (!peer)
3188 		return NULL;
3189 
3190 	if (peer->vdev != vdev) {
3191 		dp_peer_unref_delete(peer);
3192 		return NULL;
3193 	}
3194 
3195 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3196 	 * Decrement it here.
3197 	 */
3198 	dp_peer_unref_delete(peer);
3199 
3200 	return peer;
3201 }
3202 
3203 /**
3204  * dp_peer_state_update() - update peer local state
3205  * @pdev - data path device instance
3206  * @peer_mac - peer mac address
3207  * @state - new peer local state
3208  *
3209  * update peer local state
3210  *
3211  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if peer not found
3212  */
3213 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
3214 		enum ol_txrx_peer_state state)
3215 {
3216 	struct dp_peer *peer;
3217 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3218 
3219 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
3220 	if (!peer) {
3221 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3222 			  "Failed to find peer for: [%pM]", peer_mac);
3223 		return QDF_STATUS_E_FAILURE;
3224 	}
3225 	peer->state = state;
3226 
3227 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
3228 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3229 	 * Decrement it here.
3230 	 */
3231 	dp_peer_unref_delete(peer);
3232 
3233 	return QDF_STATUS_SUCCESS;
3234 }
3235 
3236 /**
3237  * dp_get_vdevid() - Get virtual interface id which peer registered
3238  * @peer_handle - peer instance
3239  * @vdev_id - virtual interface id which peer registered
3240  *
3241  * Get virtual interface id which peer registered
3242  *
3243  * Return: QDF_STATUS_SUCCESS
3244  */
3245 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
3246 {
3247 	struct dp_peer *peer = peer_handle;
3248 
3249 	dp_info("peer %pK vdev %pK vdev id %d",
3250 		peer, peer->vdev, peer->vdev->vdev_id);
3251 	*vdev_id = peer->vdev->vdev_id;
3252 	return QDF_STATUS_SUCCESS;
3253 }
3254 
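/**
 * dp_get_vdev_by_peer_addr() - Get vdev instance by peer mac address
 * @pdev_handle - data path device instance
 * @peer_addr - peer mac address
 *
 * Get the virtual interface instance which the peer with the given mac
 * address belongs to
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */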
3255 struct cdp_vdev *
3256 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3257 			 struct qdf_mac_addr peer_addr)
3258 {
3259 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3260 	struct dp_peer *peer = NULL;
3261 
3262 	if (!pdev) {
3263 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3264 			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_STR,
3265 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3266 		return NULL;
3267 	}
3268 
3269 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3270 	if (!peer) {
3271 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3272 			  "Peer not found for peer_addr: " QDF_MAC_ADDR_STR,
3273 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3274 		return NULL;
3275 	}
3276 
3277 	return (struct cdp_vdev *)peer->vdev;
3278 }
3279 
3280 /**
3281  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
3282  * @peer - peer instance
3283  *
3284  * Get virtual interface instance which peer belongs
3285  *
3286  * Return: virtual interface instance pointer
3287  *         NULL in case cannot find
3288  */
3289 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3290 {
3291 	struct dp_peer *peer = peer_handle;
3292 
3293 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3294 	return (struct cdp_vdev *)peer->vdev;
3295 }
3296 
3297 /**
3298  * dp_peer_get_peer_mac_addr() - Get peer mac address
3299  * @peer - peer instance
3300  *
3301  * Get peer mac address
3302  *
3303  * Return: peer mac address pointer
3304  *         NULL in case cannot find
3305  */
3306 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3307 {
3308 	struct dp_peer *peer = peer_handle;
3309 	uint8_t *mac;
3310 
3311 	mac = peer->mac_addr.raw;
3312 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3313 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3314 	return peer->mac_addr.raw;
3315 }
3316 
3317 /**
3318  * dp_get_peer_state() - Get local peer state
3319  * @peer - peer instance
3320  *
3321  * Get local peer state
3322  *
3323  * Return: peer status
3324  */
3325 int dp_get_peer_state(void *peer_handle)
3326 {
3327 	struct dp_peer *peer = peer_handle;
3328 
3329 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3330 	return peer->state;
3331 }
3332 
3333 /**
3334  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3335  * @pdev - data path device instance
3336  *
3337  * local peer id pool alloc for physical device
3338  *
3339  * Return: none
3340  */
3341 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3342 {
3343 	int i;
3344 
3345 	/* point the freelist to the first ID */
3346 	pdev->local_peer_ids.freelist = 0;
3347 
3348 	/* link each ID to the next one */
3349 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3350 		pdev->local_peer_ids.pool[i] = i + 1;
3351 		pdev->local_peer_ids.map[i] = NULL;
3352 	}
3353 
3354 	/* link the last ID to itself, to mark the end of the list */
3355 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3356 	pdev->local_peer_ids.pool[i] = i;
3357 
3358 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3359 	DP_TRACE(INFO, "Peer pool init");
3360 }
3361 
3362 /**
3363  * dp_local_peer_id_alloc() - allocate local peer id
3364  * @pdev - data path device instance
3365  * @peer - new peer instance
3366  *
3367  * allocate local peer id
3368  *
3369  * Return: none
3370  */
3371 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3372 {
3373 	int i;
3374 
3375 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3376 	i = pdev->local_peer_ids.freelist;
3377 	if (pdev->local_peer_ids.pool[i] == i) {
3378 		/* the list is empty, except for the list-end marker */
3379 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3380 	} else {
3381 		/* take the head ID and advance the freelist */
3382 		peer->local_id = i;
3383 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3384 		pdev->local_peer_ids.map[i] = peer;
3385 	}
3386 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3387 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
3388 }
3389 
3390 /**
3391  * dp_local_peer_id_free() - remove local peer id
3392  * @pdev - data path device instance
3393  * @peer - peer instance should be removed
3394  *
3395  * remove local peer id
3396  *
3397  * Return: none
3398  */
3399 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3400 {
3401 	int i = peer->local_id;
3402 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3403 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3404 		return;
3405 	}
3406 
3407 	/* put this ID on the head of the freelist */
3408 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3409 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3410 	pdev->local_peer_ids.freelist = i;
3411 	pdev->local_peer_ids.map[i] = NULL;
3412 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3413 }
3414 #endif
3415 
3416 /**
3417  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3418  * @soc_handle: DP SOC handle
3419  * @peer_id: peer_id of the peer
3420  * @peer_mac: buffer to copy the peer mac address into
3421  * Return: vdev_id of the vap
3422  */
3423 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3424 		uint16_t peer_id, uint8_t *peer_mac)
3425 {
3426 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3427 	struct dp_peer *peer;
3428 	uint8_t vdev_id;
3429 
3430 	peer = dp_peer_find_by_id(soc, peer_id);
3431 
3432 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3433 		  "soc %pK peer_id %d", soc, peer_id);
3434 
3435 	if (!peer) {
3436 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3437 			  "peer not found ");
3438 		return CDP_INVALID_VDEV_ID;
3439 	}
3440 
3441 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3442 	vdev_id = peer->vdev->vdev_id;
3443 
3444 	dp_peer_unref_del_find_by_id(peer);
3445 
3446 	return vdev_id;
3447 }
3448 
3449 /**
3450  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3451  * @peer: DP peer handle
3452  * @dp_stats_cmd_cb: REO command callback function
3453  * @cb_ctxt: Callback context
3454  *
3455  * Return: none
3456  */
3457 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3458 			void *cb_ctxt)
3459 {
3460 	struct dp_soc *soc = peer->vdev->pdev->soc;
3461 	struct hal_reo_cmd_params params;
3462 	int i;
3463 
3464 	if (!dp_stats_cmd_cb)
3465 		return;
3466 
3467 	qdf_mem_zero(&params, sizeof(params));
3468 	for (i = 0; i < DP_MAX_TIDS; i++) {
3469 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3470 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3471 			params.std.need_status = 1;
3472 			params.std.addr_lo =
3473 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3474 			params.std.addr_hi =
3475 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3476 
3477 			if (cb_ctxt) {
3478 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3479 					&params, dp_stats_cmd_cb, cb_ctxt);
3480 			} else {
3481 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3482 					&params, dp_stats_cmd_cb, rx_tid);
3483 			}
3484 
3485 			/* Flush REO descriptor from HW cache to update stats
3486 			 * in descriptor memory. This is to help debugging */
3487 			qdf_mem_zero(&params, sizeof(params));
3488 			params.std.need_status = 0;
3489 			params.std.addr_lo =
3490 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3491 			params.std.addr_hi =
3492 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3493 			params.u.fl_cache_params.flush_no_inval = 1;
3494 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3495 				NULL);
3496 		}
3497 	}
3498 }
3499 
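/**
 * dp_set_michael_key() - Set TKIP Michael key for the peer
 * @peer_handle - peer instance
 * @is_unicast - true for the unicast key, false for the multicast key
 * @key - Michael key to be copied into the peer security context
 *
 * Return: none
 */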
3500 void dp_set_michael_key(struct cdp_peer *peer_handle,
3501 			bool is_unicast, uint32_t *key)
3502 {
3503 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3504 	uint8_t sec_index = is_unicast ? 1 : 0;
3505 
3506 	if (!peer) {
3507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3508 			  "peer not found ");
3509 		return;
3510 	}
3511 
3512 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3513 		     key, IEEE80211_WEP_MICLEN);
3514 }
3515 
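/**
 * dp_peer_find_by_id_valid() - Check if a peer exists for the given peer id
 * @soc - data path soc handle
 * @peer_id - peer id to look up
 *
 * Return: true if a peer is found for the peer id, false otherwise
 */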
3516 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3517 {
3518 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3519 
3520 	if (peer) {
3521 		/*
3522 		 * Decrement the peer ref which is taken as part of
3523 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3524 		 */
3525 		dp_peer_unref_del_find_by_id(peer);
3526 
3527 		return true;
3528 	}
3529 
3530 	return false;
3531 }
3532