xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 
33 #ifdef WLAN_TX_PKT_CAPTURE_ENH
34 #include "dp_tx_capture.h"
35 #endif
36 
37 #ifdef FEATURE_WDS
38 static inline bool
39 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
40 				    struct dp_ast_entry *ast_entry)
41 {
42 	/* If peer map v2 is enabled, we are not freeing the ast entry
43 	 * here; it is supposed to be freed in the unmap event (after
44 	 * we receive delete confirmation from the target).
45 	 *
46 	 * If peer_id is invalid, we did not get the peer map event
47 	 * for the peer; free the ast entry from here only in that case.
48 	 */
49 
50 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
51 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
52 		return true;
53 
54 	return false;
55 }
56 #else
57 static inline bool
58 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
59 				    struct dp_ast_entry *ast_entry)
60 {
61 	return false;
62 }
63 #endif
64 
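/*
 * dp_set_ssn_valid_flag() - set the SSN valid flag in REO queue update params
 * @params: REO command parameters to update
 * @valid: SSN valid flag value to program
 *
 * Return: None
 */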
65 static inline void
66 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
67 					uint8_t valid)
68 {
69 	params->u.upd_queue_params.update_svld = 1;
70 	params->u.upd_queue_params.svld = valid;
71 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
72 		  "%s: Setting SSN valid bit to %d",
73 		  __func__, valid);
74 }
75 
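/*
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the MAC addresses are equal, non-zero otherwise
 */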
76 static inline int dp_peer_find_mac_addr_cmp(
77 	union dp_align_mac_addr *mac_addr1,
78 	union dp_align_mac_addr *mac_addr2)
79 {
80 		/*
81 		 * Intentionally use & rather than &&.
82 		 * because the operands are binary rather than generic boolean,
83 		 * the functionality is equivalent.
84 		 * Using && has the advantage of short-circuited evaluation,
85 		 * but using & has the advantage of no conditional branching,
86 		 * which is a more significant benefit.
87 		 */
88 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
89 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
90 }
91 
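/*
 * dp_peer_ast_table_attach() - allocate the AST index to AST entry table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */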
92 static int dp_peer_ast_table_attach(struct dp_soc *soc)
93 {
94 	uint32_t max_ast_index;
95 
96 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
97 	/* allocate ast_table for ast entry to ast_index map */
98 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
99 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
100 	soc->ast_table = qdf_mem_malloc(max_ast_index *
101 					sizeof(struct dp_ast_entry *));
102 	if (!soc->ast_table) {
103 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
104 			  "%s: ast_table memory allocation failed", __func__);
105 		return QDF_STATUS_E_NOMEM;
106 	}
107 	return 0; /* success */
108 }
109 
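/*
 * dp_peer_find_map_attach() - allocate the peer ID to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */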
110 static int dp_peer_find_map_attach(struct dp_soc *soc)
111 {
112 	uint32_t max_peers, peer_map_size;
113 
114 	max_peers = soc->max_peers;
115 	/* allocate the peer ID -> peer object map */
116 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
117 		  "\n<=== cfg max peer id %d ====>", max_peers);
118 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
119 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
120 	if (!soc->peer_id_to_obj_map) {
121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
122 			  "%s: peer map memory allocation failed", __func__);
123 		return QDF_STATUS_E_NOMEM;
124 	}
125 
126 	/*
127 	 * The peer_id_to_obj_map doesn't really need to be initialized,
128 	 * since elements are only used after they have been individually
129 	 * initialized.
130 	 * However, it is convenient for debugging to have all elements
131 	 * that are not in use set to 0.
132 	 */
133 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
134 	return 0; /* success */
135 }
136 
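/*
 * dp_log2_ceil() - compute the ceiling of log2 of a value
 * @value: input value
 *
 * Return: smallest integer n such that (1 << n) >= value
 */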
137 static int dp_log2_ceil(unsigned int value)
138 {
139 	unsigned int tmp = value;
140 	int log2 = -1;
141 
142 	while (tmp) {
143 		log2++;
144 		tmp >>= 1;
145 	}
146 	if (1 << log2 != value)
147 		log2++;
148 	return log2;
149 }
150 
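/*
 * dp_peer_find_add_id_to_obj() - record the firmware peer ID in the peer object
 * @peer: peer object
 * @peer_id: peer ID assigned by firmware
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if the peer already has a valid ID
 */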
151 static int dp_peer_find_add_id_to_obj(
152 	struct dp_peer *peer,
153 	uint16_t peer_id)
154 {
155 
156 	if (peer->peer_id == HTT_INVALID_PEER) {
157 		peer->peer_id = peer_id;
158 		return 0; /* success */
159 	}
160 	return QDF_STATUS_E_FAILURE; /* failure */
161 }
162 
163 #define DP_PEER_HASH_LOAD_MULT  2
164 #define DP_PEER_HASH_LOAD_SHIFT 0
165 
166 #define DP_AST_HASH_LOAD_MULT  2
167 #define DP_AST_HASH_LOAD_SHIFT 0
168 
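/*
 * dp_peer_find_hash_attach() - allocate the peer MAC address hash table
 * @soc: SoC handle
 *
 * The number of hash bins is derived from soc->max_peers and rounded up to
 * a power of two.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */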
169 static int dp_peer_find_hash_attach(struct dp_soc *soc)
170 {
171 	int i, hash_elems, log2;
172 
173 	/* allocate the peer MAC address -> peer object hash table */
174 	hash_elems = soc->max_peers;
175 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
176 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
177 	log2 = dp_log2_ceil(hash_elems);
178 	hash_elems = 1 << log2;
179 
180 	soc->peer_hash.mask = hash_elems - 1;
181 	soc->peer_hash.idx_bits = log2;
182 	/* allocate an array of TAILQ peer object lists */
183 	soc->peer_hash.bins = qdf_mem_malloc(
184 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
185 	if (!soc->peer_hash.bins)
186 		return QDF_STATUS_E_NOMEM;
187 
188 	for (i = 0; i < hash_elems; i++)
189 		TAILQ_INIT(&soc->peer_hash.bins[i]);
190 
191 	return 0;
192 }
193 
194 static void dp_peer_find_hash_detach(struct dp_soc *soc)
195 {
196 	if (soc->peer_hash.bins) {
197 		qdf_mem_free(soc->peer_hash.bins);
198 		soc->peer_hash.bins = NULL;
199 	}
200 }
201 
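/*
 * dp_peer_find_hash_index() - compute the peer hash index for a MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address of the peer
 *
 * Return: hash bin index in the peer hash table
 */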
202 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
203 	union dp_align_mac_addr *mac_addr)
204 {
205 	unsigned index;
206 
207 	index =
208 		mac_addr->align2.bytes_ab ^
209 		mac_addr->align2.bytes_cd ^
210 		mac_addr->align2.bytes_ef;
211 	index ^= index >> soc->peer_hash.idx_bits;
212 	index &= soc->peer_hash.mask;
213 	return index;
214 }
215 
216 
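/*
 * dp_peer_find_hash_add() - add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * The peer is inserted at the tail of its hash bin under peer_ref_mutex.
 *
 * Return: None
 */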
217 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
218 {
219 	unsigned index;
220 
221 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
222 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
223 	/*
224 	 * It is important to add the new peer at the tail of the peer list
225 	 * with the bin index.  Together with having the hash_find function
226 	 * search from head to tail, this ensures that if two entries with
227 	 * the same MAC address are stored, the one added first will be
228 	 * found first.
229 	 */
230 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
231 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
232 }
233 
234 #ifdef FEATURE_AST
235 /*
236  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
237  * @soc: SoC handle
238  *
239  * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
240  */
241 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
242 {
243 	int i, hash_elems, log2;
244 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
245 
246 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
247 		DP_AST_HASH_LOAD_SHIFT);
248 
249 	log2 = dp_log2_ceil(hash_elems);
250 	hash_elems = 1 << log2;
251 
252 	soc->ast_hash.mask = hash_elems - 1;
253 	soc->ast_hash.idx_bits = log2;
254 
255 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
256 		  "ast hash_elems: %d, max_ast_idx: %d",
257 		  hash_elems, max_ast_idx);
258 
259 	/* allocate an array of TAILQ peer object lists */
260 	soc->ast_hash.bins = qdf_mem_malloc(
261 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
262 				dp_ast_entry)));
263 
264 	if (!soc->ast_hash.bins)
265 		return QDF_STATUS_E_NOMEM;
266 
267 	for (i = 0; i < hash_elems; i++)
268 		TAILQ_INIT(&soc->ast_hash.bins[i]);
269 
270 	return 0;
271 }
272 
273 /*
274  * dp_peer_ast_cleanup() - cleanup the references
275  * @soc: SoC handle
276  * @ast: ast entry
277  *
278  * Return: None
279  */
280 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
281 				       struct dp_ast_entry *ast)
282 {
283 	txrx_ast_free_cb cb = ast->callback;
284 	void *cookie = ast->cookie;
285 
286 	/* Call the callbacks to free up the cookie */
287 	if (cb) {
288 		ast->callback = NULL;
289 		ast->cookie = NULL;
290 		cb(soc->ctrl_psoc,
291 		   dp_soc_to_cdp_soc(soc),
292 		   cookie,
293 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
294 	}
295 }
296 
297 /*
298  * dp_peer_ast_hash_detach() - Free AST Hash table
299  * @soc: SoC handle
300  *
301  * Return: None
302  */
303 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
304 {
305 	unsigned int index;
306 	struct dp_ast_entry *ast, *ast_next;
307 
308 	if (!soc->ast_hash.mask)
309 		return;
310 
311 	if (!soc->ast_hash.bins)
312 		return;
313 
314 	qdf_spin_lock_bh(&soc->ast_lock);
315 	for (index = 0; index <= soc->ast_hash.mask; index++) {
316 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
317 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
318 					   hash_list_elem, ast_next) {
319 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
320 					     hash_list_elem);
321 				dp_peer_ast_cleanup(soc, ast);
322 				qdf_mem_free(ast);
323 			}
324 		}
325 	}
326 	qdf_spin_unlock_bh(&soc->ast_lock);
327 
328 	qdf_mem_free(soc->ast_hash.bins);
329 	soc->ast_hash.bins = NULL;
330 }
331 
332 /*
333  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
334  * @soc: SoC handle
335  *
336  * Return: AST hash
337  */
338 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
339 	union dp_align_mac_addr *mac_addr)
340 {
341 	uint32_t index;
342 
343 	index =
344 		mac_addr->align2.bytes_ab ^
345 		mac_addr->align2.bytes_cd ^
346 		mac_addr->align2.bytes_ef;
347 	index ^= index >> soc->ast_hash.idx_bits;
348 	index &= soc->ast_hash.mask;
349 	return index;
350 }
351 
352 /*
353  * dp_peer_ast_hash_add() - Add AST entry into hash table
354  * @soc: SoC handle
355  *
356  * This function adds the AST entry into SoC AST hash table
357  * It assumes caller has taken the ast lock to protect the access to this table
358  *
359  * Return: None
360  */
361 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
362 		struct dp_ast_entry *ase)
363 {
364 	uint32_t index;
365 
366 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
367 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
368 }
369 
370 /*
371  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
372  * @soc: SoC handle
373  *
374  * This function removes the AST entry from soc AST hash table
375  * It assumes caller has taken the ast lock to protect the access to this table
376  *
377  * Return: None
378  */
379 void dp_peer_ast_hash_remove(struct dp_soc *soc,
380 			     struct dp_ast_entry *ase)
381 {
382 	unsigned index;
383 	struct dp_ast_entry *tmpase;
384 	int found = 0;
385 
386 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
387 	/* Check that the hash bin list is not empty before delete */
388 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
389 
390 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
391 		if (tmpase == ase) {
392 			found = 1;
393 			break;
394 		}
395 	}
396 
397 	QDF_ASSERT(found);
398 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
399 }
400 
401 /*
402  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
403  * @soc: SoC handle
404  * @peer: peer handle
405  * @ast_mac_addr: mac address
406  *
407  * It assumes caller has taken the ast lock to protect the access to ast list
408  *
409  * Return: AST entry
410  */
411 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
412 					   struct dp_peer *peer,
413 					   uint8_t *ast_mac_addr)
414 {
415 	struct dp_ast_entry *ast_entry = NULL;
416 	union dp_align_mac_addr *mac_addr =
417 		(union dp_align_mac_addr *)ast_mac_addr;
418 
419 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
420 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
421 					       &ast_entry->mac_addr)) {
422 			return ast_entry;
423 		}
424 	}
425 
426 	return NULL;
427 }
428 
429 /*
430  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
431  * @soc: SoC handle
432  *
433  * It assumes caller has taken the ast lock to protect the access to
434  * AST hash table
435  *
436  * Return: AST entry
437  */
438 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
439 						     uint8_t *ast_mac_addr,
440 						     uint8_t pdev_id)
441 {
442 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
443 	uint32_t index;
444 	struct dp_ast_entry *ase;
445 
446 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
447 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
448 	mac_addr = &local_mac_addr_aligned;
449 
450 	index = dp_peer_ast_hash_index(soc, mac_addr);
451 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
452 		if ((pdev_id == ase->pdev_id) &&
453 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
454 			return ase;
455 		}
456 	}
457 
458 	return NULL;
459 }
460 
461 /*
462  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
463  * @soc: SoC handle
464  *
465  * It assumes caller has taken the ast lock to protect the access to
466  * AST hash table
467  *
468  * Return: AST entry
469  */
470 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
471 					       uint8_t *ast_mac_addr)
472 {
473 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
474 	unsigned index;
475 	struct dp_ast_entry *ase;
476 
477 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
478 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
479 	mac_addr = &local_mac_addr_aligned;
480 
481 	index = dp_peer_ast_hash_index(soc, mac_addr);
482 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
483 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
484 			return ase;
485 		}
486 	}
487 
488 	return NULL;
489 }
490 
491 /*
492  * dp_peer_map_ast() - Map the ast entry with HW AST Index
493  * @soc: SoC handle
494  * @peer: peer to which ast node belongs
495  * @mac_addr: MAC address of ast node
496  * @hw_peer_id: HW AST Index returned by target in peer map event
497  * @vdev_id: vdev id for VAP to which the peer belongs to
498  * @ast_hash: ast hash value in HW
499  *
500  * Return: None
501  */
502 static inline void dp_peer_map_ast(struct dp_soc *soc,
503 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
504 	uint8_t vdev_id, uint16_t ast_hash)
505 {
506 	struct dp_ast_entry *ast_entry = NULL;
507 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
508 
509 	if (!peer) {
510 		return;
511 	}
512 
513 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
514 		  "%s: peer %pK ID %d vid %d mac %pM",
515 		  __func__, peer, hw_peer_id, vdev_id, mac_addr);
516 
517 	qdf_spin_lock_bh(&soc->ast_lock);
518 
519 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
520 
521 	if (ast_entry) {
522 		ast_entry->ast_idx = hw_peer_id;
523 		soc->ast_table[hw_peer_id] = ast_entry;
524 		ast_entry->is_active = TRUE;
525 		peer_type = ast_entry->type;
526 		ast_entry->ast_hash_value = ast_hash;
527 		ast_entry->is_mapped = TRUE;
528 	}
529 
530 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
531 		if (soc->cdp_soc.ol_ops->peer_map_event) {
532 			soc->cdp_soc.ol_ops->peer_map_event(
533 			soc->ctrl_psoc, peer->peer_id,
534 			hw_peer_id, vdev_id,
535 			mac_addr, peer_type, ast_hash);
536 		}
537 	} else {
538 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
539 			  "AST entry not found");
540 	}
541 
542 	qdf_spin_unlock_bh(&soc->ast_lock);
543 	return;
544 }
545 
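/*
 * dp_peer_free_hmwds_cb() - AST free callback used to re-add an HMWDS entry
 * @ctrl_psoc: control path SoC handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params carrying the entry to re-add
 * @status: AST free status reported by the caller
 *
 * Once the old AST entry is confirmed deleted by the target, add back the
 * HMWDS AST entry saved in the cookie for the peer.
 *
 * Return: None
 */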
546 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
547 			   struct cdp_soc *dp_soc,
548 			   void *cookie,
549 			   enum cdp_ast_free_status status)
550 {
551 	struct dp_ast_free_cb_params *param =
552 		(struct dp_ast_free_cb_params *)cookie;
553 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
554 	struct dp_peer *peer = NULL;
555 
556 	if (status != CDP_TXRX_AST_DELETED) {
557 		qdf_mem_free(cookie);
558 		return;
559 	}
560 
561 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
562 				      0, param->vdev_id);
563 	if (peer) {
564 		dp_peer_add_ast(soc, peer,
565 				&param->mac_addr.raw[0],
566 				param->type,
567 				param->flags);
568 		dp_peer_unref_delete(peer);
569 	}
570 	qdf_mem_free(cookie);
571 }
572 
573 /*
574  * dp_peer_add_ast() - Allocate and add AST entry into peer list
575  * @soc: SoC handle
576  * @peer: peer to which ast node belongs
577  * @mac_addr: MAC address of ast node
578  * @type: type of the AST entry to add
 * @flags: AST configuration flags (wds / hmwds)
579  *
580  * This API is used by WDS source port learning function to
581  * add a new AST entry into peer AST list
582  *
583  * Return: 0 if new entry is allocated,
584  *        -1 if entry add failed
585  */
586 int dp_peer_add_ast(struct dp_soc *soc,
587 			struct dp_peer *peer,
588 			uint8_t *mac_addr,
589 			enum cdp_txrx_ast_entry_type type,
590 			uint32_t flags)
591 {
592 	struct dp_ast_entry *ast_entry = NULL;
593 	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
594 	struct dp_pdev *pdev = NULL;
595 	uint8_t next_node_mac[6];
596 	int  ret = -1;
597 	txrx_ast_free_cb cb = NULL;
598 	void *cookie = NULL;
599 	struct dp_peer *tmp_peer = NULL;
600 	bool is_peer_found = false;
601 
602 	vdev = peer->vdev;
603 	if (!vdev) {
604 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
605 			  FL("Peer's vdev is NULL"));
606 		QDF_ASSERT(0);
607 		return ret;
608 	}
609 
610 	pdev = vdev->pdev;
611 
612 	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
613 					  DP_VDEV_ALL);
614 	if (tmp_peer) {
615 		tmp_vdev = tmp_peer->vdev;
616 		if (!tmp_vdev) {
617 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
618 				  FL("Peer's vdev is NULL"));
619 			QDF_ASSERT(0);
620 			dp_peer_unref_delete(tmp_peer);
621 			return ret;
622 		}
623 		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
624 			is_peer_found = true;
625 
626 		dp_peer_unref_delete(tmp_peer);
627 	}
628 
629 	qdf_spin_lock_bh(&soc->ast_lock);
630 	if (peer->delete_in_progress) {
631 		qdf_spin_unlock_bh(&soc->ast_lock);
632 		return ret;
633 	}
634 
635 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
636 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
637 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
638 		  peer->mac_addr.raw, peer, mac_addr);
639 
640 
641 	/* fw supports only 2 times the max_peers ast entries */
642 	if (soc->num_ast_entries >=
643 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
644 		qdf_spin_unlock_bh(&soc->ast_lock);
645 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
646 			  FL("Max ast entries reached"));
647 		return ret;
648 	}
649 
650 	/* If the AST entry already exists, just return from here.
651 	 * An ast entry with the same mac address can exist on different
652 	 * radios; if ast_override support is enabled, use search by pdev
653 	 * in this case.
654 	 */
655 	if (soc->ast_override_support) {
656 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
657 							    pdev->pdev_id);
658 		if (ast_entry) {
659 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
660 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
661 				ast_entry->is_active = TRUE;
662 
663 			qdf_spin_unlock_bh(&soc->ast_lock);
664 			return 0;
665 		}
666 		if (is_peer_found) {
667 			/* During WDS to static roaming, peer is added
668 			 * to the list before static AST entry create.
669 			 * So, allow AST entry for STATIC type
670 			 * even if peer is present
671 			 */
672 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
673 				qdf_spin_unlock_bh(&soc->ast_lock);
674 				return 0;
675 			}
676 		}
677 	} else {
678 		/* For WDS_HM_SEC, entries can be added for the same mac
679 		 * address; do not check for an existing entry.
680 		 */
681 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
682 			goto add_ast_entry;
683 
684 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
685 
686 		if (ast_entry) {
687 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
688 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
689 				ast_entry->is_active = TRUE;
690 
691 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
692 			    !ast_entry->delete_in_progress) {
693 				qdf_spin_unlock_bh(&soc->ast_lock);
694 				return 0;
695 			}
696 
697 			/* An add for an HMWDS entry cannot be ignored if an
698 			 * AST entry with the same mac address already exists.
699 			 *
700 			 * If an ast entry exists with the requested mac address,
701 			 * send a delete command and register a callback which
702 			 * can take care of adding the HMWDS ast entry on delete
703 			 * confirmation from the target.
704 			 */
705 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
706 				struct dp_ast_free_cb_params *param = NULL;
707 
708 				if (ast_entry->type ==
709 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
710 					goto add_ast_entry;
711 
712 				/* save existing callback */
713 				if (ast_entry->callback) {
714 					cb = ast_entry->callback;
715 					cookie = ast_entry->cookie;
716 				}
717 
718 				param = qdf_mem_malloc(sizeof(*param));
719 				if (!param) {
720 					QDF_TRACE(QDF_MODULE_ID_TXRX,
721 						  QDF_TRACE_LEVEL_ERROR,
722 						  "Allocation failed");
723 					qdf_spin_unlock_bh(&soc->ast_lock);
724 					return ret;
725 				}
726 
727 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
728 					     QDF_MAC_ADDR_SIZE);
729 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
730 					     &peer->mac_addr.raw[0],
731 					     QDF_MAC_ADDR_SIZE);
732 				param->type = type;
733 				param->flags = flags;
734 				param->vdev_id = vdev->vdev_id;
735 				ast_entry->callback = dp_peer_free_hmwds_cb;
736 				ast_entry->pdev_id = vdev->pdev->pdev_id;
737 				ast_entry->type = type;
738 				ast_entry->cookie = (void *)param;
739 				if (!ast_entry->delete_in_progress)
740 					dp_peer_del_ast(soc, ast_entry);
741 			}
742 
743 			/* Modify an already existing AST entry from type
744 			 * WDS to MEC on promotion. This serves as a fix when
745 			 * the backbone of interfaces is interchanged wherein
746 			 * a wds entry becomes its own MEC. The entry should be
747 			 * replaced only when the ast_entry peer matches the
748 			 * peer received in the mec event. This additional check
749 			 * is needed in wds repeater cases where a multicast
750 			 * packet from a station to the root via the repeater
751 			 * should not remove the wds entry.
752 			 */
753 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
754 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
755 			    (ast_entry->peer == peer)) {
756 				ast_entry->is_active = FALSE;
757 				dp_peer_del_ast(soc, ast_entry);
758 			}
759 			qdf_spin_unlock_bh(&soc->ast_lock);
760 
761 			/* Call the saved callback */
762 			if (cb) {
763 				cb(soc->ctrl_psoc,
764 				   dp_soc_to_cdp_soc(soc),
765 				   cookie,
766 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
767 			}
768 			return 0;
769 		}
770 	}
771 
772 add_ast_entry:
773 	ast_entry = (struct dp_ast_entry *)
774 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
775 
776 	if (!ast_entry) {
777 		qdf_spin_unlock_bh(&soc->ast_lock);
778 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
779 			  FL("fail to allocate ast_entry"));
780 		QDF_ASSERT(0);
781 		return ret;
782 	}
783 
784 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
785 	ast_entry->pdev_id = vdev->pdev->pdev_id;
786 	ast_entry->is_mapped = false;
787 	ast_entry->delete_in_progress = false;
788 
789 	switch (type) {
790 	case CDP_TXRX_AST_TYPE_STATIC:
791 		peer->self_ast_entry = ast_entry;
792 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
793 		if (peer->vdev->opmode == wlan_op_mode_sta)
794 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
795 		break;
796 	case CDP_TXRX_AST_TYPE_SELF:
797 		peer->self_ast_entry = ast_entry;
798 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
799 		break;
800 	case CDP_TXRX_AST_TYPE_WDS:
801 		ast_entry->next_hop = 1;
802 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
803 		break;
804 	case CDP_TXRX_AST_TYPE_WDS_HM:
805 		ast_entry->next_hop = 1;
806 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
807 		break;
808 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
809 		ast_entry->next_hop = 1;
810 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
811 		break;
812 	case CDP_TXRX_AST_TYPE_MEC:
813 		ast_entry->next_hop = 1;
814 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
815 		break;
816 	case CDP_TXRX_AST_TYPE_DA:
817 		peer = peer->vdev->vap_bss_peer;
818 		ast_entry->next_hop = 1;
819 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
820 		break;
821 	default:
822 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
823 			FL("Incorrect AST entry type"));
824 	}
825 
826 	ast_entry->is_active = TRUE;
827 	DP_STATS_INC(soc, ast.added, 1);
828 	soc->num_ast_entries++;
829 	dp_peer_ast_hash_add(soc, ast_entry);
830 
831 	ast_entry->peer = peer;
832 
833 	if (type == CDP_TXRX_AST_TYPE_MEC)
834 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
835 	else
836 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
837 
838 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
839 
840 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
841 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
842 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
843 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
844 		if (QDF_STATUS_SUCCESS ==
845 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
846 				soc->ctrl_psoc,
847 				peer->vdev->vdev_id,
848 				peer->mac_addr.raw,
849 				peer->peer_id,
850 				mac_addr,
851 				next_node_mac,
852 				flags,
853 				ast_entry->type)) {
854 			qdf_spin_unlock_bh(&soc->ast_lock);
855 			return 0;
856 		}
857 	}
858 
859 	qdf_spin_unlock_bh(&soc->ast_lock);
860 	return ret;
861 }
862 
863 /*
864  * dp_peer_free_ast_entry() - Free up the ast entry memory
865  * @soc: SoC handle
866  * @ast_entry: Address search entry
867  *
868  * This API is used to free up the memory associated with
869  * AST entry.
870  *
871  * Return: None
872  */
873 void dp_peer_free_ast_entry(struct dp_soc *soc,
874 			    struct dp_ast_entry *ast_entry)
875 {
876 	/*
877 	 * NOTE: Ensure that call to this API is done
878 	 * after soc->ast_lock is taken
879 	 */
880 	ast_entry->callback = NULL;
881 	ast_entry->cookie = NULL;
882 
883 	DP_STATS_INC(soc, ast.deleted, 1);
884 	dp_peer_ast_hash_remove(soc, ast_entry);
885 	dp_peer_ast_cleanup(soc, ast_entry);
886 	qdf_mem_free(ast_entry);
887 	soc->num_ast_entries--;
888 }
889 
890 /*
891  * dp_peer_unlink_ast_entry() - Unlink the AST entry from peer and soc tables
892  * @soc: SoC handle
893  * @ast_entry: Address search entry
894  *
895  * This API is used to remove/unlink the AST entry from the peer list
896  * and from the soc ast_table index mapping.
897  *
898  * Return: None
899  */
900 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
901 			      struct dp_ast_entry *ast_entry)
902 {
903 	/*
904 	 * NOTE: Ensure that call to this API is done
905 	 * after soc->ast_lock is taken
906 	 */
907 	struct dp_peer *peer = ast_entry->peer;
908 
909 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
910 
911 	if (ast_entry == peer->self_ast_entry)
912 		peer->self_ast_entry = NULL;
913 
914 	/*
915 	 * release the reference only if it is mapped
916 	 * to ast_table
917 	 */
918 	if (ast_entry->is_mapped)
919 		soc->ast_table[ast_entry->ast_idx] = NULL;
920 
921 	ast_entry->peer = NULL;
922 }
923 
924 /*
925  * dp_peer_del_ast() - Delete and free AST entry
926  * @soc: SoC handle
927  * @ast_entry: AST entry of the node
928  *
929  * This function removes the AST entry from peer and soc tables
930  * It assumes caller has taken the ast lock to protect the access to these
931  * tables
932  *
933  * Return: None
934  */
935 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
936 {
937 	struct dp_peer *peer;
938 
939 	if (!ast_entry)
940 		return;
941 
942 	if (ast_entry->delete_in_progress)
943 		return;
944 
945 	ast_entry->delete_in_progress = true;
946 
947 	peer = ast_entry->peer;
948 	dp_peer_ast_send_wds_del(soc, ast_entry);
949 
950 	/* Remove SELF and STATIC entries in teardown itself */
951 	if (!ast_entry->next_hop)
952 		dp_peer_unlink_ast_entry(soc, ast_entry);
953 
954 	if (ast_entry->is_mapped)
955 		soc->ast_table[ast_entry->ast_idx] = NULL;
956 
957 	/* If peer map v2 is enabled, we are not freeing the ast entry
958 	 * here; it is supposed to be freed in the unmap event (after
959 	 * we receive delete confirmation from the target).
960 	 *
961 	 * If peer_id is invalid, we did not get the peer map event
962 	 * for the peer; free the ast entry from here only in that case.
963 	 */
964 	if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry))
965 		return;
966 
967 	/* For a WDS secondary entry, ast_entry->next_hop is set, so
968 	 * unlinking has to be done explicitly here.
969 	 * As this entry is not a mapped entry, the unmap notification from
970 	 * FW will not come. Hence unlinking is done right here.
971 	 */
972 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
973 		dp_peer_unlink_ast_entry(soc, ast_entry);
974 
975 	dp_peer_free_ast_entry(soc, ast_entry);
976 }
977 
978 /*
979  * dp_peer_update_ast() - Update AST entry to the roamed peer
980  * @soc: SoC handle
981  * @peer: peer to which ast node belongs
982  * @ast_entry: AST entry of the node
983  * @flags: wds or hmwds
984  *
985  * This function updates the AST entry to the roamed peer and soc tables
986  * It assumes caller has taken the ast lock to protect the access to these
987  * tables
988  *
989  * Return: 0 if ast entry is updated successfully
990  *         -1 failure
991  */
992 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
993 		       struct dp_ast_entry *ast_entry, uint32_t flags)
994 {
995 	int ret = -1;
996 	struct dp_peer *old_peer;
997 
998 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
999 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
1000 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1001 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
1002 		  peer->mac_addr.raw);
1003 
1004 	/* Do not send an AST update in the below cases:
1005 	 *  1) AST entry delete has already been triggered
1006 	 *  2) Peer delete has already been triggered
1007 	 *  3) We did not get the HTT map for the create event
1008 	 */
1009 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
1010 	    !ast_entry->is_mapped)
1011 		return ret;
1012 
1013 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1014 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1015 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1016 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1017 		return 0;
1018 
1019 	/*
1020 	 * Avoids flood of WMI update messages sent to FW for same peer.
1021 	 */
1022 	if (qdf_unlikely(ast_entry->peer == peer) &&
1023 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1024 	    (ast_entry->peer->vdev == peer->vdev) &&
1025 	    (ast_entry->is_active))
1026 		return 0;
1027 
1028 	old_peer = ast_entry->peer;
1029 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1030 
1031 	ast_entry->peer = peer;
1032 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1033 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1034 	ast_entry->is_active = TRUE;
1035 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1036 
1037 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1038 				soc->ctrl_psoc,
1039 				peer->vdev->vdev_id,
1040 				ast_entry->mac_addr.raw,
1041 				peer->mac_addr.raw,
1042 				flags);
1043 
1044 	return ret;
1045 }
1046 
1047 /*
1048  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1049  * @soc: SoC handle
1050  * @ast_entry: AST entry of the node
1051  *
1052  * This function gets the pdev_id from the ast entry.
1053  *
1054  * Return: (uint8_t) pdev_id
1055  */
1056 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1057 				struct dp_ast_entry *ast_entry)
1058 {
1059 	return ast_entry->pdev_id;
1060 }
1061 
1062 /*
1063  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1064  * @soc: SoC handle
1065  * @ast_entry: AST entry of the node
1066  *
1067  * This function gets the next hop from the ast entry.
1068  *
1069  * Return: (uint8_t) next_hop
1070  */
1071 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1072 				struct dp_ast_entry *ast_entry)
1073 {
1074 	return ast_entry->next_hop;
1075 }
1076 
1077 /*
1078  * dp_peer_ast_set_type() - set the type of an ast entry
1079  * @soc: SoC handle
1080  * @ast_entry: AST entry of the node
1081  *
1082  * This function sets the type in the ast entry.
1083  *
1084  * Return: None
1085  */
1086 void dp_peer_ast_set_type(struct dp_soc *soc,
1087 				struct dp_ast_entry *ast_entry,
1088 				enum cdp_txrx_ast_entry_type type)
1089 {
1090 	ast_entry->type = type;
1091 }
1092 
1093 #else
1094 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
1095 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
1096 		uint32_t flags)
1097 {
1098 	return 1;
1099 }
1100 
1101 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1102 {
1103 }
1104 
1105 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1106 			struct dp_ast_entry *ast_entry, uint32_t flags)
1107 {
1108 	return 1;
1109 }
1110 
1111 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1112 					       uint8_t *ast_mac_addr)
1113 {
1114 	return NULL;
1115 }
1116 
1117 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1118 						     uint8_t *ast_mac_addr,
1119 						     uint8_t pdev_id)
1120 {
1121 	return NULL;
1122 }
1123 
1124 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1125 {
1126 	return 0;
1127 }
1128 
1129 static inline void dp_peer_map_ast(struct dp_soc *soc,
1130 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1131 	uint8_t vdev_id, uint16_t ast_hash)
1132 {
1133 	return;
1134 }
1135 
1136 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1137 {
1138 }
1139 
1140 void dp_peer_ast_set_type(struct dp_soc *soc,
1141 				struct dp_ast_entry *ast_entry,
1142 				enum cdp_txrx_ast_entry_type type)
1143 {
1144 }
1145 
1146 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1147 				struct dp_ast_entry *ast_entry)
1148 {
1149 	return 0xff;
1150 }
1151 
1152 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1153 				struct dp_ast_entry *ast_entry)
1154 {
1155 	return 0xff;
1156 }
1157 
1164 #endif
1165 
1166 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1167 			      struct dp_ast_entry *ast_entry)
1168 {
1169 	struct dp_peer *peer = ast_entry->peer;
1170 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1171 
1172 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1173 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1174 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1175 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1176 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1177 
1178 	/*
1179 	 * If peer delete_in_progress is set, the peer is about to get
1180 	 * torn down with a peer delete command to firmware,
1181 	 * which will clean up all the wds ast entries.
1182 	 * So, no need to send explicit wds ast delete to firmware.
1183 	 */
1184 	if (ast_entry->next_hop) {
1185 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1186 						    peer->vdev->vdev_id,
1187 						    ast_entry->mac_addr.raw,
1188 						    ast_entry->type,
1189 						    !peer->delete_in_progress);
1190 	}
1191 
1192 }
1193 
1194 #ifdef FEATURE_WDS
1195 /**
1196  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
1197  * @soc: soc handle
1198  * @peer: peer handle
1199  *
1200  * Free all the wds ast entries associated with peer
1201  *
1202  * Return: Number of wds ast entries freed
1203  */
1204 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
1205 					     struct dp_peer *peer)
1206 {
1207 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
1208 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1209 	uint32_t num_ast = 0;
1210 
1211 	TAILQ_INIT(&ast_local_list);
1212 	qdf_spin_lock_bh(&soc->ast_lock);
1213 
1214 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1215 		if (ast_entry->next_hop) {
1216 			if (ast_entry->is_mapped)
1217 				soc->ast_table[ast_entry->ast_idx] = NULL;
1218 
1219 			dp_peer_unlink_ast_entry(soc, ast_entry);
1220 			DP_STATS_INC(soc, ast.deleted, 1);
1221 			dp_peer_ast_hash_remove(soc, ast_entry);
1222 			TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
1223 					  ase_list_elem);
1224 			soc->num_ast_entries--;
1225 			num_ast++;
1226 		}
1227 	}
1228 
1229 	qdf_spin_unlock_bh(&soc->ast_lock);
1230 
1231 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
1232 			   temp_ast_entry) {
1233 		if (ast_entry->callback)
1234 			ast_entry->callback(soc->ctrl_psoc,
1235 					    dp_soc_to_cdp_soc(soc),
1236 					    ast_entry->cookie,
1237 					    CDP_TXRX_AST_DELETED);
1238 
1239 		qdf_mem_free(ast_entry);
1240 	}
1241 
1242 	return num_ast;
1243 }
1244 /**
1245  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
1246  * @soc: soc handle
1247  * @peer: peer handle
1248  * @free_wds_count: number of wds entries freed by FW with peer delete
1249  *
1250  * Free all the wds ast entries associated with peer and compare with
1251  * the value received from firmware
1252  *
1253  * Return: None
1254  */
1255 static void
1256 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1257 			  uint32_t free_wds_count)
1258 {
1259 	uint32_t wds_deleted = 0;
1260 
1261 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
1262 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
1263 	    (free_wds_count != wds_deleted)) {
1264 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
1265 		dp_alert("For peer %pK (mac: %pM) number of wds entries deleted by fw = %d during peer delete is not the same as the number deleted by host = %d",
1266 			 peer, peer->mac_addr.raw, free_wds_count,
1267 			 wds_deleted);
1268 	}
1269 }
1270 
1271 #else
1272 static void
1273 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1274 			  uint32_t free_wds_count)
1275 {
1276 }
1277 #endif
1278 
1279 /**
1280  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1281  * @soc: soc handle
1282  * @peer: peer handle
1283  * @mac_addr: mac address of the AST entry to search and delete
1284  *
1285  * Find the ast entry from the peer list using the mac address and free
1286  * the entry.
1287  *
1288  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1289  */
1290 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1291 					 struct dp_peer *peer,
1292 					 uint8_t *mac_addr)
1293 {
1294 	struct dp_ast_entry *ast_entry;
1295 	void *cookie = NULL;
1296 	txrx_ast_free_cb cb = NULL;
1297 
1298 	/*
1299 	 * release the reference only if it is mapped
1300 	 * to ast_table
1301 	 */
1302 
1303 	qdf_spin_lock_bh(&soc->ast_lock);
1304 
1305 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1306 	if (!ast_entry) {
1307 		qdf_spin_unlock_bh(&soc->ast_lock);
1308 		return QDF_STATUS_E_NOENT;
1309 	} else if (ast_entry->is_mapped) {
1310 		soc->ast_table[ast_entry->ast_idx] = NULL;
1311 	}
1312 
1313 	cb = ast_entry->callback;
1314 	cookie = ast_entry->cookie;
1315 
1316 
1317 	dp_peer_unlink_ast_entry(soc, ast_entry);
1318 	dp_peer_free_ast_entry(soc, ast_entry);
1319 
1320 	qdf_spin_unlock_bh(&soc->ast_lock);
1321 
1322 	if (cb) {
1323 		cb(soc->ctrl_psoc,
1324 		   dp_soc_to_cdp_soc(soc),
1325 		   cookie,
1326 		   CDP_TXRX_AST_DELETED);
1327 	}
1328 
1329 	return QDF_STATUS_SUCCESS;
1330 }
1331 
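/*
 * dp_peer_find_hash_find() - look up a peer by MAC address and vdev ID
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @mac_addr_is_aligned: set if peer_mac_addr is already 4-byte aligned
 * @vdev_id: vdev ID of the peer, or DP_VDEV_ALL to match any vdev
 *
 * On success the peer's reference count is incremented; the caller must
 * release it with dp_peer_unref_delete().
 *
 * Return: peer object on success, NULL if no matching peer is found
 */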
1332 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1333 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1334 {
1335 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1336 	unsigned index;
1337 	struct dp_peer *peer;
1338 
1339 	if (mac_addr_is_aligned) {
1340 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1341 	} else {
1342 		qdf_mem_copy(
1343 			&local_mac_addr_aligned.raw[0],
1344 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1345 		mac_addr = &local_mac_addr_aligned;
1346 	}
1347 	index = dp_peer_find_hash_index(soc, mac_addr);
1348 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1349 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1350 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1351 			((peer->vdev->vdev_id == vdev_id) ||
1352 			 (vdev_id == DP_VDEV_ALL))) {
1353 			/* found it - increment the ref count before releasing
1354 			 * the lock
1355 			 */
1356 			qdf_atomic_inc(&peer->ref_cnt);
1357 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1358 			return peer;
1359 		}
1360 	}
1361 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1362 	return NULL; /* failure */
1363 }
1364 
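/*
 * dp_peer_find_hash_remove() - remove a peer from the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold peer_ref_mutex; see the note in the function body.
 *
 * Return: None
 */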
1365 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1366 {
1367 	unsigned index;
1368 	struct dp_peer *tmppeer = NULL;
1369 	int found = 0;
1370 
1371 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1372 	/* Check that the hash bin list is not empty before delete */
1373 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1374 	/*
1375 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1376 	 * by the caller.
1377 	 * The caller needs to hold the lock from the time the peer object's
1378 	 * reference count is decremented and tested up through the time the
1379 	 * reference to the peer object is removed from the hash table, by
1380 	 * this function.
1381 	 * Holding the lock only while removing the peer object reference
1382 	 * from the hash table keeps the hash table consistent, but does not
1383 	 * protect against a new HL tx context starting to use the peer object
1384 	 * if it looks up the peer object from its MAC address just after the
1385 	 * peer ref count is decremented to zero, but just before the peer
1386 	 * object reference is removed from the hash table.
1387 	 */
1388 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1389 		if (tmppeer == peer) {
1390 			found = 1;
1391 			break;
1392 		}
1393 	}
1394 	QDF_ASSERT(found);
1395 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1396 }
1397 
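/*
 * dp_peer_find_hash_erase() - drop the last reference to every hashed peer
 * @soc: SoC handle
 *
 * Used at soc teardown; resets each peer's reference count to one and calls
 * dp_peer_unref_delete() so the peer objects get freed.
 *
 * Return: None
 */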
1398 void dp_peer_find_hash_erase(struct dp_soc *soc)
1399 {
1400 	int i;
1401 
1402 	/*
1403 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1404 	 * it's known that the soc is no longer in use.
1405 	 */
1406 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1407 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1408 			struct dp_peer *peer, *peer_next;
1409 
1410 			/*
1411 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1412 			 * memory access violation after peer is freed
1413 			 */
1414 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1415 				hash_list_elem, peer_next) {
1416 				/*
1417 				 * Don't remove the peer from the hash table -
1418 				 * that would modify the list we are currently
1419 				 * traversing, and it's not necessary anyway.
1420 				 */
1421 				/*
1422 				 * Artificially adjust the peer's ref count to
1423 				 * 1, so it will get deleted by
1424 				 * dp_peer_unref_delete.
1425 				 */
1426 				/* set to zero */
1427 				qdf_atomic_init(&peer->ref_cnt);
1428 				/* incr to one */
1429 				qdf_atomic_inc(&peer->ref_cnt);
1430 				dp_peer_unref_delete(peer);
1431 			}
1432 		}
1433 	}
1434 }
1435 
1436 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1437 {
1438 	if (soc->ast_table) {
1439 		qdf_mem_free(soc->ast_table);
1440 		soc->ast_table = NULL;
1441 	}
1442 }
1443 
1444 static void dp_peer_find_map_detach(struct dp_soc *soc)
1445 {
1446 	if (soc->peer_id_to_obj_map) {
1447 		qdf_mem_free(soc->peer_id_to_obj_map);
1448 		soc->peer_id_to_obj_map = NULL;
1449 	}
1450 }
1451 
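/*
 * dp_peer_find_attach() - allocate peer map, peer hash, AST table and AST hash
 * @soc: SoC handle
 *
 * Earlier allocations are rolled back if a later step fails.
 *
 * Return: 0 on success, 1 on failure
 */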
1452 int dp_peer_find_attach(struct dp_soc *soc)
1453 {
1454 	if (dp_peer_find_map_attach(soc))
1455 		return 1;
1456 
1457 	if (dp_peer_find_hash_attach(soc)) {
1458 		dp_peer_find_map_detach(soc);
1459 		return 1;
1460 	}
1461 
1462 	if (dp_peer_ast_table_attach(soc)) {
1463 		dp_peer_find_hash_detach(soc);
1464 		dp_peer_find_map_detach(soc);
1465 		return 1;
1466 	}
1467 
1468 	if (dp_peer_ast_hash_attach(soc)) {
1469 		dp_peer_ast_table_detach(soc);
1470 		dp_peer_find_hash_detach(soc);
1471 		dp_peer_find_map_detach(soc);
1472 		return 1;
1473 	}
1474 
1475 	return 0; /* success */
1476 }
1477 
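/*
 * dp_rx_tid_stats_cb() - REO command callback that prints RX TID queue stats
 * @soc: SoC handle
 * @cb_ctxt: dp_rx_tid for which the REO queue stats were requested
 * @reo_status: REO command status and queue statistics returned by HW
 *
 * Return: None
 */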
1478 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1479 	union hal_reo_status *reo_status)
1480 {
1481 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1482 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1483 
1484 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1485 		return;
1486 
1487 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1488 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1489 			       queue_status->header.status, rx_tid->tid);
1490 		return;
1491 	}
1492 
1493 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1494 		       "ssn: %d\n"
1495 		       "curr_idx  : %d\n"
1496 		       "pn_31_0   : %08x\n"
1497 		       "pn_63_32  : %08x\n"
1498 		       "pn_95_64  : %08x\n"
1499 		       "pn_127_96 : %08x\n"
1500 		       "last_rx_enq_tstamp : %08x\n"
1501 		       "last_rx_deq_tstamp : %08x\n"
1502 		       "rx_bitmap_31_0     : %08x\n"
1503 		       "rx_bitmap_63_32    : %08x\n"
1504 		       "rx_bitmap_95_64    : %08x\n"
1505 		       "rx_bitmap_127_96   : %08x\n"
1506 		       "rx_bitmap_159_128  : %08x\n"
1507 		       "rx_bitmap_191_160  : %08x\n"
1508 		       "rx_bitmap_223_192  : %08x\n"
1509 		       "rx_bitmap_255_224  : %08x\n",
1510 		       rx_tid->tid,
1511 		       queue_status->ssn, queue_status->curr_idx,
1512 		       queue_status->pn_31_0, queue_status->pn_63_32,
1513 		       queue_status->pn_95_64, queue_status->pn_127_96,
1514 		       queue_status->last_rx_enq_tstamp,
1515 		       queue_status->last_rx_deq_tstamp,
1516 		       queue_status->rx_bitmap_31_0,
1517 		       queue_status->rx_bitmap_63_32,
1518 		       queue_status->rx_bitmap_95_64,
1519 		       queue_status->rx_bitmap_127_96,
1520 		       queue_status->rx_bitmap_159_128,
1521 		       queue_status->rx_bitmap_191_160,
1522 		       queue_status->rx_bitmap_223_192,
1523 		       queue_status->rx_bitmap_255_224);
1524 
1525 	DP_PRINT_STATS(
1526 		       "curr_mpdu_cnt      : %d\n"
1527 		       "curr_msdu_cnt      : %d\n"
1528 		       "fwd_timeout_cnt    : %d\n"
1529 		       "fwd_bar_cnt        : %d\n"
1530 		       "dup_cnt            : %d\n"
1531 		       "frms_in_order_cnt  : %d\n"
1532 		       "bar_rcvd_cnt       : %d\n"
1533 		       "mpdu_frms_cnt      : %d\n"
1534 		       "msdu_frms_cnt      : %d\n"
1535 		       "total_byte_cnt     : %d\n"
1536 		       "late_recv_mpdu_cnt : %d\n"
1537 		       "win_jump_2k        : %d\n"
1538 		       "hole_cnt           : %d\n",
1539 		       queue_status->curr_mpdu_cnt,
1540 		       queue_status->curr_msdu_cnt,
1541 		       queue_status->fwd_timeout_cnt,
1542 		       queue_status->fwd_bar_cnt,
1543 		       queue_status->dup_cnt,
1544 		       queue_status->frms_in_order_cnt,
1545 		       queue_status->bar_rcvd_cnt,
1546 		       queue_status->mpdu_frms_cnt,
1547 		       queue_status->msdu_frms_cnt,
1548 		       queue_status->total_cnt,
1549 		       queue_status->late_recv_mpdu_cnt,
1550 		       queue_status->win_jump_2k,
1551 		       queue_status->hole_cnt);
1552 
1553 	DP_PRINT_STATS("Addba Req          : %d\n"
1554 			"Addba Resp         : %d\n"
1555 			"Addba Resp success : %d\n"
1556 			"Addba Resp failed  : %d\n"
1557 			"Delba Req received : %d\n"
1558 			"Delba Tx success   : %d\n"
1559 			"Delba Tx Fail      : %d\n"
1560 			"BA window size     : %d\n"
1561 			"Pn size            : %d\n",
1562 			rx_tid->num_of_addba_req,
1563 			rx_tid->num_of_addba_resp,
1564 			rx_tid->num_addba_rsp_success,
1565 			rx_tid->num_addba_rsp_failed,
1566 			rx_tid->num_of_delba_req,
1567 			rx_tid->delba_tx_success_cnt,
1568 			rx_tid->delba_tx_fail_cnt,
1569 			rx_tid->ba_win_size,
1570 			rx_tid->pn_size);
1571 }
1572 
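/*
 * dp_peer_find_add_id() - map a firmware-assigned peer ID to a peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: AST index for this peer
 * @vdev_id: vdev ID of the peer
 *
 * Looks up the peer by MAC address, records it in peer_id_to_obj_map and
 * stores the peer ID in the peer object.
 *
 * Return: peer object (with a reference held) on success, NULL if not found
 */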
1573 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1574 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1575 	uint8_t vdev_id)
1576 {
1577 	struct dp_peer *peer;
1578 
1579 	QDF_ASSERT(peer_id <= soc->max_peers);
1580 	/* check if there's already a peer object with this MAC address */
1581 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1582 		0 /* is aligned */, vdev_id);
1583 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1584 		  "%s: peer %pK ID %d vid %d mac %pM",
1585 		  __func__, peer, peer_id, vdev_id, peer_mac_addr);
1586 
1587 	if (peer) {
1588 		/* peer's ref count was already incremented by
1589 		 * peer_find_hash_find
1590 		 */
1591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1592 			  "%s: ref_cnt: %d", __func__,
1593 			   qdf_atomic_read(&peer->ref_cnt));
1594 		if (!soc->peer_id_to_obj_map[peer_id])
1595 			soc->peer_id_to_obj_map[peer_id] = peer;
1596 		else {
1597 			/* Peer map event came for peer_id which
1598 			 * is already mapped, this is not expected
1599 			 */
1600 			QDF_ASSERT(0);
1601 		}
1602 
1603 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1604 			/* TBDXXX: assert for now */
1605 			QDF_ASSERT(0);
1606 		} else {
1607 			dp_peer_tid_peer_id_update(peer, peer->peer_id);
1608 		}
1609 
1610 		return peer;
1611 	}
1612 
1613 	return NULL;
1614 }
1615 
1616 /**
1617  * dp_rx_peer_map_handler() - handle peer map event from firmware
1618  * @soc: generic soc handle
1619  * @peer_id: peer_id from firmware
1620  * @hw_peer_id: ast index for this peer
1621  * @vdev_id: vdev ID
1622  * @peer_mac_addr: mac address of the peer
1623  * @ast_hash: ast hash value
1624  * @is_wds: flag to indicate peer map event for WDS ast entry
1625  *
1626  * associate the peer_id that firmware provided with peer entry
1627  * and update the ast table in the host with the hw_peer_id.
1628  *
1629  * Return: none
1630  */
1631 
1632 void
1633 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1634 		       uint16_t hw_peer_id, uint8_t vdev_id,
1635 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1636 		       uint8_t is_wds)
1637 {
1638 	struct dp_peer *peer = NULL;
1639 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1640 
1641 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
1642 		soc, peer_id, hw_peer_id,
1643 		  peer_mac_addr, vdev_id);
1644 
1645 	/* For a peer map event for a WDS ast entry, get the peer from
1646 	 * the obj map
1647 	 */
1648 	if (is_wds) {
1649 		peer = soc->peer_id_to_obj_map[peer_id];
1650 		/*
1651 		 * In certain cases, such as an Auth attack on a repeater,
1652 		 * the number of ast_entries falling in the same hash
1653 		 * bucket can exceed the max_skid length supported by
1654 		 * HW in the root AP. In these cases,
1655 		 * the FW will return the hw_peer_id (ast_index) as
1656 		 * 0xffff indicating HW could not add the entry in
1657 		 * its table. Host has to delete the entry from its
1658 		 * table in these cases.
1659 		 */
1660 		if (hw_peer_id == HTT_INVALID_PEER) {
1661 			DP_STATS_INC(soc, ast.map_err, 1);
1662 			if (!dp_peer_ast_free_entry_by_mac(soc,
1663 							   peer,
1664 							   peer_mac_addr))
1665 				return;
1666 
1667 			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1668 				 peer, peer->peer_id,
1669 				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1670 				 is_wds);
1671 
1672 			return;
1673 		}
1674 
1675 	} else {
1676 		/*
1677 		 * It's the responsibility of the CP and FW to ensure
1678 		 * that peer is created successfully. Ideally DP should
1679 		 * not hit the below condition for directly associated
1680 		 * peers.
1681 		 */
1682 		if ((hw_peer_id < 0) ||
1683 		    (hw_peer_id >=
1684 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1685 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1686 				  "invalid hw_peer_id: %d", hw_peer_id);
1687 			qdf_assert_always(0);
1688 		}
1689 
1690 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1691 					   hw_peer_id, vdev_id);
1692 
1693 		if (peer) {
1694 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1695 			    qdf_mem_cmp(peer->mac_addr.raw,
1696 					peer->vdev->mac_addr.raw,
1697 					QDF_MAC_ADDR_SIZE) != 0) {
1698 				dp_info("STA vdev bss_peer!!!!");
1699 				peer->bss_peer = 1;
1700 				peer->vdev->vap_bss_peer = peer;
1701 				qdf_mem_copy(peer->vdev->vap_bss_peer_mac_addr,
1702 					     peer->mac_addr.raw,
1703 					     QDF_MAC_ADDR_SIZE);
1704 			}
1705 
1706 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1707 				peer->vdev->bss_ast_hash = ast_hash;
1708 				peer->vdev->bss_ast_idx = hw_peer_id;
1709 			}
1710 
1711 			/* Add an ast entry in case the self ast entry is
1712 			 * deleted due to a DP CP sync issue.
1713 			 *
1714 			 * self_ast_entry is modified in the peer create
1715 			 * and peer unmap paths, which cannot run in
1716 			 * parallel with peer map, so no lock is needed
1717 			 * before referring to it.
1718 			 */
1719 			if (!peer->self_ast_entry) {
1720 				dp_info("Add self ast from map %pM",
1721 					peer_mac_addr);
1722 				dp_peer_add_ast(soc, peer,
1723 						peer_mac_addr,
1724 						type, 0);
1725 			}
1726 
1727 		}
1728 	}
1729 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1730 			hw_peer_id, vdev_id, ast_hash);
1731 }
1732 
1733 /**
1734  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1735  * @soc: generic soc handle
1736  * @peer_id: peer_id from firmware
1737  * @vdev_id: vdev ID
1738  * @mac_addr: mac address of the peer or wds entry
1739  * @is_wds: flag to indicate peer unmap event for WDS ast entry
1740  * @free_wds_count: number of wds entries freed by FW with peer delete
1741  *
1742  * Return: none
1743  */
1744 void
1745 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1746 			 uint8_t vdev_id, uint8_t *mac_addr,
1747 			 uint8_t is_wds, uint32_t free_wds_count)
1748 {
1749 	struct dp_peer *peer;
1750 
1751 	peer = __dp_peer_find_by_id(soc, peer_id);
1752 
1753 	/*
1754 	 * Currently peer IDs are assigned for vdevs as well as peers.
1755 	 * If the peer ID is for a vdev, then the peer pointer stored
1756 	 * in peer_id_to_obj_map will be NULL.
1757 	 */
1758 	if (!peer) {
1759 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1760 		return;
1761 	}
1762 
1763 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1764 	 */
1765 	if (is_wds) {
1766 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1767 			return;
1768 
1769 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1770 			 peer, peer->peer_id,
1771 			 peer->mac_addr.raw, mac_addr, vdev_id,
1772 			 is_wds);
1773 
1774 		return;
1775 	} else {
1776 		dp_peer_clean_wds_entries(soc, peer, free_wds_count);
1777 	}
1778 
1779 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1780 		soc, peer_id, peer);
1781 
1782 	soc->peer_id_to_obj_map[peer_id] = NULL;
1783 	peer->peer_id = HTT_INVALID_PEER;
1784 
1785 	/*
1786 	 * Reset ast flow mapping table
1787 	 */
1788 	dp_peer_reset_flowq_map(peer);
1789 
1790 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1791 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1792 				peer_id, vdev_id);
1793 	}
1794 
1795 	/*
1796 	 * Remove a reference to the peer.
1797 	 * If there are no more references, delete the peer object.
1798 	 */
1799 	dp_peer_unref_delete(peer);
1800 }
1801 
1802 void
1803 dp_peer_find_detach(struct dp_soc *soc)
1804 {
1805 	dp_peer_find_map_detach(soc);
1806 	dp_peer_find_hash_detach(soc);
1807 	dp_peer_ast_hash_detach(soc);
1808 	dp_peer_ast_table_detach(soc);
1809 }
1810 
1811 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1812 	union hal_reo_status *reo_status)
1813 {
1814 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1815 
1816 	if ((reo_status->rx_queue_status.header.status !=
1817 		HAL_REO_CMD_SUCCESS) &&
1818 		(reo_status->rx_queue_status.header.status !=
1819 		HAL_REO_CMD_DRAIN)) {
1820 		/* Should not happen normally. Just print error for now */
1821 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1822 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1823 			  __func__,
1824 			  reo_status->rx_queue_status.header.status,
1825 			  rx_tid->tid);
1826 	}
1827 }
1828 
1829 /*
1830  * dp_find_peer_by_addr - find peer instance by mac address
1831  * @dev: physical device instance
1832  * @peer_mac_addr: peer mac address
1833  *
1834  * Return: peer instance pointer
1835  */
1836 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
1837 {
1838 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1839 	struct dp_peer *peer;
1840 
1841 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1842 
1843 	if (!peer)
1844 		return NULL;
1845 
1846 	dp_verbose_debug("peer %pK mac: %pM", peer,
1847 			 peer->mac_addr.raw);
1848 
1849 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1850 	 * Decrement it here.
1851 	 */
1852 	dp_peer_unref_delete(peer);
1853 
1854 	return peer;
1855 }
1856 
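/*
 * dp_get_peer_vdev_roaming_in_progress() - check if the peer's vdev is roaming
 * @peer: Datapath peer handle
 *
 * Queries the control-path callback ol_ops->is_roam_inprogress (when
 * registered) with the vdev id resolved from the peer MAC address.
 *
 * Return: true if a roam is in progress on the peer's vdev, false otherwise
 */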
1857 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1858 {
1859 	struct ol_if_ops *ol_ops = NULL;
1860 	bool is_roaming = false;
1861 	uint8_t vdev_id = -1;
1862 	struct cdp_soc_t *soc;
1863 
1864 	if (!peer) {
1865 		dp_info("Peer is NULL. No roaming possible");
1866 		return false;
1867 	}
1868 
1869 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
1870 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1871 
1872 	if (ol_ops && ol_ops->is_roam_inprogress) {
1873 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
1874 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1875 	}
1876 
1877 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1878 		peer->mac_addr.raw, vdev_id, is_roaming);
1879 
1880 	return is_roaming;
1881 }
1882 
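/*
 * dp_rx_tid_update_wifi3() - Update an existing Rx TID REO queue descriptor
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Issues a CMD_UPDATE_RX_REO_QUEUE command to update the BA window size
 * (and the SSN, when start_seq is valid) of the already allocated hardware
 * queue descriptor, and notifies the target through the
 * peer_rx_reorder_queue_setup callback unless the vdev is roaming.
 *
 * Return: QDF_STATUS code
 */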
1883 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1884 					 ba_window_size, uint32_t start_seq)
1885 {
1886 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1887 	struct dp_soc *soc = peer->vdev->pdev->soc;
1888 	struct hal_reo_cmd_params params;
1889 
1890 	qdf_mem_zero(&params, sizeof(params));
1891 
1892 	params.std.need_status = 1;
1893 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1894 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1895 	params.u.upd_queue_params.update_ba_window_size = 1;
1896 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1897 
1898 	if (start_seq < IEEE80211_SEQ_MAX) {
1899 		params.u.upd_queue_params.update_ssn = 1;
1900 		params.u.upd_queue_params.ssn = start_seq;
1901 	} else {
1902 		dp_set_ssn_valid_flag(&params, 0);
1903 	}
1904 
1905 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1906 			    dp_rx_tid_update_cb, rx_tid)) {
1907 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
1908 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1909 	}
1910 
1911 	rx_tid->ba_win_size = ba_window_size;
1912 
1913 	if (dp_get_peer_vdev_roaming_in_progress(peer))
1914 		return QDF_STATUS_E_PERM;
1915 
1916 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
1917 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1918 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
1919 			peer->vdev->vdev_id, peer->mac_addr.raw,
1920 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1921 
1922 	return QDF_STATUS_SUCCESS;
1923 }
1924 
1925 /*
1926  * dp_reo_desc_free() - Callback to free REO descriptor memory after
1927  * HW cache flush
1928  *
1929  * @soc: DP SOC handle
1930  * @cb_ctxt: Callback context
1931  * @reo_status: REO command status
1932  */
1933 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1934 	union hal_reo_status *reo_status)
1935 {
1936 	struct reo_desc_list_node *freedesc =
1937 		(struct reo_desc_list_node *)cb_ctxt;
1938 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1939 	unsigned long curr_ts = qdf_get_system_timestamp();
1940 
1941 	if ((reo_status->fl_cache_status.header.status !=
1942 		HAL_REO_CMD_SUCCESS) &&
1943 		(reo_status->fl_cache_status.header.status !=
1944 		HAL_REO_CMD_DRAIN)) {
1945 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1946 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1947 			  __func__,
1948 			  reo_status->fl_cache_status.header.status,
1949 			  freedesc->rx_tid.tid);
1950 	}
1951 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1952 		  "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
1953 		  curr_ts,
1954 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1955 	qdf_mem_unmap_nbytes_single(soc->osdev,
1956 		rx_tid->hw_qdesc_paddr,
1957 		QDF_DMA_BIDIRECTIONAL,
1958 		rx_tid->hw_qdesc_alloc_size);
1959 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1960 	qdf_mem_free(freedesc);
1961 }
1962 
1963 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
1964 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1965 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1966 {
1967 	if (dma_addr < 0x50000000)
1968 		return QDF_STATUS_E_FAILURE;
1969 	else
1970 		return QDF_STATUS_SUCCESS;
1971 }
1972 #else
1973 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1974 {
1975 	return QDF_STATUS_SUCCESS;
1976 }
1977 #endif
1978 
1979 
1980 /*
1981  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1982  * @peer: Datapath peer handle
1983  * @tid: TID
1984  * @ba_window_size: BlockAck window size
1985  * @start_seq: Starting sequence number
1986  *
1987  * Return: QDF_STATUS code
1988  */
1989 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1990 				 uint32_t ba_window_size, uint32_t start_seq)
1991 {
1992 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1993 	struct dp_vdev *vdev = peer->vdev;
1994 	struct dp_soc *soc = vdev->pdev->soc;
1995 	uint32_t hw_qdesc_size;
1996 	uint32_t hw_qdesc_align;
1997 	int hal_pn_type;
1998 	void *hw_qdesc_vaddr;
1999 	uint32_t alloc_tries = 0;
2000 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2001 
2002 	if (peer->delete_in_progress ||
2003 	    !qdf_atomic_read(&peer->is_default_route_set))
2004 		return QDF_STATUS_E_FAILURE;
2005 
2006 	rx_tid->ba_win_size = ba_window_size;
2007 	if (rx_tid->hw_qdesc_vaddr_unaligned)
2008 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
2009 			start_seq);
2010 	rx_tid->delba_tx_status = 0;
2011 	rx_tid->ppdu_id_2k = 0;
2012 	rx_tid->num_of_addba_req = 0;
2013 	rx_tid->num_of_delba_req = 0;
2014 	rx_tid->num_of_addba_resp = 0;
2015 	rx_tid->num_addba_rsp_failed = 0;
2016 	rx_tid->num_addba_rsp_success = 0;
2017 	rx_tid->delba_tx_success_cnt = 0;
2018 	rx_tid->delba_tx_fail_cnt = 0;
2019 	rx_tid->statuscode = 0;
2020 
2021 	/* TODO: Allocating HW queue descriptors based on max BA window size
2022 	 * for all QOS TIDs so that same descriptor can be used later when
2023 	 * ADDBA request is received. This should be changed to allocate HW
2024 	 * queue descriptors based on BA window size being negotiated (0 for
2025 	 * non BA cases), and reallocate when BA window size changes and also
2026 	 * send WMI message to FW to change the REO queue descriptor in Rx
2027 	 * peer entry as part of dp_rx_tid_update.
2028 	 */
2029 	if (tid != DP_NON_QOS_TID)
2030 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2031 			HAL_RX_MAX_BA_WINDOW, tid);
2032 	else
2033 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2034 			ba_window_size, tid);
2035 
2036 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
2037 	/* To avoid unnecessary extra allocation for alignment, try allocating
2038 	 * exact size and see if we already have aligned address.
2039 	 * exact size and see if we already have an aligned address.
2040 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
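	/* Illustrative example (values are hypothetical): with a required
	 * alignment of 128 bytes, an allocation returned at an address ending
	 * in 0x80 is used directly, while one ending in 0x40 takes the
	 * realloc-with-padding path below and is rounded up via qdf_align().
	 */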
2041 
2042 try_desc_alloc:
2043 	rx_tid->hw_qdesc_vaddr_unaligned =
2044 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
2045 
2046 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2047 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2048 			  "%s: Rx tid HW desc alloc failed: tid %d",
2049 			  __func__, tid);
2050 		return QDF_STATUS_E_NOMEM;
2051 	}
2052 
2053 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
2054 		hw_qdesc_align) {
2055 		/* Address allocated above is not aligned. Allocate extra
2056 		 * memory for alignment
2057 		 */
2058 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2059 		rx_tid->hw_qdesc_vaddr_unaligned =
2060 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
2061 					hw_qdesc_align - 1);
2062 
2063 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2064 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2065 				  "%s: Rx tid HW desc alloc failed: tid %d",
2066 				  __func__, tid);
2067 			return QDF_STATUS_E_NOMEM;
2068 		}
2069 
2070 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
2071 			rx_tid->hw_qdesc_vaddr_unaligned,
2072 			hw_qdesc_align);
2073 
2074 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2075 			  "%s: Total Size %d Aligned Addr %pK",
2076 			  __func__, rx_tid->hw_qdesc_alloc_size,
2077 			  hw_qdesc_vaddr);
2078 
2079 	} else {
2080 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
2081 	}
2082 
2083 	/* TODO: Ensure that sec_type is set before ADDBA is received.
2084 	 * Currently this is set based on htt indication
2085 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
2086 	 */
2087 	switch (peer->security[dp_sec_ucast].sec_type) {
2088 	case cdp_sec_type_tkip_nomic:
2089 	case cdp_sec_type_aes_ccmp:
2090 	case cdp_sec_type_aes_ccmp_256:
2091 	case cdp_sec_type_aes_gcmp:
2092 	case cdp_sec_type_aes_gcmp_256:
2093 		hal_pn_type = HAL_PN_WPA;
2094 		break;
2095 	case cdp_sec_type_wapi:
2096 		if (vdev->opmode == wlan_op_mode_ap)
2097 			hal_pn_type = HAL_PN_WAPI_EVEN;
2098 		else
2099 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2100 		break;
2101 	default:
2102 		hal_pn_type = HAL_PN_NONE;
2103 		break;
2104 	}
2105 
2106 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2107 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2108 
2109 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2110 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2111 		&(rx_tid->hw_qdesc_paddr));
2112 
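	/* On emulation/x86 builds dp_reo_desc_addr_chk() rejects bus addresses
	 * below 0x50000000; retry the allocation (up to 10 attempts) in the
	 * hope of landing on an acceptable address. On other builds the check
	 * always succeeds and this path is never taken.
	 */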
2113 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2114 			QDF_STATUS_SUCCESS) {
2115 		if (alloc_tries++ < 10) {
2116 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2117 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2118 			goto try_desc_alloc;
2119 		} else {
2120 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2121 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
2122 				  __func__, tid);
2123 			err = QDF_STATUS_E_NOMEM;
2124 			goto error;
2125 		}
2126 	}
2127 
2128 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2129 		err = QDF_STATUS_E_PERM;
2130 		goto error;
2131 	}
2132 
2133 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2134 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2135 		    soc->ctrl_psoc,
2136 		    peer->vdev->pdev->pdev_id,
2137 		    peer->vdev->vdev_id,
2138 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2139 		    1, ba_window_size)) {
2140 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2141 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
2142 				  __func__, tid);
2143 			err = QDF_STATUS_E_FAILURE;
2144 			goto error;
2145 		}
2146 	}
2147 	return QDF_STATUS_SUCCESS;
2148 error:
2149 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2150 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2151 		    QDF_STATUS_SUCCESS)
2152 			qdf_mem_unmap_nbytes_single(
2153 				soc->osdev,
2154 				rx_tid->hw_qdesc_paddr,
2155 				QDF_DMA_BIDIRECTIONAL,
2156 				rx_tid->hw_qdesc_alloc_size);
2157 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2158 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2159 	}
2160 	return err;
2161 }
2162 
2163 #ifdef REO_DESC_DEFER_FREE
2164 /*
2165  * dp_reo_desc_clean_up() - If the command to flush the base desc fails,
2166  * add the desc back to the freelist and defer the deletion
2167  *
2168  * @soc: DP SOC handle
2169  * @desc: Base descriptor to be freed
2170  * @reo_status: REO command status
2171  */
2172 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2173 				 struct reo_desc_list_node *desc,
2174 				 union hal_reo_status *reo_status)
2175 {
2176 	desc->free_ts = qdf_get_system_timestamp();
2177 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2178 	qdf_list_insert_back(&soc->reo_desc_freelist,
2179 			     (qdf_list_node_t *)desc);
2180 }
2181 
2182 /*
2183  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
2184  * to the cmd ring to avoid a REO hang
2185  *
2186  * @list_size: REO desc list size to be cleaned
2187  */
2188 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2189 {
2190 	unsigned long curr_ts = qdf_get_system_timestamp();
2191 
2192 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
2193 		dp_err_log("%lu:freedesc number %d in freelist",
2194 			   curr_ts, *list_size);
2195 		/* limit the batch queue size */
2196 		*list_size = REO_DESC_FREELIST_SIZE;
2197 	}
2198 }
2199 #else
2200 /*
2201  * dp_reo_desc_clean_up() - If sending the REO command to flush the
2202  * cache fails, free the base REO desc anyway
2203  *
2204  * @soc: DP SOC handle
2205  * @desc: Base descriptor to be freed
2206  * @reo_status: REO command status
2207  */
2208 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2209 				 struct reo_desc_list_node *desc,
2210 				 union hal_reo_status *reo_status)
2211 {
2212 	if (reo_status) {
2213 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2214 		reo_status->fl_cache_status.header.status = 0;
2215 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2216 	}
2217 }
2218 
2219 /*
2220  * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
2221  * to the cmd ring to avoid a REO hang
2222  *
2223  * @list_size: REO desc list size to be cleaned
2224  */
2225 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2226 {
2227 }
2228 #endif
2229 
2230 /*
2231  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2232  * cmd and re-insert desc into free list if send fails.
2233  *
2234  * @soc: DP SOC handle
2235  * @desc: desc with resend update cmd flag set
2236  * @rx_tid: Desc RX tid associated with update cmd for resetting
2237  * valid field to 0 in h/w
2238  */
2239 static void dp_resend_update_reo_cmd(struct dp_soc *soc,
2240 				     struct reo_desc_list_node *desc,
2241 				     struct dp_rx_tid *rx_tid)
2242 {
2243 	struct hal_reo_cmd_params params;
2244 
2245 	qdf_mem_zero(&params, sizeof(params));
2246 	params.std.need_status = 1;
2247 	params.std.addr_lo =
2248 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2249 	params.std.addr_hi =
2250 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2251 	params.u.upd_queue_params.update_vld = 1;
2252 	params.u.upd_queue_params.vld = 0;
2253 	desc->resend_update_reo_cmd = false;
2254 	/*
2255 	 * If the cmd send fails then set resend_update_reo_cmd flag
2256 	 * and insert the desc at the end of the free list to retry.
2257 	 */
2258 	if (dp_reo_send_cmd(soc,
2259 			    CMD_UPDATE_RX_REO_QUEUE,
2260 			    &params,
2261 			    dp_rx_tid_delete_cb,
2262 			    (void *)desc)
2263 	    != QDF_STATUS_SUCCESS) {
2264 		desc->resend_update_reo_cmd = true;
2265 		desc->free_ts = qdf_get_system_timestamp();
2266 		qdf_list_insert_back(&soc->reo_desc_freelist,
2267 				     (qdf_list_node_t *)desc);
2268 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2269 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2270 	}
2271 }
2272 
2273 /*
2274  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2275  * after deleting the entries (ie., setting valid=0)
2276  *
2277  * @soc: DP SOC handle
2278  * @cb_ctxt: Callback context
2279  * @reo_status: REO command status
2280  */
2281 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2282 			 union hal_reo_status *reo_status)
2283 {
2284 	struct reo_desc_list_node *freedesc =
2285 		(struct reo_desc_list_node *)cb_ctxt;
2286 	uint32_t list_size;
2287 	struct reo_desc_list_node *desc;
2288 	unsigned long curr_ts = qdf_get_system_timestamp();
2289 	uint32_t desc_size, tot_desc_size;
2290 	struct hal_reo_cmd_params params;
2291 
2292 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2293 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2294 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2295 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2296 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
2297 		return;
2298 	} else if (reo_status->rx_queue_status.header.status !=
2299 		HAL_REO_CMD_SUCCESS) {
2300 		/* Should not happen normally. Just print error for now */
2301 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2302 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
2303 			  __func__,
2304 			  reo_status->rx_queue_status.header.status,
2305 			  freedesc->rx_tid.tid);
2306 	}
2307 
2308 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2309 		"%s: rx_tid: %d status: %d", __func__,
2310 		freedesc->rx_tid.tid,
2311 		reo_status->rx_queue_status.header.status);
2312 
2313 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2314 	freedesc->free_ts = curr_ts;
2315 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2316 		(qdf_list_node_t *)freedesc, &list_size);
2317 
2318 	/* On the MCL path the desc is added back to reo_desc_freelist when a
2319 	 * REO FLUSH fails. This can grow the number of pending REO queue descs
2320 	 * in the free list beyond the REO_CMD_RING max size, flooding the REO
2321 	 * command ring and leaving the REO HW in an unexpected state, so the
2322 	 * number of REO commands issued in one batch must be limited.
2323 	 */
2324 	dp_reo_limit_clean_batch_sz(&list_size);
2325 
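	/* Drain descriptors from the front of the freelist while any of the
	 * following holds: the list has grown to REO_DESC_FREELIST_SIZE or
	 * more, the entry at the front has aged past REO_DESC_FREE_DEFER_MS,
	 * or the entry is flagged to resend its UPDATE_RX_REO_QUEUE command.
	 */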
2326 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2327 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2328 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2329 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2330 		(desc->resend_update_reo_cmd && list_size))) {
2331 		struct dp_rx_tid *rx_tid;
2332 
2333 		qdf_list_remove_front(&soc->reo_desc_freelist,
2334 				(qdf_list_node_t **)&desc);
2335 		list_size--;
2336 		rx_tid = &desc->rx_tid;
2337 
2338 		/* First process descs with resend_update_reo_cmd set */
2339 		if (desc->resend_update_reo_cmd) {
2340 			dp_resend_update_reo_cmd(soc, desc, rx_tid);
2341 			continue;
2342 		}
2343 
2344 		/* Flush and invalidate REO descriptor from HW cache: Base and
2345 		 * extension descriptors should be flushed separately */
2346 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2347 		/* Get base descriptor size by passing non-qos TID */
2348 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2349 						   DP_NON_QOS_TID);
2350 
2351 		/* Flush reo extension descriptors */
2352 		while ((tot_desc_size -= desc_size) > 0) {
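		/* Extension descriptors follow the base descriptor in the same
		 * allocation; walk backwards from the end in desc_size steps,
		 * issuing one CMD_FLUSH_CACHE per extension descriptor, until
		 * only the base descriptor (flushed separately below) remains.
		 */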
2353 			qdf_mem_zero(&params, sizeof(params));
2354 			params.std.addr_lo =
2355 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2356 				tot_desc_size) & 0xffffffff;
2357 			params.std.addr_hi =
2358 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2359 
2360 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2361 							CMD_FLUSH_CACHE,
2362 							&params,
2363 							NULL,
2364 							NULL)) {
2365 				dp_err_rl("fail to send CMD_FLUSH_CACHE:"
2366 					  "tid %d desc %pK", rx_tid->tid,
2367 					  (void *)(rx_tid->hw_qdesc_paddr));
2368 				DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2369 			}
2370 		}
2371 
2372 		/* Flush base descriptor */
2373 		qdf_mem_zero(&params, sizeof(params));
2374 		params.std.need_status = 1;
2375 		params.std.addr_lo =
2376 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2377 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2378 
2379 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2380 							  CMD_FLUSH_CACHE,
2381 							  &params,
2382 							  dp_reo_desc_free,
2383 							  (void *)desc)) {
2384 			union hal_reo_status reo_status;
2385 			/*
2386 			 * If dp_reo_send_cmd return failure, related TID queue desc
2387 			 * If dp_reo_send_cmd returns failure, the related TID
2388 			 * queue desc should be unmapped, and the local reo_desc,
2389 			 * together with the TID queue desc, also needs to be freed.
2390 			 *
2391 			 * Invoke the desc_free function directly here to do the
2392 			 * cleanup.
2393 			 *
2394 			 * On the MCL path, add the desc back to the free desc list
2395 			 * and defer the deletion.
2395 			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
2396 				   __func__, rx_tid->tid);
2397 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2398 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2399 		}
2400 	}
2401 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2402 }
2403 
2404 /*
2405  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2406  * @peer: Datapath peer handle
2407  * @tid: TID
2408  *
2409  * Return: 0 on success, error code on failure
2410  */
2411 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2412 {
2413 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2414 	struct dp_soc *soc = peer->vdev->pdev->soc;
2415 	struct hal_reo_cmd_params params;
2416 	struct reo_desc_list_node *freedesc =
2417 		qdf_mem_malloc(sizeof(*freedesc));
2418 
2419 	if (!freedesc) {
2420 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2421 			  "%s: malloc failed for freedesc: tid %d",
2422 			  __func__, tid);
2423 		return -ENOMEM;
2424 	}
2425 
2426 	freedesc->rx_tid = *rx_tid;
2427 	freedesc->resend_update_reo_cmd = false;
2428 
2429 	qdf_mem_zero(&params, sizeof(params));
2430 
2431 	params.std.need_status = 1;
2432 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2433 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2434 	params.u.upd_queue_params.update_vld = 1;
2435 	params.u.upd_queue_params.vld = 0;
2436 
2437 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2438 			    dp_rx_tid_delete_cb, (void *)freedesc)
2439 		!= QDF_STATUS_SUCCESS) {
2440 		/* Defer the clean up to the call back context */
2441 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2442 		freedesc->free_ts = qdf_get_system_timestamp();
2443 		freedesc->resend_update_reo_cmd = true;
2444 		qdf_list_insert_front(&soc->reo_desc_freelist,
2445 				      (qdf_list_node_t *)freedesc);
2446 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2447 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2448 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2449 	}
2450 
2451 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2452 	rx_tid->hw_qdesc_alloc_size = 0;
2453 	rx_tid->hw_qdesc_paddr = 0;
2454 
2455 	return 0;
2456 }
2457 
2458 #ifdef DP_LFR
2459 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2460 {
2461 	int tid;
2462 
2463 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2464 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2465 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2466 			  "Setting up TID %d for peer %pK peer->local_id %d",
2467 			  tid, peer, peer->local_id);
2468 	}
2469 }
2470 #else
2471 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2472 #endif
2473 
2474 /*
2475  * dp_peer_tx_init() – Initialize transmit TID state
2476  * @pdev: Datapath pdev
2477  * @peer: Datapath peer
2478  *
2479  */
2480 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2481 {
2482 	dp_peer_tid_queue_init(peer);
2483 	dp_peer_update_80211_hdr(peer->vdev, peer);
2484 }
2485 
2486 /*
2487  * dp_peer_tx_cleanup() – Deinitialize transmit TID state
2488  * @vdev: Datapath vdev
2489  * @peer: Datapath peer
2490  *
2491  */
2492 static inline void
2493 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2494 {
2495 	dp_peer_tid_queue_cleanup(peer);
2496 }
2497 
2498 /*
2499  * dp_peer_rx_init() – Initialize receive TID state
2500  * @pdev: Datapath pdev
2501  * @peer: Datapath peer
2502  *
2503  */
2504 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2505 {
2506 	int tid;
2507 	struct dp_rx_tid *rx_tid;
2508 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2509 		rx_tid = &peer->rx_tid[tid];
2510 		rx_tid->array = &rx_tid->base;
2511 		rx_tid->base.head = rx_tid->base.tail = NULL;
2512 		rx_tid->tid = tid;
2513 		rx_tid->defrag_timeout_ms = 0;
2514 		rx_tid->ba_win_size = 0;
2515 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2516 
2517 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2518 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2519 	}
2520 
2521 	peer->active_ba_session_cnt = 0;
2522 	peer->hw_buffer_size = 0;
2523 	peer->kill_256_sessions = 0;
2524 
2525 	/* Setup default (non-qos) rx tid queue */
2526 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2527 
2528 	/* Set up the rx tid queue for TID 0 here.
2529 	 * Other queues are set up only when the first packet is received;
2530 	 * without this, that first packet would hit a NULL REO queue error.
2531 	 */
2532 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2533 
2534 	/*
2535 	 * Setup the rest of TID's to handle LFR
2536 	 */
2537 	dp_peer_setup_remaining_tids(peer);
2538 
2539 	/*
2540 	 * Set security defaults: no PN check, no security. The target may
2541 	 * send a HTT SEC_IND message to overwrite these defaults.
2542 	 */
2543 	peer->security[dp_sec_ucast].sec_type =
2544 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2545 }
2546 
2547 /*
2548  * dp_peer_rx_cleanup() – Cleanup receive TID state
2549  * @vdev: Datapath vdev
2550  * @peer: Datapath peer
2551  * @reuse: Peer reference reuse
2552  *
2553  */
2554 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2555 {
2556 	int tid;
2557 	uint32_t tid_delete_mask = 0;
2558 
2559 	dp_info("Remove tids for peer: %pK", peer);
2560 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2561 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2562 
2563 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2564 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2565 			/* Cleanup defrag related resource */
2566 			dp_rx_defrag_waitlist_remove(peer, tid);
2567 			dp_rx_reorder_flush_frag(peer, tid);
2568 		}
2569 
2570 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2571 			dp_rx_tid_delete_wifi3(peer, tid);
2572 
2573 			tid_delete_mask |= (1 << tid);
2574 		}
2575 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2576 	}
2577 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2578 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2579 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2580 			peer->vdev->pdev->pdev_id,
2581 			peer->vdev->vdev_id, peer->mac_addr.raw,
2582 			tid_delete_mask);
2583 	}
2584 #endif
2585 	if (!reuse)
2586 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2587 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2588 }
2589 
2590 #ifdef FEATURE_PERPKT_INFO
2591 /*
2592  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2593  * @peer: Datapath peer
2594  *
2595  * Return: void
2596  */
2597 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2598 {
2599 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2600 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2601 	peer->last_delayed_ba = false;
2602 	peer->last_delayed_ba_ppduid = 0;
2603 }
2604 #else
2605 /*
2606  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2607  * @peer: Datapath peer
2608  *
2609  * Return: void
2610  */
2611 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2612 {
2613 }
2614 #endif
2615 
2616 /*
2617  * dp_peer_cleanup() – Cleanup peer information
2618  * @vdev: Datapath vdev
2619  * @peer: Datapath peer
2620  * @reuse: Peer reference reuse
2621  *
2622  */
2623 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2624 {
2625 	dp_peer_tx_cleanup(vdev, peer);
2626 
2627 	/* cleanup the Rx reorder queues for this peer */
2628 	dp_peer_rx_cleanup(vdev, peer, reuse);
2629 }
2630 
2631 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2632  *                                window size when a request with
2633  *                                64 window size is received.
2634  *                                This is done as a WAR since HW can
2635  *                                have only one setting per peer (64 or 256).
2636  *                                For HKv2, we use per tid buffersize setting
2637  *                                for 0 to per_tid_basize_max_tid. For tid
2638  *                                more than per_tid_basize_max_tid we use HKv1
2639  *                                method.
2640  * @peer: Datapath peer
2641  *
2642  * Return: void
2643  */
2644 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2645 {
2646 	uint8_t delba_rcode = 0;
2647 	int tid;
2648 	struct dp_rx_tid *rx_tid = NULL;
2649 
2650 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2651 	for (; tid < DP_MAX_TIDS; tid++) {
2652 		rx_tid = &peer->rx_tid[tid];
2653 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2654 
2655 		if (rx_tid->ba_win_size <= 64) {
2656 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2657 			continue;
2658 		} else {
2659 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2660 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2661 				/* send delba */
2662 				if (!rx_tid->delba_tx_status) {
2663 					rx_tid->delba_tx_retry++;
2664 					rx_tid->delba_tx_status = 1;
2665 					rx_tid->delba_rcode =
2666 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2667 					delba_rcode = rx_tid->delba_rcode;
2668 
2669 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2670 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2671 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2672 							peer->vdev->pdev->soc->ctrl_psoc,
2673 							peer->vdev->vdev_id,
2674 							peer->mac_addr.raw,
2675 							tid, delba_rcode);
2676 				} else {
2677 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2678 				}
2679 			} else {
2680 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2681 			}
2682 		}
2683 	}
2684 }
2685 
2686 /*
2687 * dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
2688 *
2689 * @soc: Datapath soc handle
2690 * @peer_mac: Datapath peer mac address
2691 * @vdev_id: id of datapath vdev
2692 * @tid: TID number
2693 * @status: tx completion status
2694 * Return: 0 on success, error code on failure
2695 */
2696 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
2697 				      uint8_t *peer_mac,
2698 				      uint16_t vdev_id,
2699 				      uint8_t tid, int status)
2700 {
2701 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2702 						       peer_mac, 0, vdev_id);
2703 	struct dp_rx_tid *rx_tid = NULL;
2704 
2705 	if (!peer || peer->delete_in_progress) {
2706 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2707 			  "%s: Peer is NULL!\n", __func__);
2708 		goto fail;
2709 	}
2710 	rx_tid = &peer->rx_tid[tid];
2711 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2712 	if (status) {
2713 		rx_tid->num_addba_rsp_failed++;
2714 		dp_rx_tid_update_wifi3(peer, tid, 1,
2715 				       IEEE80211_SEQ_MAX);
2716 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2717 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2718 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2719 
2720 		goto success;
2721 	}
2722 
2723 	rx_tid->num_addba_rsp_success++;
2724 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2725 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2726 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2727 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2728 			__func__, tid);
2729 		goto fail;
2730 	}
2731 
2732 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2733 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2734 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2735 			  "%s: default route is not set for peer: %pM",
2736 			  __func__, peer->mac_addr.raw);
2737 		goto fail;
2738 	}
2739 
2740 	if (dp_rx_tid_update_wifi3(peer, tid,
2741 				   rx_tid->ba_win_size,
2742 				   rx_tid->startseqnum)) {
2743 		dp_err("%s: failed update REO SSN", __func__);
2744 	}
2745 
2746 	dp_info("%s: tid %u window_size %u start_seq_num %u",
2747 		__func__, tid, rx_tid->ba_win_size,
2748 		rx_tid->startseqnum);
2749 
2750 	/* First Session */
2751 	if (peer->active_ba_session_cnt == 0) {
2752 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2753 			peer->hw_buffer_size = 256;
2754 		else
2755 			peer->hw_buffer_size = 64;
2756 	}
2757 
2758 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2759 
2760 	peer->active_ba_session_cnt++;
2761 
2762 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2763 
2764 	/* Kill any session having 256 buffer size
2765 	 * when 64 buffer size request is received.
2766 	 * Also, latch on to 64 as new buffer size.
2767 	 */
2768 	if (peer->kill_256_sessions) {
2769 		dp_teardown_256_ba_sessions(peer);
2770 		peer->kill_256_sessions = 0;
2771 	}
2772 
2773 success:
2774 	dp_peer_unref_delete(peer);
2775 	return QDF_STATUS_SUCCESS;
2776 
2777 fail:
2778 	if (peer)
2779 		dp_peer_unref_delete(peer);
2780 
2781 	return QDF_STATUS_E_FAILURE;
2782 }
2783 
2784 /*
2785 * dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
2786 *
2787 * @soc: Datapath soc handle
2788 * @peer_mac: Datapath peer mac address
2789 * @vdev_id: id of datapath vdev
2790 * @tid: TID number
2791 * @dialogtoken: output dialogtoken
2792 * @statuscode: output status code
2793 * @buffersize: Output BA window size
2794 * @batimeout: Output BA timeout
2795 */
2796 QDF_STATUS
2797 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2798 			     uint16_t vdev_id, uint8_t tid,
2799 			     uint8_t *dialogtoken, uint16_t *statuscode,
2800 			     uint16_t *buffersize, uint16_t *batimeout)
2801 {
2802 	struct dp_rx_tid *rx_tid = NULL;
2803 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2804 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2805 						       peer_mac, 0, vdev_id);
2806 
2807 	if (!peer || peer->delete_in_progress) {
2808 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2809 			  "%s: Peer is NULL!\n", __func__);
2810 		status = QDF_STATUS_E_FAILURE;
2811 		goto fail;
2812 	}
2813 	rx_tid = &peer->rx_tid[tid];
2814 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2815 	rx_tid->num_of_addba_resp++;
2816 	/* setup ADDBA response parameters */
2817 	*dialogtoken = rx_tid->dialogtoken;
2818 	*statuscode = rx_tid->statuscode;
2819 	*buffersize = rx_tid->ba_win_size;
2820 	*batimeout  = 0;
2821 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2822 
2823 fail:
2824 	if (peer)
2825 		dp_peer_unref_delete(peer);
2826 
2827 	return status;
2828 }
2829 
2830 /* dp_check_ba_buffersize() - Check buffer size in request
2831  *                            and latch onto this size based on
2832  *                            size used in first active session.
2833  * @peer: Datapath peer
2834  * @tid: Tid
2835  * @buffersize: Block ack window size
2836  *
2837  * Return: void
2838  */
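/*
 * Illustrative sequence, assuming per_tid_basize_max_tid is 0 so the legacy
 * per-peer latching below applies: the first ADDBA with a 256 window latches
 * hw_buffer_size to 256 (in dp_addba_resp_tx_completion_wifi3()); a later
 * ADDBA requesting 64 then drops hw_buffer_size to 64 and sets
 * kill_256_sessions, so dp_teardown_256_ba_sessions() tears down the
 * existing 256-sized sessions.
 */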
2839 static void dp_check_ba_buffersize(struct dp_peer *peer,
2840 				   uint16_t tid,
2841 				   uint16_t buffersize)
2842 {
2843 	struct dp_rx_tid *rx_tid = NULL;
2844 
2845 	rx_tid = &peer->rx_tid[tid];
2846 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2847 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2848 		rx_tid->ba_win_size = buffersize;
2849 		return;
2850 	} else {
2851 		if (peer->active_ba_session_cnt == 0) {
2852 			rx_tid->ba_win_size = buffersize;
2853 		} else {
2854 			if (peer->hw_buffer_size == 64) {
2855 				if (buffersize <= 64)
2856 					rx_tid->ba_win_size = buffersize;
2857 				else
2858 					rx_tid->ba_win_size = peer->hw_buffer_size;
2859 			} else if (peer->hw_buffer_size == 256) {
2860 				if (buffersize > 64) {
2861 					rx_tid->ba_win_size = buffersize;
2862 				} else {
2863 					rx_tid->ba_win_size = buffersize;
2864 					peer->hw_buffer_size = 64;
2865 					peer->kill_256_sessions = 1;
2866 				}
2867 			}
2868 		}
2869 	}
2870 }
2871 
2872 #define DP_RX_BA_SESSION_DISABLE  1
2873 
2874 /*
2875  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2876  *
2877  * @soc: Datapath soc handle
2878  * @peer_mac: Datapath peer mac address
2879  * @vdev_id: id of atapath vdev
2880  * @vdev_id: id of datapath vdev
2881  * @tid: TID number
2882  * @batimeout: BA timeout
2883  * @buffersize: BA window size
2884  * @startseqnum: Start seq. number received in BA sequence control
2885  *
2886  * Return: 0 on success, error code on failure
2887  */
2888 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
2889 				  uint8_t *peer_mac,
2890 				  uint16_t vdev_id,
2891 				  uint8_t dialogtoken,
2892 				  uint16_t tid, uint16_t batimeout,
2893 				  uint16_t buffersize,
2894 				  uint16_t startseqnum)
2895 {
2896 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2897 	struct dp_rx_tid *rx_tid = NULL;
2898 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2899 						       peer_mac, 0, vdev_id);
2900 
2901 	if (!peer || peer->delete_in_progress) {
2902 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2903 			  "%s: Peer is NULL!\n", __func__);
2904 		status = QDF_STATUS_E_FAILURE;
2905 		goto fail;
2906 	}
2907 	rx_tid = &peer->rx_tid[tid];
2908 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2909 	rx_tid->num_of_addba_req++;
2910 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2911 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
2912 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2913 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2914 		peer->active_ba_session_cnt--;
2915 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2916 			  "%s: Rx Tid- %d hw qdesc is already setup",
2917 			__func__, tid);
2918 	}
2919 
2920 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2921 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2922 		status = QDF_STATUS_E_FAILURE;
2923 		goto fail;
2924 	}
2925 
2926 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
2927 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2928 			  "%s disable BA session",
2929 			    __func__);
2930 
2931 		buffersize = 1;
2932 	} else if (rx_tid->rx_ba_win_size_override) {
2933 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2934 			  "%s override BA win to %d", __func__,
2935 			      rx_tid->rx_ba_win_size_override);
2936 
2937 		buffersize = rx_tid->rx_ba_win_size_override;
2938 	} else {
2939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2940 			  "%s restore BA win %d based on addba req",
2941 			    __func__, buffersize);
2942 	}
2943 
2944 	dp_check_ba_buffersize(peer, tid, buffersize);
2945 
2946 	if (dp_rx_tid_setup_wifi3(peer, tid,
2947 	    rx_tid->ba_win_size, startseqnum)) {
2948 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2949 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2950 		status = QDF_STATUS_E_FAILURE;
2951 		goto fail;
2952 	}
2953 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2954 
2955 	rx_tid->dialogtoken = dialogtoken;
2956 	rx_tid->startseqnum = startseqnum;
2957 
2958 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2959 		rx_tid->statuscode = rx_tid->userstatuscode;
2960 	else
2961 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2962 
2963 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
2964 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
2965 
2966 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2967 
2968 fail:
2969 	if (peer)
2970 		dp_peer_unref_delete(peer);
2971 
2972 	return status;
2973 }
2974 
2975 /*
2976 * dp_set_addba_response() – Set a user defined ADDBA response status code
2977 *
2978 * @soc: Datapath soc handle
2979 * @peer_mac: Datapath peer mac address
2980 * @vdev_id: id of datapath vdev
2981 * @tid: TID number
2982 * @statuscode: response status code to be set
2983 */
2984 QDF_STATUS
2985 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2986 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
2987 {
2988 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2989 						       peer_mac, 0, vdev_id);
2990 	struct dp_rx_tid *rx_tid;
2991 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2992 
2993 	if (!peer || peer->delete_in_progress) {
2994 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2995 			  "%s: Peer is NULL!\n", __func__);
2996 		status = QDF_STATUS_E_FAILURE;
2997 		goto fail;
2998 	}
2999 
3000 	rx_tid = &peer->rx_tid[tid];
3001 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3002 	rx_tid->userstatuscode = statuscode;
3003 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3004 fail:
3005 	if (peer)
3006 		dp_peer_unref_delete(peer);
3007 
3008 	return status;
3009 }
3010 
3011 /*
3012 * dp_delba_process_wifi3() – Process DELBA from peer
3013 * @soc: Datapath soc handle
3014 * @peer_mac: Datapath peer mac address
3015 * @vdev_id: id of datapath vdev
3016 * @tid: TID number
3017 * @reasoncode: Reason code received in DELBA frame
3018 *
3019 * Return: 0 on success, error code on failure
3020 */
3021 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3022 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
3023 {
3024 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3025 	struct dp_rx_tid *rx_tid;
3026 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3027 						      peer_mac, 0, vdev_id);
3028 
3029 	if (!peer || peer->delete_in_progress) {
3030 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3031 			  "%s: Peer is NULL!\n", __func__);
3032 		status = QDF_STATUS_E_FAILURE;
3033 		goto fail;
3034 	}
3035 	rx_tid = &peer->rx_tid[tid];
3036 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3037 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
3038 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3039 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3040 		status = QDF_STATUS_E_FAILURE;
3041 		goto fail;
3042 	}
3043 	/* TODO: See if we can delete the existing REO queue descriptor and
3044 	 * replace with a new one without queue extension descriptors to save
3045 	 * memory
3046 	 */
3047 	rx_tid->delba_rcode = reasoncode;
3048 	rx_tid->num_of_delba_req++;
3049 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3050 
3051 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
3052 	peer->active_ba_session_cnt--;
3053 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3054 fail:
3055 	if (peer)
3056 		dp_peer_unref_delete(peer);
3057 
3058 	return status;
3059 }
3060 
3061 /*
3062  * dp_delba_tx_completion_wifi3() – Handle DELBA tx completion
3063  *
3064  * @soc: Datapath soc handle
3065  * @peer_mac: Datapath peer mac address
3066  * @vdev_id: id of datapath vdev
3067  * @tid: TID number
3068  * @status: tx completion status
3069  * Return: 0 on success, error code on failure
3070  */
3071 
3072 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3073 				 uint16_t vdev_id,
3074 				 uint8_t tid, int status)
3075 {
3076 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
3077 	struct dp_rx_tid *rx_tid = NULL;
3078 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3079 						      peer_mac, 0, vdev_id);
3080 
3081 	if (!peer || peer->delete_in_progress) {
3082 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3083 			  "%s: Peer is NULL!", __func__);
3084 		ret = QDF_STATUS_E_FAILURE;
3085 		goto end;
3086 	}
3087 	rx_tid = &peer->rx_tid[tid];
3088 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3089 	if (status) {
3090 		rx_tid->delba_tx_fail_cnt++;
3091 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
3092 			rx_tid->delba_tx_retry = 0;
3093 			rx_tid->delba_tx_status = 0;
3094 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3095 		} else {
3096 			rx_tid->delba_tx_retry++;
3097 			rx_tid->delba_tx_status = 1;
3098 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3099 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3100 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3101 					peer->vdev->pdev->soc->ctrl_psoc,
3102 					peer->vdev->vdev_id,
3103 					peer->mac_addr.raw, tid,
3104 					rx_tid->delba_rcode);
3105 		}
3106 		goto end;
3107 	} else {
3108 		rx_tid->delba_tx_success_cnt++;
3109 		rx_tid->delba_tx_retry = 0;
3110 		rx_tid->delba_tx_status = 0;
3111 	}
3112 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
3113 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3114 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3115 		peer->active_ba_session_cnt--;
3116 	}
3117 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3118 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3119 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3120 	}
3121 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3122 
3123 end:
3124 	if (peer)
3125 		dp_peer_unref_delete(peer);
3126 
3127 	return ret;
3128 }
3129 
3130 /**
3131  * dp_set_pn_check_wifi3() - enable PN check in REO for security
3132  * @soc: Datapath soc handle
3133  * @vdev_id: id of datapath vdev
3134  * @peer_mac: Datapath peer mac address
3135  * @sec_type: security type
3136  * @rx_pn: Receive pn starting number
3139  *
3140  */
3141 
3142 QDF_STATUS
3143 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3144 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3145 		      uint32_t *rx_pn)
3146 {
3147 	struct dp_pdev *pdev;
3148 	int i;
3149 	uint8_t pn_size;
3150 	struct hal_reo_cmd_params params;
3151 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3152 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3153 				peer_mac, 0, vdev_id);
3154 	struct dp_vdev *vdev =
3155 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
3156 						   vdev_id);
3157 
3158 	if (!vdev || !peer || peer->delete_in_progress) {
3159 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3160 			  "%s: Peer is NULL!\n", __func__);
3161 		status = QDF_STATUS_E_FAILURE;
3162 		goto fail;
3163 	}
3164 
3165 	pdev = vdev->pdev;
3166 	qdf_mem_zero(&params, sizeof(params));
3167 
3168 	params.std.need_status = 1;
3169 	params.u.upd_queue_params.update_pn_valid = 1;
3170 	params.u.upd_queue_params.update_pn_size = 1;
3171 	params.u.upd_queue_params.update_pn = 1;
3172 	params.u.upd_queue_params.update_pn_check_needed = 1;
3173 	params.u.upd_queue_params.update_svld = 1;
3174 	params.u.upd_queue_params.svld = 0;
3175 
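	/* Pick the PN size for the REO PN check based on the cipher: 48-bit
	 * PN for TKIP/CCMP/GCMP, 128-bit PN for WAPI (with even/uneven PN
	 * selection depending on AP vs. non-AP operation), and no PN check
	 * for the remaining security types.
	 */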
3176 	switch (sec_type) {
3177 	case cdp_sec_type_tkip_nomic:
3178 	case cdp_sec_type_aes_ccmp:
3179 	case cdp_sec_type_aes_ccmp_256:
3180 	case cdp_sec_type_aes_gcmp:
3181 	case cdp_sec_type_aes_gcmp_256:
3182 		params.u.upd_queue_params.pn_check_needed = 1;
3183 		params.u.upd_queue_params.pn_size = 48;
3184 		pn_size = 48;
3185 		break;
3186 	case cdp_sec_type_wapi:
3187 		params.u.upd_queue_params.pn_check_needed = 1;
3188 		params.u.upd_queue_params.pn_size = 128;
3189 		pn_size = 128;
3190 		if (vdev->opmode == wlan_op_mode_ap) {
3191 			params.u.upd_queue_params.pn_even = 1;
3192 			params.u.upd_queue_params.update_pn_even = 1;
3193 		} else {
3194 			params.u.upd_queue_params.pn_uneven = 1;
3195 			params.u.upd_queue_params.update_pn_uneven = 1;
3196 		}
3197 		break;
3198 	default:
3199 		params.u.upd_queue_params.pn_check_needed = 0;
3200 		pn_size = 0;
3201 		break;
3202 	}
3203 
3204 
3205 	for (i = 0; i < DP_MAX_TIDS; i++) {
3206 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3207 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3208 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3209 			params.std.addr_lo =
3210 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3211 			params.std.addr_hi =
3212 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3213 
3214 			if (pn_size) {
3215 				QDF_TRACE(QDF_MODULE_ID_DP,
3216 					  QDF_TRACE_LEVEL_INFO_HIGH,
3217 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3218 					  __func__, i, rx_pn[3], rx_pn[2],
3219 					  rx_pn[1], rx_pn[0]);
3220 				params.u.upd_queue_params.update_pn_valid = 1;
3221 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3222 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3223 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3224 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3225 			}
3226 			rx_tid->pn_size = pn_size;
3227 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3228 					    CMD_UPDATE_RX_REO_QUEUE,
3229 					    &params, dp_rx_tid_update_cb,
3230 					    rx_tid)) {
3231 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3232 					   "tid %d desc %pK", rx_tid->tid,
3233 					   (void *)(rx_tid->hw_qdesc_paddr));
3234 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3235 					     rx.err.reo_cmd_send_fail, 1);
3236 			}
3237 		} else {
3238 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3239 				  "PN Check not setup for TID :%d ", i);
3240 		}
3241 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3242 	}
3243 fail:
3244 	if (peer)
3245 		dp_peer_unref_delete(peer);
3246 
3247 	return status;
3248 }
3249 
3250 
3251 /**
3252  * dp_set_key_sec_type_wifi3() - set security mode of key
3253  * @soc: Datapath soc handle
3254  * @vdev_id: id of datapath vdev
3255  * @peer_mac: Datapath peer mac address
3256  * @sec_type: security type
3257  * @is_unicast: key type (unicast or multicast)
3260  *
3261  */
3262 
3263 QDF_STATUS
3264 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3265 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3266 			  bool is_unicast)
3267 {
3268 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3269 				peer_mac, 0, vdev_id);
3270 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3271 	int sec_index;
3272 
3273 	if (!peer || peer->delete_in_progress) {
3274 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3275 			  "%s: Peer is NULL!\n", __func__);
3276 		status = QDF_STATUS_E_FAILURE;
3277 		goto fail;
3278 	}
3279 
3280 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3281 		  "key sec spec for peer %pK %pM: %s key of type %d",
3282 		  peer,
3283 		  peer->mac_addr.raw,
3284 		  is_unicast ? "ucast" : "mcast",
3285 		  sec_type);
3286 
3287 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3288 	peer->security[sec_index].sec_type = sec_type;
3289 
3290 fail:
3291 	if (peer)
3292 		dp_peer_unref_delete(peer);
3293 
3294 	return status;
3295 }
3296 
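/*
 * dp_rx_sec_ind_handler() - handle HTT SEC_IND security indication from target
 * @soc: Datapath soc handle
 * @peer_id: peer id from firmware
 * @sec_type: security type negotiated for the key
 * @is_unicast: whether the key is a unicast or multicast key
 * @michael_key: TKIP michael key (currently unused, see notyet block below)
 * @rx_pn: receive PN starting value (currently unused, see notyet block below)
 *
 * Records the negotiated security type for the peer in the appropriate
 * (ucast/mcast) slot so that later TID setup picks the right PN handling.
 */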
3297 void
3298 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3299 		      enum cdp_sec_type sec_type, int is_unicast,
3300 		      u_int32_t *michael_key,
3301 		      u_int32_t *rx_pn)
3302 {
3303 	struct dp_peer *peer;
3304 	int sec_index;
3305 
3306 	peer = dp_peer_find_by_id(soc, peer_id);
3307 	if (!peer) {
3308 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3309 			  "Couldn't find peer from ID %d - skipping security inits",
3310 			  peer_id);
3311 		return;
3312 	}
3313 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3314 		  "sec spec for peer %pK %pM: %s key of type %d",
3315 		  peer,
3316 		  peer->mac_addr.raw,
3317 		  is_unicast ? "ucast" : "mcast",
3318 		  sec_type);
3319 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3320 	peer->security[sec_index].sec_type = sec_type;
3321 #ifdef notyet /* TODO: See if this is required for defrag support */
3322 	/* michael key only valid for TKIP, but for simplicity,
3323 	 * copy it anyway
3324 	 */
3325 	qdf_mem_copy(
3326 		&peer->security[sec_index].michael_key[0],
3327 		michael_key,
3328 		sizeof(peer->security[sec_index].michael_key));
3329 #ifdef BIG_ENDIAN_HOST
3330 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3331 				 sizeof(peer->security[sec_index].michael_key));
3332 #endif /* BIG_ENDIAN_HOST */
3333 #endif
3334 
3335 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3336 	if (sec_type != cdp_sec_type_wapi) {
3337 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3338 	} else {
3339 		for (i = 0; i < DP_MAX_TIDS; i++) {
3340 			/*
3341 			 * Setting PN valid bit for WAPI sec_type,
3342 			 * since WAPI PN has to be started with predefined value
3343 			 */
3344 			peer->tids_last_pn_valid[i] = 1;
3345 			qdf_mem_copy(
3346 				(u_int8_t *) &peer->tids_last_pn[i],
3347 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3348 			peer->tids_last_pn[i].pn128[1] =
3349 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3350 			peer->tids_last_pn[i].pn128[0] =
3351 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3352 		}
3353 	}
3354 #endif
3355 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3356 	 * all security types and last pn for WAPI) once REO command API
3357 	 * is available
3358 	 */
3359 
3360 	dp_peer_unref_del_find_by_id(peer);
3361 }
3362 
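/*
 * dp_rx_delba_ind_handler() - handle DELBA indication from firmware
 * @soc_handle: Datapath soc handle
 * @peer_id: peer id from firmware
 * @tid: TID number
 * @win_sz: BA window size suggested by firmware
 *
 * Records a reduced rx BA window override (capped at 63) for the TID and
 * sends a DELBA to the peer through ol_ops->send_delba so that the BA
 * session is renegotiated with the smaller window.
 */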
3363 QDF_STATUS
3364 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
3365 			uint8_t tid, uint16_t win_sz)
3366 {
3367 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3368 	struct dp_peer *peer;
3369 	struct dp_rx_tid *rx_tid;
3370 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3371 
3372 	peer = dp_peer_find_by_id(soc, peer_id);
3373 
3374 	if (!peer) {
3375 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3376 			  "Couldn't find peer from ID %d",
3377 			  peer_id);
3378 		return QDF_STATUS_E_FAILURE;
3379 	}
3380 
3381 	qdf_assert_always(tid < DP_MAX_TIDS);
3382 
3383 	rx_tid = &peer->rx_tid[tid];
3384 
3385 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3386 		if (!rx_tid->delba_tx_status) {
3387 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3388 				  "%s: PEER_ID: %d TID: %d, BA win: %d ",
3389 				  __func__, peer_id, tid, win_sz);
3390 
3391 			qdf_spin_lock_bh(&rx_tid->tid_lock);
3392 
3393 			rx_tid->delba_tx_status = 1;
3394 
3395 			rx_tid->rx_ba_win_size_override =
3396 			    qdf_min((uint16_t)63, win_sz);
3397 
3398 			rx_tid->delba_rcode =
3399 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
3400 
3401 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3402 
3403 			if (soc->cdp_soc.ol_ops->send_delba)
3404 				soc->cdp_soc.ol_ops->send_delba(
3405 					peer->vdev->pdev->soc->ctrl_psoc,
3406 					peer->vdev->vdev_id,
3407 					peer->mac_addr.raw,
3408 					tid,
3409 					rx_tid->delba_rcode);
3410 		}
3411 	} else {
3412 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3413 			  "BA session is not setup for TID:%d ", tid);
3414 		status = QDF_STATUS_E_FAILURE;
3415 	}
3416 
3417 	dp_peer_unref_del_find_by_id(peer);
3418 
3419 	return status;
3420 }
3421 
3422 #ifdef DP_PEER_EXTENDED_API
3423 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3424 			    struct ol_txrx_desc_type *sta_desc)
3425 {
3426 	struct dp_peer *peer;
3427 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3428 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3429 
3430 	if (!pdev)
3431 		return QDF_STATUS_E_FAULT;
3432 
3433 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
3434 				    sta_desc->peer_addr.bytes);
3435 
3436 	if (!peer)
3437 		return QDF_STATUS_E_FAULT;
3438 
3439 	qdf_spin_lock_bh(&peer->peer_info_lock);
3440 	peer->state = OL_TXRX_PEER_STATE_CONN;
3441 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3442 
3443 	dp_rx_flush_rx_cached(peer, false);
3444 
3445 	return QDF_STATUS_SUCCESS;
3446 }
3447 
3448 QDF_STATUS
3449 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3450 	      struct qdf_mac_addr peer_addr)
3451 {
3452 	struct dp_peer *peer;
3453 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3454 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3455 
3456 	if (!pdev)
3457 		return QDF_STATUS_E_FAULT;
3458 
3459 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3460 	if (!peer || !peer->valid)
3461 		return QDF_STATUS_E_FAULT;
3462 
3463 	dp_clear_peer_internal(soc, peer);
3464 	return QDF_STATUS_SUCCESS;
3465 }
3466 
3467 /**
3468  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
3469  * @pdev - data path device instance
3470  * @vdev - virtual interface instance
3471  * @peer_addr - peer mac address
3472  *
3473  * Find peer by peer mac address within vdev
3474  *
3475  * Return: peer instance void pointer
3476  *         NULL cannot find target peer
3477  */
3478 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3479 		struct cdp_vdev *vdev_handle,
3480 		uint8_t *peer_addr)
3481 {
3482 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3483 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3484 	struct dp_peer *peer;
3485 
3486 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);
3487 
3488 	if (!peer)
3489 		return NULL;
3490 
3491 	if (peer->vdev != vdev) {
3492 		dp_peer_unref_delete(peer);
3493 		return NULL;
3494 	}
3495 
3496 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3497 	 * Decrement it here.
3498 	 */
3499 	dp_peer_unref_delete(peer);
3500 
3501 	return peer;
3502 }
3503 
3504 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3505 				enum ol_txrx_peer_state state)
3506 {
3507 	struct dp_peer *peer;
3508 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3509 
3510 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3511 	if (!peer) {
3512 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3513 			  "Failed to find peer for: [%pM]", peer_mac);
3514 		return QDF_STATUS_E_FAILURE;
3515 	}
3516 	peer->state = state;
3517 
3518 	dp_info("peer %pK state %d", peer, peer->state);
3519 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3520 	 * Decrement it here.
3521 	 */
3522 	dp_peer_unref_delete(peer);
3523 
3524 	return QDF_STATUS_SUCCESS;
3525 }
3526 
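/**
 * dp_get_vdevid() - get the vdev id of the peer
 * @soc_hdl: datapath soc handle
 * @peer_mac: peer MAC address
 * @vdev_id: output parameter filled with the peer's vdev id
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found
 */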
3527 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3528 			 uint8_t *vdev_id)
3529 {
3530 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3531 	struct dp_peer *peer =
3532 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3533 
3534 	if (!peer)
3535 		return QDF_STATUS_E_FAILURE;
3536 
3537 	dp_info("peer %pK vdev %pK vdev id %d",
3538 		peer, peer->vdev, peer->vdev->vdev_id);
3539 	*vdev_id = peer->vdev->vdev_id;
3540 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3541 	 * Decrement it here.
3542 	 */
3543 	dp_peer_unref_delete(peer);
3544 
3545 	return QDF_STATUS_SUCCESS;
3546 }
3547 
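/**
 * dp_get_vdev_by_peer_addr() - get the vdev of a peer by its MAC address
 * @pdev_handle: physical device instance
 * @peer_addr: peer MAC address
 *
 * Return: vdev handle of the peer
 *         NULL if the pdev or the peer cannot be found
 */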
3548 struct cdp_vdev *
3549 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3550 			 struct qdf_mac_addr peer_addr)
3551 {
3552 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3553 	struct dp_peer *peer = NULL;
3554 
3555 	if (!pdev) {
3556 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3557 			  "PDEV not found for peer_addr: %pM",
3558 			  peer_addr.bytes);
3559 		return NULL;
3560 	}
3561 
3562 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3563 	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: %pM",
			  peer_addr.bytes);
3567 		return NULL;
3568 	}
3569 
3570 	return (struct cdp_vdev *)peer->vdev;
3571 }
3572 
/**
 * dp_get_vdev_for_peer() - Get the virtual interface the peer belongs to
 * @peer_handle: peer instance
 *
 * Return: virtual interface instance pointer
 *         NULL if it cannot be found
 */
3582 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3583 {
3584 	struct dp_peer *peer = peer_handle;
3585 
3586 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3587 	return (struct cdp_vdev *)peer->vdev;
3588 }
3589 
/**
 * dp_peer_get_peer_mac_addr() - Get peer MAC address
 * @peer_handle: peer instance
 *
 * Return: pointer to the peer MAC address
 *         NULL if it cannot be found
 */
3599 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3600 {
3601 	struct dp_peer *peer = peer_handle;
3602 	uint8_t *mac;
3603 
3604 	mac = peer->mac_addr.raw;
3605 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3606 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3607 	return peer->mac_addr.raw;
3608 }
3609 
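/**
 * dp_get_peer_state() - get the current state of the peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual device instance id
 * @peer_mac: peer MAC address
 *
 * Return: peer state on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found
 */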
3610 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3611 		      uint8_t *peer_mac)
3612 {
3613 	enum ol_txrx_peer_state peer_state;
3614 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3615 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
3616 						       vdev_id);
3617 
3618 	if (!peer)
3619 		return QDF_STATUS_E_FAILURE;
3620 
	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3622 	peer_state = peer->state;
3623 	dp_peer_unref_delete(peer);
3624 
3625 	return peer_state;
3626 }
3627 
/**
 * dp_local_peer_id_pool_init() - initialize the local peer ID pool
 * @pdev: data path device instance
 *
 * Initialize the local peer ID freelist for the given physical device.
 *
 * Return: none
 */
3636 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3637 {
3638 	int i;
3639 
3640 	/* point the freelist to the first ID */
3641 	pdev->local_peer_ids.freelist = 0;
3642 
3643 	/* link each ID to the next one */
3644 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3645 		pdev->local_peer_ids.pool[i] = i + 1;
3646 		pdev->local_peer_ids.map[i] = NULL;
3647 	}
3648 
3649 	/* link the last ID to itself, to mark the end of the list */
3650 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3651 	pdev->local_peer_ids.pool[i] = i;
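	/*
	 * Resulting layout (illustration), with N = OL_TXRX_NUM_LOCAL_PEER_IDS:
	 * pool[0] = 1, pool[1] = 2, ..., pool[N - 1] = N and pool[N] = N.
	 * The freelist starts at ID 0 and the self-referencing entry at
	 * index N marks its end.
	 */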
3652 
3653 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3654 	DP_TRACE(INFO, "Peer pool init");
3655 }
3656 
/**
 * dp_local_peer_id_alloc() - allocate a local peer ID
 * @pdev: data path device instance
 * @peer: new peer instance
 *
 * Allocate a local peer ID for the new peer; OL_TXRX_INVALID_LOCAL_PEER_ID
 * is assigned when the pool is exhausted.
 *
 * Return: none
 */
3666 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3667 {
3668 	int i;
3669 
3670 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3671 	i = pdev->local_peer_ids.freelist;
3672 	if (pdev->local_peer_ids.pool[i] == i) {
3673 		/* the list is empty, except for the list-end marker */
3674 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3675 	} else {
3676 		/* take the head ID and advance the freelist */
3677 		peer->local_id = i;
3678 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3679 		pdev->local_peer_ids.map[i] = peer;
3680 	}
3681 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3682 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3683 }
3684 
/**
 * dp_local_peer_id_free() - release a local peer ID
 * @pdev: data path device instance
 * @peer: peer instance whose local ID should be released
 *
 * Put the peer's local ID back at the head of the freelist.
 *
 * Return: none
 */
3694 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3695 {
3696 	int i = peer->local_id;
3697 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3698 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3699 		return;
3700 	}
3701 
3702 	/* put this ID on the head of the freelist */
3703 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3704 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3705 	pdev->local_peer_ids.freelist = i;
3706 	pdev->local_peer_ids.map[i] = NULL;
3707 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3708 }
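
/*
 * Example walk-through (illustration only): immediately after
 * dp_local_peer_id_pool_init(), dp_local_peer_id_alloc() hands out
 * local_id 0 and advances the freelist head to 1; a subsequent
 * dp_local_peer_id_free() of that peer pushes 0 back onto the head of
 * the freelist, making it the next ID to be reused.
 */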
3709 
3710 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3711 				uint8_t vdev_id, uint8_t *peer_addr)
3712 {
3713 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3714 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
3715 
3716 	if (!vdev)
3717 		return false;
3718 
3719 	return !!dp_find_peer_by_addr_and_vdev(
3720 					dp_pdev_to_cdp_pdev(vdev->pdev),
3721 					dp_vdev_to_cdp_vdev(vdev),
3722 					peer_addr);
3723 }
3724 
3725 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3726 				      uint8_t vdev_id, uint8_t *peer_addr,
3727 				      uint16_t max_bssid)
3728 {
3729 	int i;
3730 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3731 	struct dp_vdev *vdev;
3732 
3733 	for (i = 0; i < max_bssid; i++) {
3734 		vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
3735 		/* Need to check vdevs other than the vdev_id */
3736 		if (vdev_id == i || !vdev)
3737 			continue;
3738 		if (dp_find_peer_by_addr_and_vdev(
3739 					dp_pdev_to_cdp_pdev(vdev->pdev),
3740 					dp_vdev_to_cdp_vdev(vdev),
3741 					peer_addr)) {
			dp_err("%s: Duplicate peer %pM already exists on vdev %d",
			       __func__, peer_addr, i);
3744 			return true;
3745 		}
3746 	}
3747 
3748 	return false;
3749 }
3750 
3751 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3752 			uint8_t *peer_addr)
3753 {
3754 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3755 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3756 
3757 	if (!pdev)
3758 		return false;
3759 
3760 	return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
3761 }
3762 #endif
3763 
3764 /**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
3766  * @peer: DP peer handle
3767  * @dp_stats_cmd_cb: REO command callback function
3768  * @cb_ctxt: Callback context
3769  *
 * Return: number of TID stats commands successfully sent
3771  */
3772 int dp_peer_rxtid_stats(struct dp_peer *peer,
3773 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
3774 			void *cb_ctxt)
3775 {
3776 	struct dp_soc *soc = peer->vdev->pdev->soc;
3777 	struct hal_reo_cmd_params params;
3778 	int i;
3779 	int stats_cmd_sent_cnt = 0;
3780 	QDF_STATUS status;
3781 
3782 	if (!dp_stats_cmd_cb)
3783 		return stats_cmd_sent_cnt;
3784 
3785 	qdf_mem_zero(&params, sizeof(params));
3786 	for (i = 0; i < DP_MAX_TIDS; i++) {
3787 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3788 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3789 			params.std.need_status = 1;
3790 			params.std.addr_lo =
3791 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3792 			params.std.addr_hi =
3793 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3794 
3795 			if (cb_ctxt) {
3796 				status = dp_reo_send_cmd(
3797 						soc, CMD_GET_QUEUE_STATS,
3798 						&params, dp_stats_cmd_cb,
3799 						cb_ctxt);
3800 			} else {
3801 				status = dp_reo_send_cmd(
3802 						soc, CMD_GET_QUEUE_STATS,
3803 						&params, dp_stats_cmd_cb,
3804 						rx_tid);
3805 			}
3806 
3807 			if (QDF_IS_STATUS_SUCCESS(status))
3808 				stats_cmd_sent_cnt++;
3809 
			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
3812 			qdf_mem_zero(&params, sizeof(params));
3813 			params.std.need_status = 0;
3814 			params.std.addr_lo =
3815 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3816 			params.std.addr_hi =
3817 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3818 			params.u.fl_cache_params.flush_no_inval = 1;
3819 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3820 				NULL);
3821 		}
3822 	}
3823 
3824 	return stats_cmd_sent_cnt;
3825 }
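
/*
 * Hypothetical usage sketch (illustration only; the callback name and the
 * assumption that dp_rxtid_stats_cmd_cb takes (soc, cb_ctxt, reo_status)
 * are not confirmed by this file):
 *
 *	static void rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				    union hal_reo_status *reo_status)
 *	{
 *		...consume reo_status->queue_status here...
 *	}
 *
 *	num_sent = dp_peer_rxtid_stats(peer, rx_tid_stats_cb, NULL);
 */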
3826 
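/**
 * dp_set_michael_key() - set the TKIP michael key for the peer
 * @soc: datapath soc handle
 * @vdev_id: virtual device instance id
 * @peer_mac: peer MAC address
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: michael key to install
 *
 * The key is stored per security index: index 1 for unicast and index 0
 * for multicast, matching the sec_index selection below.
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found or is being
 *	   deleted
 */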
3827 QDF_STATUS
3828 dp_set_michael_key(struct cdp_soc_t *soc,
3829 		   uint8_t vdev_id,
3830 		   uint8_t *peer_mac,
3831 		   bool is_unicast, uint32_t *key)
3832 {
3833 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3834 	uint8_t sec_index = is_unicast ? 1 : 0;
3835 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3836 						      peer_mac, 0, vdev_id);
3837 
3838 	if (!peer || peer->delete_in_progress) {
3839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
3841 		status = QDF_STATUS_E_FAILURE;
3842 		goto fail;
3843 	}
3844 
3845 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3846 		     key, IEEE80211_WEP_MICLEN);
3847 
3848 fail:
3849 	if (peer)
3850 		dp_peer_unref_delete(peer);
3851 
3852 	return status;
3853 }
3854 
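/**
 * dp_peer_find_by_id_valid() - check if a peer exists for the given peer id
 * @soc: datapath soc handle
 * @peer_id: peer id to look up
 *
 * Return: true if a peer is currently mapped to @peer_id, false otherwise
 */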
3855 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3856 {
3857 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3858 
3859 	if (peer) {
3860 		/*
3861 		 * Decrement the peer ref which is taken as part of
3862 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3863 		 */
3864 		dp_peer_unref_del_find_by_id(peer);
3865 
3866 		return true;
3867 	}
3868 
3869 	return false;
3870 }
3871