xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #ifdef CONFIG_MCL
31 #include <cds_ieee80211_common.h>
32 #include <cds_api.h>
33 #endif
34 #include <cdp_txrx_handle.h>
35 #include <wlan_cfg.h>
36 
37 #ifdef WLAN_TX_PKT_CAPTURE_ENH
38 #include "dp_tx_capture.h"
39 #endif
40 
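/*
 * dp_set_ssn_valid_flag() - Set the SSN valid (svld) field in the REO
 * queue update parameters
 * @params: REO command parameters
 * @valid: value to program into the svld field
 *
 * No-op when DP_LFR is not enabled.
 *
 * Return: None
 */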
41 #ifdef DP_LFR
42 static inline void
43 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
44 					uint8_t valid)
45 {
46 	params->u.upd_queue_params.update_svld = 1;
47 	params->u.upd_queue_params.svld = valid;
48 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
49 		  "%s: Setting SSN valid bit to %d",
50 		  __func__, valid);
51 }
52 #else
53 static inline void
54 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
55 					uint8_t valid) {};
56 #endif
57 
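/*
 * dp_peer_find_mac_addr_cmp() - Compare two word-aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the two addresses are equal, non-zero otherwise
 */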
58 static inline int dp_peer_find_mac_addr_cmp(
59 	union dp_align_mac_addr *mac_addr1,
60 	union dp_align_mac_addr *mac_addr2)
61 {
62 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&.
		 * Because the operands are binary rather than generic booleans,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
71 		&
72 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
73 }
74 
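/*
 * dp_peer_ast_table_attach() - Allocate the HW AST index to AST entry table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */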
75 static int dp_peer_ast_table_attach(struct dp_soc *soc)
76 {
77 	uint32_t max_ast_index;
78 
79 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
80 	/* allocate ast_table for ast entry to ast_index map */
81 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
82 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
83 	soc->ast_table = qdf_mem_malloc(max_ast_index *
84 					sizeof(struct dp_ast_entry *));
85 	if (!soc->ast_table) {
86 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
87 			  "%s: ast_table memory allocation failed", __func__);
88 		return QDF_STATUS_E_NOMEM;
89 	}
90 	return 0; /* success */
91 }
92 
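/*
 * dp_peer_find_map_attach() - Allocate the peer ID to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */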
93 static int dp_peer_find_map_attach(struct dp_soc *soc)
94 {
95 	uint32_t max_peers, peer_map_size;
96 
97 	max_peers = soc->max_peers;
98 	/* allocate the peer ID -> peer object map */
99 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
100 		  "\n<=== cfg max peer id %d ====>", max_peers);
101 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
102 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
103 	if (!soc->peer_id_to_obj_map) {
104 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
105 			  "%s: peer map memory allocation failed", __func__);
106 		return QDF_STATUS_E_NOMEM;
107 	}
108 
109 	/*
110 	 * The peer_id_to_obj_map doesn't really need to be initialized,
111 	 * since elements are only used after they have been individually
112 	 * initialized.
113 	 * However, it is convenient for debugging to have all elements
114 	 * that are not in use set to 0.
115 	 */
116 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
117 	return 0; /* success */
118 }
119 
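/*
 * dp_log2_ceil() - Compute the ceiling of log2 of the given value
 * @value: input value
 *
 * Return: smallest n such that (1 << n) >= value
 */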
120 static int dp_log2_ceil(unsigned value)
121 {
122 	unsigned tmp = value;
123 	int log2 = -1;
124 
125 	while (tmp) {
126 		log2++;
127 		tmp >>= 1;
128 	}
129 	if (1 << log2 != value)
130 		log2++;
131 	return log2;
132 }
133 
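/*
 * dp_peer_find_add_id_to_obj() - Record a peer ID in the peer object
 * @peer: peer object
 * @peer_id: peer ID assigned by the target
 *
 * Stores the peer ID in the first free slot of the peer's peer_ids array.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if no free slot is left
 */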
134 static int dp_peer_find_add_id_to_obj(
135 	struct dp_peer *peer,
136 	uint16_t peer_id)
137 {
138 	int i;
139 
140 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
141 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
142 			peer->peer_ids[i] = peer_id;
143 			return 0; /* success */
144 		}
145 	}
146 	return QDF_STATUS_E_FAILURE; /* failure */
147 }
148 
149 #define DP_PEER_HASH_LOAD_MULT  2
150 #define DP_PEER_HASH_LOAD_SHIFT 0
151 
152 #define DP_AST_HASH_LOAD_MULT  2
153 #define DP_AST_HASH_LOAD_SHIFT 0
154 
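/*
 * dp_peer_find_hash_attach() - Allocate the peer MAC address hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */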
155 static int dp_peer_find_hash_attach(struct dp_soc *soc)
156 {
157 	int i, hash_elems, log2;
158 
159 	/* allocate the peer MAC address -> peer object hash table */
160 	hash_elems = soc->max_peers;
161 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
162 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
163 	log2 = dp_log2_ceil(hash_elems);
164 	hash_elems = 1 << log2;
165 
166 	soc->peer_hash.mask = hash_elems - 1;
167 	soc->peer_hash.idx_bits = log2;
168 	/* allocate an array of TAILQ peer object lists */
169 	soc->peer_hash.bins = qdf_mem_malloc(
170 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
171 	if (!soc->peer_hash.bins)
172 		return QDF_STATUS_E_NOMEM;
173 
174 	for (i = 0; i < hash_elems; i++)
175 		TAILQ_INIT(&soc->peer_hash.bins[i]);
176 
177 	return 0;
178 }
179 
180 static void dp_peer_find_hash_detach(struct dp_soc *soc)
181 {
182 	if (soc->peer_hash.bins) {
183 		qdf_mem_free(soc->peer_hash.bins);
184 		soc->peer_hash.bins = NULL;
185 	}
186 }
187 
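/*
 * dp_peer_find_hash_index() - Compute the peer hash index from a MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address of the peer
 *
 * Return: index into the peer hash table bins
 */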
188 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
189 	union dp_align_mac_addr *mac_addr)
190 {
191 	unsigned index;
192 
193 	index =
194 		mac_addr->align2.bytes_ab ^
195 		mac_addr->align2.bytes_cd ^
196 		mac_addr->align2.bytes_ef;
197 	index ^= index >> soc->peer_hash.idx_bits;
198 	index &= soc->peer_hash.mask;
199 	return index;
200 }
201 
202 
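/*
 * dp_peer_find_hash_add() - Add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * Return: None
 */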
203 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
204 {
205 	unsigned index;
206 
207 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
208 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
209 	/*
210 	 * It is important to add the new peer at the tail of the peer list
211 	 * with the bin index.  Together with having the hash_find function
212 	 * search from head to tail, this ensures that if two entries with
213 	 * the same MAC address are stored, the one added first will be
214 	 * found first.
215 	 */
216 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
217 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
218 }
219 
220 #ifdef FEATURE_AST
221 /*
222  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
223  * @soc: SoC handle
224  *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
226  */
227 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
228 {
229 	int i, hash_elems, log2;
230 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
231 
232 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
233 		DP_AST_HASH_LOAD_SHIFT);
234 
235 	log2 = dp_log2_ceil(hash_elems);
236 	hash_elems = 1 << log2;
237 
238 	soc->ast_hash.mask = hash_elems - 1;
239 	soc->ast_hash.idx_bits = log2;
240 
241 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
242 		  "ast hash_elems: %d, max_ast_idx: %d",
243 		  hash_elems, max_ast_idx);
244 
245 	/* allocate an array of TAILQ peer object lists */
246 	soc->ast_hash.bins = qdf_mem_malloc(
247 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
248 				dp_ast_entry)));
249 
250 	if (!soc->ast_hash.bins)
251 		return QDF_STATUS_E_NOMEM;
252 
253 	for (i = 0; i < hash_elems; i++)
254 		TAILQ_INIT(&soc->ast_hash.bins[i]);
255 
256 	return 0;
257 }
258 
259 /*
260  * dp_peer_ast_cleanup() - cleanup the references
261  * @soc: SoC handle
262  * @ast: ast entry
263  *
264  * Return: None
265  */
266 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
267 				       struct dp_ast_entry *ast)
268 {
269 	txrx_ast_free_cb cb = ast->callback;
270 	void *cookie = ast->cookie;
271 
272 	/* Call the callbacks to free up the cookie */
273 	if (cb) {
274 		ast->callback = NULL;
275 		ast->cookie = NULL;
276 		cb(soc->ctrl_psoc,
277 		   soc,
278 		   cookie,
279 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
280 	}
281 }
282 
283 /*
284  * dp_peer_ast_hash_detach() - Free AST Hash table
285  * @soc: SoC handle
286  *
287  * Return: None
288  */
289 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
290 {
291 	unsigned int index;
292 	struct dp_ast_entry *ast, *ast_next;
293 
294 	if (!soc->ast_hash.mask)
295 		return;
296 
297 	if (!soc->ast_hash.bins)
298 		return;
299 
300 	qdf_spin_lock_bh(&soc->ast_lock);
301 	for (index = 0; index <= soc->ast_hash.mask; index++) {
302 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
303 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
304 					   hash_list_elem, ast_next) {
305 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
306 					     hash_list_elem);
307 				dp_peer_ast_cleanup(soc, ast);
308 				qdf_mem_free(ast);
309 			}
310 		}
311 	}
312 	qdf_spin_unlock_bh(&soc->ast_lock);
313 
314 	qdf_mem_free(soc->ast_hash.bins);
315 	soc->ast_hash.bins = NULL;
316 }
317 
318 /*
319  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
320  * @soc: SoC handle
321  *
322  * Return: AST hash
323  */
324 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
325 	union dp_align_mac_addr *mac_addr)
326 {
327 	uint32_t index;
328 
329 	index =
330 		mac_addr->align2.bytes_ab ^
331 		mac_addr->align2.bytes_cd ^
332 		mac_addr->align2.bytes_ef;
333 	index ^= index >> soc->ast_hash.idx_bits;
334 	index &= soc->ast_hash.mask;
335 	return index;
336 }
337 
338 /*
339  * dp_peer_ast_hash_add() - Add AST entry into hash table
340  * @soc: SoC handle
341  *
342  * This function adds the AST entry into SoC AST hash table
343  * It assumes caller has taken the ast lock to protect the access to this table
344  *
345  * Return: None
346  */
347 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
348 		struct dp_ast_entry *ase)
349 {
350 	uint32_t index;
351 
352 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
353 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
354 }
355 
356 /*
357  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
358  * @soc: SoC handle
359  *
360  * This function removes the AST entry from soc AST hash table
361  * It assumes caller has taken the ast lock to protect the access to this table
362  *
363  * Return: None
364  */
365 void dp_peer_ast_hash_remove(struct dp_soc *soc,
366 			     struct dp_ast_entry *ase)
367 {
368 	unsigned index;
369 	struct dp_ast_entry *tmpase;
370 	int found = 0;
371 
372 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the hash bin is not empty before the delete */
374 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
375 
376 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
377 		if (tmpase == ase) {
378 			found = 1;
379 			break;
380 		}
381 	}
382 
383 	QDF_ASSERT(found);
384 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
385 }
386 
387 /*
388  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
389  * @soc: SoC handle
390  * @peer: peer handle
391  * @ast_mac_addr: mac address
392  *
393  * It assumes caller has taken the ast lock to protect the access to ast list
394  *
395  * Return: AST entry
396  */
397 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
398 					   struct dp_peer *peer,
399 					   uint8_t *ast_mac_addr)
400 {
401 	struct dp_ast_entry *ast_entry = NULL;
402 	union dp_align_mac_addr *mac_addr =
403 		(union dp_align_mac_addr *)ast_mac_addr;
404 
405 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
406 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
407 					       &ast_entry->mac_addr)) {
408 			return ast_entry;
409 		}
410 	}
411 
412 	return NULL;
413 }
414 
415 /*
416  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
417  * @soc: SoC handle
418  *
419  * It assumes caller has taken the ast lock to protect the access to
420  * AST hash table
421  *
422  * Return: AST entry
423  */
424 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
425 						     uint8_t *ast_mac_addr,
426 						     uint8_t pdev_id)
427 {
428 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
429 	uint32_t index;
430 	struct dp_ast_entry *ase;
431 
432 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
433 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
434 	mac_addr = &local_mac_addr_aligned;
435 
436 	index = dp_peer_ast_hash_index(soc, mac_addr);
437 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
438 		if ((pdev_id == ase->pdev_id) &&
439 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
440 			return ase;
441 		}
442 	}
443 
444 	return NULL;
445 }
446 
447 /*
448  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
449  * @soc: SoC handle
450  *
451  * It assumes caller has taken the ast lock to protect the access to
452  * AST hash table
453  *
454  * Return: AST entry
455  */
456 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
457 					       uint8_t *ast_mac_addr)
458 {
459 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
460 	unsigned index;
461 	struct dp_ast_entry *ase;
462 
463 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
464 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
465 	mac_addr = &local_mac_addr_aligned;
466 
467 	index = dp_peer_ast_hash_index(soc, mac_addr);
468 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
469 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
470 			return ase;
471 		}
472 	}
473 
474 	return NULL;
475 }
476 
477 /*
478  * dp_peer_map_ast() - Map the ast entry with HW AST Index
479  * @soc: SoC handle
480  * @peer: peer to which ast node belongs
481  * @mac_addr: MAC address of ast node
482  * @hw_peer_id: HW AST Index returned by target in peer map event
483  * @vdev_id: vdev id for VAP to which the peer belongs to
484  * @ast_hash: ast hash value in HW
485  *
486  * Return: None
487  */
488 static inline void dp_peer_map_ast(struct dp_soc *soc,
489 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
490 	uint8_t vdev_id, uint16_t ast_hash)
491 {
492 	struct dp_ast_entry *ast_entry = NULL;
493 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
494 
495 	if (!peer) {
496 		return;
497 	}
498 
499 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
500 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
501 		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
502 		  mac_addr[1], mac_addr[2], mac_addr[3],
503 		  mac_addr[4], mac_addr[5]);
504 
505 	qdf_spin_lock_bh(&soc->ast_lock);
506 
507 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
508 
509 	if (ast_entry) {
510 		ast_entry->ast_idx = hw_peer_id;
511 		soc->ast_table[hw_peer_id] = ast_entry;
512 		ast_entry->is_active = TRUE;
513 		peer_type = ast_entry->type;
514 		ast_entry->ast_hash_value = ast_hash;
515 		ast_entry->is_mapped = TRUE;
516 	}
517 
518 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
519 		if (soc->cdp_soc.ol_ops->peer_map_event) {
520 			soc->cdp_soc.ol_ops->peer_map_event(
521 			soc->ctrl_psoc, peer->peer_ids[0],
522 			hw_peer_id, vdev_id,
523 			mac_addr, peer_type, ast_hash);
524 		}
525 	} else {
526 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
527 			  "AST entry not found");
528 	}
529 
530 	qdf_spin_unlock_bh(&soc->ast_lock);
531 	return;
532 }
533 
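/*
 * dp_peer_free_hmwds_cb() - AST free callback that re-adds an HMWDS entry
 * @ctrl_psoc: control path psoc handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params saved when the delete was issued
 * @status: AST free status
 *
 * Once the old AST entry is confirmed deleted by the target, add the
 * pending HMWDS AST entry for the peer recorded in the cookie.
 *
 * Return: None
 */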
534 void dp_peer_free_hmwds_cb(void *ctrl_psoc,
535 			   void *dp_soc,
536 			   void *cookie,
537 			   enum cdp_ast_free_status status)
538 {
539 	struct dp_ast_free_cb_params *param =
540 		(struct dp_ast_free_cb_params *)cookie;
541 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
542 	struct dp_peer *peer = NULL;
543 
544 	if (status != CDP_TXRX_AST_DELETED) {
545 		qdf_mem_free(cookie);
546 		return;
547 	}
548 
549 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
550 				      0, param->vdev_id);
551 	if (peer) {
552 		dp_peer_add_ast(soc, peer,
553 				&param->mac_addr.raw[0],
554 				param->type,
555 				param->flags);
556 		dp_peer_unref_delete(peer);
557 	}
558 	qdf_mem_free(cookie);
559 }
560 
/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: 0 if new entry is allocated,
 *        -1 if entry add failed
 */
574 int dp_peer_add_ast(struct dp_soc *soc,
575 			struct dp_peer *peer,
576 			uint8_t *mac_addr,
577 			enum cdp_txrx_ast_entry_type type,
578 			uint32_t flags)
579 {
580 	struct dp_ast_entry *ast_entry = NULL;
581 	struct dp_vdev *vdev = NULL;
582 	struct dp_pdev *pdev = NULL;
583 	uint8_t next_node_mac[6];
584 	int  ret = -1;
585 	txrx_ast_free_cb cb = NULL;
586 	void *cookie = NULL;
587 
588 	qdf_spin_lock_bh(&soc->ast_lock);
589 	if (peer->delete_in_progress) {
590 		qdf_spin_unlock_bh(&soc->ast_lock);
591 		return ret;
592 	}
593 
594 	vdev = peer->vdev;
595 	if (!vdev) {
596 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
597 			  FL("Peers vdev is NULL"));
598 		QDF_ASSERT(0);
599 		qdf_spin_unlock_bh(&soc->ast_lock);
600 		return ret;
601 	}
602 
603 	pdev = vdev->pdev;
604 
605 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
606 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
607 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
608 		  peer->mac_addr.raw, peer, mac_addr);
609 
610 
611 	/* fw supports only 2 times the max_peers ast entries */
612 	if (soc->num_ast_entries >=
613 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
614 		qdf_spin_unlock_bh(&soc->ast_lock);
615 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
616 			  FL("Max ast entries reached"));
617 		return ret;
618 	}
619 
	/* If an AST entry already exists, just return from here.
	 * An AST entry with the same MAC address can exist on different
	 * radios; if ast_override support is enabled, use the per-pdev
	 * search in that case.
	 */
625 	if (soc->ast_override_support) {
626 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
627 							    pdev->pdev_id);
628 		if (ast_entry) {
629 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
630 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
631 				ast_entry->is_active = TRUE;
632 
633 			qdf_spin_unlock_bh(&soc->ast_lock);
634 			return 0;
635 		}
636 	} else {
		/* For WDS_HM_SEC, entries can be added for the same MAC
		 * address, so do not check for an existing entry.
		 */
640 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
641 			goto add_ast_entry;
642 
643 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
644 
645 		if (ast_entry) {
646 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
647 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
648 				ast_entry->is_active = TRUE;
649 
650 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
651 			    !ast_entry->delete_in_progress) {
652 				qdf_spin_unlock_bh(&soc->ast_lock);
653 				return 0;
654 			}
655 
			/* An add for an HMWDS entry cannot be ignored if an
			 * AST entry with the same MAC address already exists.
			 *
			 * If an AST entry exists with the requested MAC
			 * address, send a delete command and register a
			 * callback which takes care of adding the HMWDS AST
			 * entry on delete confirmation from the target.
			 */
664 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
665 			    soc->is_peer_map_unmap_v2) {
666 				struct dp_ast_free_cb_params *param = NULL;
667 
668 				if (ast_entry->type ==
669 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
670 					goto add_ast_entry;
671 
672 				/* save existing callback */
673 				if (ast_entry->callback) {
674 					cb = ast_entry->callback;
675 					cookie = ast_entry->cookie;
676 				}
677 
678 				param = qdf_mem_malloc(sizeof(*param));
679 				if (!param) {
680 					QDF_TRACE(QDF_MODULE_ID_TXRX,
681 						  QDF_TRACE_LEVEL_ERROR,
682 						  "Allocation failed");
683 					qdf_spin_unlock_bh(&soc->ast_lock);
684 					return ret;
685 				}
686 
687 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
688 					     QDF_MAC_ADDR_SIZE);
689 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
690 					     &peer->mac_addr.raw[0],
691 					     QDF_MAC_ADDR_SIZE);
692 				param->type = type;
693 				param->flags = flags;
694 				param->vdev_id = vdev->vdev_id;
695 				ast_entry->callback = dp_peer_free_hmwds_cb;
696 				ast_entry->pdev_id = vdev->pdev->pdev_id;
697 				ast_entry->type = type;
698 				ast_entry->cookie = (void *)param;
699 				if (!ast_entry->delete_in_progress)
700 					dp_peer_del_ast(soc, ast_entry);
701 			}
702 
			/* Modify an already existing AST entry from type
			 * WDS to MEC on promotion. This serves as a fix when
			 * backbone interfaces are interchanged, wherein the
			 * WDS entry becomes its own MEC. The entry should be
			 * replaced only when the ast_entry peer matches the
			 * peer received in the MEC event. This additional
			 * check is needed in WDS repeater cases where a
			 * multicast packet from a station to the root via
			 * the repeater should not remove the WDS entry.
			 */
713 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
714 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
715 			    (ast_entry->peer == peer)) {
716 				ast_entry->is_active = FALSE;
717 				dp_peer_del_ast(soc, ast_entry);
718 			}
719 			qdf_spin_unlock_bh(&soc->ast_lock);
720 
721 			/* Call the saved callback*/
722 			if (cb) {
723 				cb(soc->ctrl_psoc, soc, cookie,
724 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
725 			}
726 			return 0;
727 		}
728 	}
729 
730 add_ast_entry:
731 	ast_entry = (struct dp_ast_entry *)
732 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
733 
734 	if (!ast_entry) {
735 		qdf_spin_unlock_bh(&soc->ast_lock);
736 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
737 			  FL("fail to allocate ast_entry"));
738 		QDF_ASSERT(0);
739 		return ret;
740 	}
741 
742 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
743 	ast_entry->pdev_id = vdev->pdev->pdev_id;
744 	ast_entry->vdev_id = vdev->vdev_id;
745 	ast_entry->is_mapped = false;
746 	ast_entry->delete_in_progress = false;
747 
748 	switch (type) {
749 	case CDP_TXRX_AST_TYPE_STATIC:
750 		peer->self_ast_entry = ast_entry;
751 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
752 		if (peer->vdev->opmode == wlan_op_mode_sta)
753 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
754 		break;
755 	case CDP_TXRX_AST_TYPE_SELF:
756 		peer->self_ast_entry = ast_entry;
757 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
758 		break;
759 	case CDP_TXRX_AST_TYPE_WDS:
760 		ast_entry->next_hop = 1;
761 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
762 		break;
763 	case CDP_TXRX_AST_TYPE_WDS_HM:
764 		ast_entry->next_hop = 1;
765 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
766 		break;
767 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
768 		ast_entry->next_hop = 1;
769 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
770 		break;
771 	case CDP_TXRX_AST_TYPE_MEC:
772 		ast_entry->next_hop = 1;
773 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
774 		break;
775 	case CDP_TXRX_AST_TYPE_DA:
776 		peer = peer->vdev->vap_bss_peer;
777 		ast_entry->next_hop = 1;
778 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
779 		break;
780 	default:
781 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
782 			FL("Incorrect AST entry type"));
783 	}
784 
785 	ast_entry->is_active = TRUE;
786 	DP_STATS_INC(soc, ast.added, 1);
787 	soc->num_ast_entries++;
788 	dp_peer_ast_hash_add(soc, ast_entry);
789 
790 	ast_entry->peer = peer;
791 
792 	if (type == CDP_TXRX_AST_TYPE_MEC)
793 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
794 	else
795 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
796 
797 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
798 
799 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
800 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
801 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
802 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
803 		if (QDF_STATUS_SUCCESS ==
804 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
805 				peer->vdev->osif_vdev,
806 				(struct cdp_peer *)peer,
807 				mac_addr,
808 				next_node_mac,
809 				flags)) {
810 			qdf_spin_unlock_bh(&soc->ast_lock);
811 			return 0;
812 		}
813 	}
814 
815 	qdf_spin_unlock_bh(&soc->ast_lock);
816 	return ret;
817 }
818 
819 /*
820  * dp_peer_del_ast() - Delete and free AST entry
821  * @soc: SoC handle
822  * @ast_entry: AST entry of the node
823  *
824  * This function removes the AST entry from peer and soc tables
825  * It assumes caller has taken the ast lock to protect the access to these
826  * tables
827  *
828  * Return: None
829  */
830 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
831 {
832 	struct dp_peer *peer;
833 
834 	if (!ast_entry)
835 		return;
836 
837 	peer =  ast_entry->peer;
838 
839 	dp_peer_ast_send_wds_del(soc, ast_entry);
840 
841 	/*
842 	 * release the reference only if it is mapped
843 	 * to ast_table
844 	 */
845 	if (ast_entry->is_mapped)
846 		soc->ast_table[ast_entry->ast_idx] = NULL;
847 
	/*
	 * If peer map v2 is enabled, the AST entry is not freed here;
	 * it is supposed to be freed in the unmap event (after we
	 * receive delete confirmation from the target).
	 *
	 * If the peer_id is invalid, we did not get the peer map event
	 * for this peer; only in that case free the AST entry from here.
	 */
856 	if (soc->is_peer_map_unmap_v2) {
857 
		/*
		 * For HM_SEC and SELF type entries we do not receive an
		 * unmap event, so free the ast_entry from here itself.
		 */
862 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
863 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
864 			return;
865 	}
866 
867 	/* SELF and STATIC entries are removed in teardown itself */
868 	if (ast_entry->next_hop)
869 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
870 
871 	DP_STATS_INC(soc, ast.deleted, 1);
872 	dp_peer_ast_hash_remove(soc, ast_entry);
873 	dp_peer_ast_cleanup(soc, ast_entry);
874 	qdf_mem_free(ast_entry);
875 	soc->num_ast_entries--;
876 }
877 
/*
 * dp_peer_update_ast() - Update the AST entry to the roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
892 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
893 		       struct dp_ast_entry *ast_entry, uint32_t flags)
894 {
895 	int ret = -1;
896 	struct dp_peer *old_peer;
897 
898 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
899 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
900 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
901 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
902 		  peer->mac_addr.raw);
903 
904 	if (ast_entry->delete_in_progress)
905 		return ret;
906 
907 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
908 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
909 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
910 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
911 		return 0;
912 
913 	/*
914 	 * Avoids flood of WMI update messages sent to FW for same peer.
915 	 */
916 	if (qdf_unlikely(ast_entry->peer == peer) &&
917 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
918 	    (ast_entry->vdev_id == peer->vdev->vdev_id) &&
919 	    (ast_entry->is_active))
920 		return 0;
921 
922 	old_peer = ast_entry->peer;
923 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
924 
925 	ast_entry->peer = peer;
926 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
927 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
928 	ast_entry->vdev_id = peer->vdev->vdev_id;
929 	ast_entry->is_active = TRUE;
930 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
931 
932 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
933 				peer->vdev->osif_vdev,
934 				ast_entry->mac_addr.raw,
935 				peer->mac_addr.raw,
936 				flags);
937 
938 	return ret;
939 }
940 
941 /*
942  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
943  * @soc: SoC handle
944  * @ast_entry: AST entry of the node
945  *
946  * This function gets the pdev_id from the ast entry.
947  *
948  * Return: (uint8_t) pdev_id
949  */
950 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
951 				struct dp_ast_entry *ast_entry)
952 {
953 	return ast_entry->pdev_id;
954 }
955 
956 /*
957  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
958  * @soc: SoC handle
959  * @ast_entry: AST entry of the node
960  *
961  * This function gets the next hop from the ast entry.
962  *
963  * Return: (uint8_t) next_hop
964  */
965 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
966 				struct dp_ast_entry *ast_entry)
967 {
968 	return ast_entry->next_hop;
969 }
970 
/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
980 void dp_peer_ast_set_type(struct dp_soc *soc,
981 				struct dp_ast_entry *ast_entry,
982 				enum cdp_txrx_ast_entry_type type)
983 {
984 	ast_entry->type = type;
985 }
986 
987 #else
988 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
989 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
990 		uint32_t flags)
991 {
992 	return 1;
993 }
994 
995 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
996 {
997 }
998 
999 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1000 			struct dp_ast_entry *ast_entry, uint32_t flags)
1001 {
1002 	return 1;
1003 }
1004 
1005 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1006 					       uint8_t *ast_mac_addr)
1007 {
1008 	return NULL;
1009 }
1010 
1011 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1012 						     uint8_t *ast_mac_addr,
1013 						     uint8_t pdev_id)
1014 {
1015 	return NULL;
1016 }
1017 
1018 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1019 {
1020 	return 0;
1021 }
1022 
1023 static inline void dp_peer_map_ast(struct dp_soc *soc,
1024 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1025 	uint8_t vdev_id, uint16_t ast_hash)
1026 {
1027 	return;
1028 }
1029 
1030 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1031 {
1032 }
1033 
1034 void dp_peer_ast_set_type(struct dp_soc *soc,
1035 				struct dp_ast_entry *ast_entry,
1036 				enum cdp_txrx_ast_entry_type type)
1037 {
1038 }
1039 
1040 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1041 				struct dp_ast_entry *ast_entry)
1042 {
1043 	return 0xff;
1044 }
1045 
1046 
1047 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1048 				struct dp_ast_entry *ast_entry)
1049 {
1050 	return 0xff;
1051 }
1052 #endif
1053 
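/*
 * dp_peer_ast_send_wds_del() - Initiate delete of a WDS AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry to delete
 *
 * Invokes the peer_del_wds_entry op for next-hop entries, unlinks
 * SELF/STATIC (non next-hop) entries from the peer, and marks the
 * entry as delete-in-progress.
 *
 * Return: None
 */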
1054 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1055 			      struct dp_ast_entry *ast_entry)
1056 {
1057 	struct dp_peer *peer = ast_entry->peer;
1058 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1059 
1060 	if (ast_entry->delete_in_progress)
1061 		return;
1062 
1063 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1064 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1065 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1066 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1067 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1068 
1069 	if (ast_entry->next_hop) {
1070 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
1071 						    ast_entry->mac_addr.raw,
1072 						    ast_entry->type);
1073 	}
1074 
1075 	/* Remove SELF and STATIC entries in teardown itself */
1076 	if (!ast_entry->next_hop) {
1077 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1078 		peer->self_ast_entry = NULL;
1079 		ast_entry->peer = NULL;
1080 	}
1081 
1082 	ast_entry->delete_in_progress = true;
1083 }
1084 
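/*
 * dp_peer_ast_free_entry() - Unlink and free an AST entry on unmap
 * @soc: SoC handle
 * @ast_entry: AST entry to free
 *
 * Removes the entry from the peer list, the AST hash table and the AST
 * index table, invokes any registered free callback, and frees the entry.
 *
 * Return: None
 */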
1085 static void dp_peer_ast_free_entry(struct dp_soc *soc,
1086 				   struct dp_ast_entry *ast_entry)
1087 {
1088 	struct dp_peer *peer = ast_entry->peer;
1089 	void *cookie = NULL;
1090 	txrx_ast_free_cb cb = NULL;
1091 
1092 	/*
1093 	 * release the reference only if it is mapped
1094 	 * to ast_table
1095 	 */
1096 
1097 	qdf_spin_lock_bh(&soc->ast_lock);
1098 	if (ast_entry->is_mapped)
1099 		soc->ast_table[ast_entry->ast_idx] = NULL;
1100 
1101 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1102 	DP_STATS_INC(soc, ast.deleted, 1);
1103 	dp_peer_ast_hash_remove(soc, ast_entry);
1104 
1105 	cb = ast_entry->callback;
1106 	cookie = ast_entry->cookie;
1107 	ast_entry->callback = NULL;
1108 	ast_entry->cookie = NULL;
1109 
1110 	if (ast_entry == peer->self_ast_entry)
1111 		peer->self_ast_entry = NULL;
1112 
1113 	qdf_spin_unlock_bh(&soc->ast_lock);
1114 
1115 	if (cb) {
1116 		cb(soc->ctrl_psoc,
1117 		   soc,
1118 		   cookie,
1119 		   CDP_TXRX_AST_DELETED);
1120 	}
1121 	qdf_mem_free(ast_entry);
1122 	soc->num_ast_entries--;
1123 }
1124 
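/*
 * dp_peer_find_hash_find() - Find a peer by MAC address and vdev ID
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @mac_addr_is_aligned: flag indicating the MAC address is already aligned
 * @vdev_id: vdev ID to match, or DP_VDEV_ALL
 *
 * The found peer's reference count is incremented before returning, so the
 * caller must release the reference with dp_peer_unref_delete().
 *
 * Return: peer object on success, NULL if no matching peer is found
 */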
1125 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1126 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1127 {
1128 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1129 	unsigned index;
1130 	struct dp_peer *peer;
1131 
1132 	if (mac_addr_is_aligned) {
1133 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1134 	} else {
1135 		qdf_mem_copy(
1136 			&local_mac_addr_aligned.raw[0],
1137 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1138 		mac_addr = &local_mac_addr_aligned;
1139 	}
1140 	index = dp_peer_find_hash_index(soc, mac_addr);
1141 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1142 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1143 #if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of finding the
		 * correct BSS peer.
		 */
1147 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1148 			((peer->vdev->vdev_id == vdev_id) ||
1149 			 (vdev_id == DP_VDEV_ALL))) {
1150 #else
1151 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1152 #endif
1153 			/* found it - increment the ref count before releasing
1154 			 * the lock
1155 			 */
1156 			qdf_atomic_inc(&peer->ref_cnt);
1157 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1158 			return peer;
1159 		}
1160 	}
1161 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1162 	return NULL; /* failure */
1163 }
1164 
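/*
 * dp_peer_find_hash_remove() - Remove a peer object from the hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold the peer_ref_mutex lock (see the comment in the
 * function body).
 *
 * Return: None
 */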
1165 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1166 {
1167 	unsigned index;
1168 	struct dp_peer *tmppeer = NULL;
1169 	int found = 0;
1170 
1171 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the hash bin is not empty before the delete */
1173 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1174 	/*
1175 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1176 	 * by the caller.
1177 	 * The caller needs to hold the lock from the time the peer object's
1178 	 * reference count is decremented and tested up through the time the
1179 	 * reference to the peer object is removed from the hash table, by
1180 	 * this function.
1181 	 * Holding the lock only while removing the peer object reference
1182 	 * from the hash table keeps the hash table consistent, but does not
1183 	 * protect against a new HL tx context starting to use the peer object
1184 	 * if it looks up the peer object from its MAC address just after the
1185 	 * peer ref count is decremented to zero, but just before the peer
1186 	 * object reference is removed from the hash table.
1187 	 */
1188 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1189 		if (tmppeer == peer) {
1190 			found = 1;
1191 			break;
1192 		}
1193 	}
1194 	QDF_ASSERT(found);
1195 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1196 }
1197 
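/*
 * dp_peer_find_hash_erase() - Drop the hash table references to all peers
 * @soc: SoC handle
 *
 * Walks every hash bin and releases each peer via dp_peer_unref_delete().
 * Called only when the soc is no longer in use.
 *
 * Return: None
 */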
1198 void dp_peer_find_hash_erase(struct dp_soc *soc)
1199 {
1200 	int i;
1201 
1202 	/*
1203 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1204 	 * it's known that the soc is no longer in use.
1205 	 */
1206 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1207 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1208 			struct dp_peer *peer, *peer_next;
1209 
1210 			/*
1211 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1212 			 * memory access violation after peer is freed
1213 			 */
1214 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1215 				hash_list_elem, peer_next) {
1216 				/*
1217 				 * Don't remove the peer from the hash table -
1218 				 * that would modify the list we are currently
1219 				 * traversing, and it's not necessary anyway.
1220 				 */
1221 				/*
1222 				 * Artificially adjust the peer's ref count to
1223 				 * 1, so it will get deleted by
1224 				 * dp_peer_unref_delete.
1225 				 */
1226 				/* set to zero */
1227 				qdf_atomic_init(&peer->ref_cnt);
1228 				/* incr to one */
1229 				qdf_atomic_inc(&peer->ref_cnt);
1230 				dp_peer_unref_delete(peer);
1231 			}
1232 		}
1233 	}
1234 }
1235 
1236 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1237 {
1238 	if (soc->ast_table) {
1239 		qdf_mem_free(soc->ast_table);
1240 		soc->ast_table = NULL;
1241 	}
1242 }
1243 
1244 static void dp_peer_find_map_detach(struct dp_soc *soc)
1245 {
1246 	if (soc->peer_id_to_obj_map) {
1247 		qdf_mem_free(soc->peer_id_to_obj_map);
1248 		soc->peer_id_to_obj_map = NULL;
1249 	}
1250 }
1251 
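/*
 * dp_peer_find_attach() - Allocate all peer and AST lookup tables
 * @soc: SoC handle
 *
 * Allocates the peer ID map, peer hash table, AST table and AST hash table,
 * unwinding any earlier allocations on failure.
 *
 * Return: 0 on success, 1 on failure
 */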
1252 int dp_peer_find_attach(struct dp_soc *soc)
1253 {
1254 	if (dp_peer_find_map_attach(soc))
1255 		return 1;
1256 
1257 	if (dp_peer_find_hash_attach(soc)) {
1258 		dp_peer_find_map_detach(soc);
1259 		return 1;
1260 	}
1261 
1262 	if (dp_peer_ast_table_attach(soc)) {
1263 		dp_peer_find_hash_detach(soc);
1264 		dp_peer_find_map_detach(soc);
1265 		return 1;
1266 	}
1267 
1268 	if (dp_peer_ast_hash_attach(soc)) {
1269 		dp_peer_ast_table_detach(soc);
1270 		dp_peer_find_hash_detach(soc);
1271 		dp_peer_find_map_detach(soc);
1272 		return 1;
1273 	}
1274 
1275 	return 0; /* success */
1276 }
1277 
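/*
 * dp_rx_tid_stats_cb() - REO command callback to print per-TID queue stats
 * @soc: SoC handle
 * @cb_ctxt: callback context (the dp_rx_tid being queried)
 * @reo_status: REO command status returned by hardware
 *
 * Return: None
 */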
1278 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1279 	union hal_reo_status *reo_status)
1280 {
1281 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1282 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1283 
1284 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1285 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1286 			       queue_status->header.status, rx_tid->tid);
1287 		return;
1288 	}
1289 
1290 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1291 		       "ssn: %d\n"
1292 		       "curr_idx  : %d\n"
1293 		       "pn_31_0   : %08x\n"
1294 		       "pn_63_32  : %08x\n"
1295 		       "pn_95_64  : %08x\n"
1296 		       "pn_127_96 : %08x\n"
1297 		       "last_rx_enq_tstamp : %08x\n"
1298 		       "last_rx_deq_tstamp : %08x\n"
1299 		       "rx_bitmap_31_0     : %08x\n"
1300 		       "rx_bitmap_63_32    : %08x\n"
1301 		       "rx_bitmap_95_64    : %08x\n"
1302 		       "rx_bitmap_127_96   : %08x\n"
1303 		       "rx_bitmap_159_128  : %08x\n"
1304 		       "rx_bitmap_191_160  : %08x\n"
1305 		       "rx_bitmap_223_192  : %08x\n"
1306 		       "rx_bitmap_255_224  : %08x\n",
1307 		       rx_tid->tid,
1308 		       queue_status->ssn, queue_status->curr_idx,
1309 		       queue_status->pn_31_0, queue_status->pn_63_32,
1310 		       queue_status->pn_95_64, queue_status->pn_127_96,
1311 		       queue_status->last_rx_enq_tstamp,
1312 		       queue_status->last_rx_deq_tstamp,
1313 		       queue_status->rx_bitmap_31_0,
1314 		       queue_status->rx_bitmap_63_32,
1315 		       queue_status->rx_bitmap_95_64,
1316 		       queue_status->rx_bitmap_127_96,
1317 		       queue_status->rx_bitmap_159_128,
1318 		       queue_status->rx_bitmap_191_160,
1319 		       queue_status->rx_bitmap_223_192,
1320 		       queue_status->rx_bitmap_255_224);
1321 
1322 	DP_PRINT_STATS(
1323 		       "curr_mpdu_cnt      : %d\n"
1324 		       "curr_msdu_cnt      : %d\n"
1325 		       "fwd_timeout_cnt    : %d\n"
1326 		       "fwd_bar_cnt        : %d\n"
1327 		       "dup_cnt            : %d\n"
1328 		       "frms_in_order_cnt  : %d\n"
1329 		       "bar_rcvd_cnt       : %d\n"
1330 		       "mpdu_frms_cnt      : %d\n"
1331 		       "msdu_frms_cnt      : %d\n"
1332 		       "total_byte_cnt     : %d\n"
1333 		       "late_recv_mpdu_cnt : %d\n"
1334 		       "win_jump_2k        : %d\n"
1335 		       "hole_cnt           : %d\n",
1336 		       queue_status->curr_mpdu_cnt,
1337 		       queue_status->curr_msdu_cnt,
1338 		       queue_status->fwd_timeout_cnt,
1339 		       queue_status->fwd_bar_cnt,
1340 		       queue_status->dup_cnt,
1341 		       queue_status->frms_in_order_cnt,
1342 		       queue_status->bar_rcvd_cnt,
1343 		       queue_status->mpdu_frms_cnt,
1344 		       queue_status->msdu_frms_cnt,
1345 		       queue_status->total_cnt,
1346 		       queue_status->late_recv_mpdu_cnt,
1347 		       queue_status->win_jump_2k,
1348 		       queue_status->hole_cnt);
1349 
1350 	DP_PRINT_STATS("Addba Req          : %d\n"
1351 			"Addba Resp         : %d\n"
1352 			"Addba Resp success : %d\n"
1353 			"Addba Resp failed  : %d\n"
1354 			"Delba Req received : %d\n"
1355 			"Delba Tx success   : %d\n"
1356 			"Delba Tx Fail      : %d\n"
1357 			"BA window size     : %d\n"
1358 			"Pn size            : %d\n",
1359 			rx_tid->num_of_addba_req,
1360 			rx_tid->num_of_addba_resp,
1361 			rx_tid->num_addba_rsp_success,
1362 			rx_tid->num_addba_rsp_failed,
1363 			rx_tid->num_of_delba_req,
1364 			rx_tid->delba_tx_success_cnt,
1365 			rx_tid->delba_tx_fail_cnt,
1366 			rx_tid->ba_win_size,
1367 			rx_tid->pn_size);
1368 }
1369 
1370 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1371 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1372 	uint8_t vdev_id)
1373 {
1374 	struct dp_peer *peer;
1375 
1376 	QDF_ASSERT(peer_id <= soc->max_peers);
1377 	/* check if there's already a peer object with this MAC address */
1378 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1379 		0 /* is aligned */, vdev_id);
1380 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1381 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1382 		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1383 		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1384 		  peer_mac_addr[4], peer_mac_addr[5]);
1385 
1386 	if (peer) {
1387 		/* peer's ref count was already incremented by
1388 		 * peer_find_hash_find
1389 		 */
1390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1391 			  "%s: ref_cnt: %d", __func__,
1392 			   qdf_atomic_read(&peer->ref_cnt));
1393 		if (!soc->peer_id_to_obj_map[peer_id])
1394 			soc->peer_id_to_obj_map[peer_id] = peer;
1395 		else {
1396 			/* Peer map event came for peer_id which
1397 			 * is already mapped, this is not expected
1398 			 */
1399 			QDF_ASSERT(0);
1400 		}
1401 
1402 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1403 			/* TBDXXX: assert for now */
1404 			QDF_ASSERT(0);
1405 		}
1406 
1407 		return peer;
1408 	}
1409 
1410 	return NULL;
1411 }
1412 
/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @hw_peer_id - ast index for this peer
 * @vdev_id - vdev ID
 * @peer_mac_addr - mac address of the peer
 * @ast_hash - ast hash value
 * @is_wds - flag to indicate peer map event for WDS ast entry
 *
 * Associate the peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */
1428 
1429 void
1430 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1431 		       uint16_t hw_peer_id, uint8_t vdev_id,
1432 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1433 		       uint8_t is_wds)
1434 {
1435 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1436 	struct dp_peer *peer = NULL;
1437 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1438 
1439 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1440 		soc, peer_id, hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1441 		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1442 		  peer_mac_addr[5], vdev_id);
1443 
1444 	if ((hw_peer_id < 0) ||
1445 	    (hw_peer_id >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1446 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1447 			"invalid hw_peer_id: %d", hw_peer_id);
1448 		qdf_assert_always(0);
1449 	}
1450 
	/* For a peer map event for a WDS ast entry, get the peer
	 * from the obj map
	 */
1454 	if (is_wds) {
1455 		peer = soc->peer_id_to_obj_map[peer_id];
1456 	} else {
1457 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1458 					   hw_peer_id, vdev_id);
1459 
1460 		if (peer) {
1461 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1462 			    qdf_mem_cmp(peer->mac_addr.raw,
1463 					peer->vdev->mac_addr.raw,
1464 					QDF_MAC_ADDR_SIZE) != 0) {
1465 				dp_info("STA vdev bss_peer!!!!");
1466 				peer->bss_peer = 1;
1467 				peer->vdev->vap_bss_peer = peer;
1468 			}
1469 
1470 			if (peer->vdev->opmode == wlan_op_mode_sta)
1471 				peer->vdev->bss_ast_hash = ast_hash;
1472 
			/* Add the ast entry in case the self ast entry was
			 * deleted due to a DP/CP sync issue.
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map, so no lock is needed
			 * before referring to it.
			 */
1481 			if (!peer->self_ast_entry) {
1482 				dp_info("Add self ast from map %pM",
1483 					peer_mac_addr);
1484 				dp_peer_add_ast(soc, peer,
1485 						peer_mac_addr,
1486 						type, 0);
1487 			}
1488 
1489 		}
1490 	}
1491 
1492 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1493 			hw_peer_id, vdev_id, ast_hash);
1494 }
1495 
/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer unmap event for WDS ast entry
 *
 * Return: none
 */
1506 void
1507 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1508 			 uint8_t vdev_id, uint8_t *mac_addr,
1509 			 uint8_t is_wds)
1510 {
1511 	struct dp_peer *peer;
1512 	struct dp_ast_entry *ast_entry;
1513 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1514 	uint8_t i;
1515 
1516 	peer = __dp_peer_find_by_id(soc, peer_id);
1517 
1518 	/*
1519 	 * Currently peer IDs are assigned for vdevs as well as peers.
1520 	 * If the peer ID is for a vdev, then the peer pointer stored
1521 	 * in peer_id_to_obj_map will be NULL.
1522 	 */
1523 	if (!peer) {
1524 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1525 		return;
1526 	}
1527 
	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here
	 */
1530 	if (soc->is_peer_map_unmap_v2 && is_wds) {
1531 
1532 		qdf_spin_lock_bh(&soc->ast_lock);
1533 		ast_entry = dp_peer_ast_list_find(soc, peer,
1534 						  mac_addr);
1535 		qdf_spin_unlock_bh(&soc->ast_lock);
1536 
1537 		if (ast_entry) {
1538 			dp_peer_ast_free_entry(soc, ast_entry);
1539 			return;
1540 		}
1541 
1542 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1543 			 peer, peer->peer_ids[0],
1544 			 peer->mac_addr.raw, mac_addr, vdev_id,
1545 			 is_wds);
1546 
1547 		return;
1548 	}
1549 
1550 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1551 		soc, peer_id, peer);
1552 
1553 	soc->peer_id_to_obj_map[peer_id] = NULL;
1554 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1555 		if (peer->peer_ids[i] == peer_id) {
1556 			peer->peer_ids[i] = HTT_INVALID_PEER;
1557 			break;
1558 		}
1559 	}
1560 
1561 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1562 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1563 				peer_id, vdev_id);
1564 	}
1565 
1566 	/*
1567 	 * Remove a reference to the peer.
1568 	 * If there are no more references, delete the peer object.
1569 	 */
1570 	dp_peer_unref_delete(peer);
1571 }
1572 
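/*
 * dp_peer_find_detach() - Free all peer and AST lookup tables
 * @soc: SoC handle
 *
 * Return: None
 */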
1573 void
1574 dp_peer_find_detach(struct dp_soc *soc)
1575 {
1576 	dp_peer_find_map_detach(soc);
1577 	dp_peer_find_hash_detach(soc);
1578 	dp_peer_ast_hash_detach(soc);
1579 	dp_peer_ast_table_detach(soc);
1580 }
1581 
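/*
 * dp_rx_tid_update_cb() - REO command callback for RX TID queue updates
 * @soc: SoC handle
 * @cb_ctxt: callback context (the dp_rx_tid that was updated)
 * @reo_status: REO command status returned by hardware
 *
 * Logs an error if the HW queue descriptor update failed.
 *
 * Return: None
 */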
1582 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1583 	union hal_reo_status *reo_status)
1584 {
1585 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1586 
1587 	if ((reo_status->rx_queue_status.header.status !=
1588 		HAL_REO_CMD_SUCCESS) &&
1589 		(reo_status->rx_queue_status.header.status !=
1590 		HAL_REO_CMD_DRAIN)) {
1591 		/* Should not happen normally. Just print error for now */
1592 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1593 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1594 			  __func__,
1595 			  reo_status->rx_queue_status.header.status,
1596 			  rx_tid->tid);
1597 	}
1598 }
1599 
1600 /*
1601  * dp_find_peer_by_addr - find peer instance by mac address
1602  * @dev: physical device instance
1603  * @peer_mac_addr: peer mac address
1604  * @local_id: local id for the peer
1605  *
1606  * Return: peer instance pointer
1607  */
1608 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1609 		uint8_t *local_id)
1610 {
1611 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1612 	struct dp_peer *peer;
1613 
1614 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1615 
1616 	if (!peer)
1617 		return NULL;
1618 
	/* Multiple peer ids? How can we know the peer id? */
1620 	*local_id = peer->local_id;
1621 	dp_verbose_debug("peer %pK id %d", peer, *local_id);
1622 
1623 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1624 	 * Decrement it here.
1625 	 */
1626 	dp_peer_unref_delete(peer);
1627 
1628 	return peer;
1629 }
1630 
1631 /*
1632  * dp_rx_tid_update_wifi3() – Update receive TID state
1633  * @peer: Datapath peer handle
1634  * @tid: TID
1635  * @ba_window_size: BlockAck window size
1636  * @start_seq: Starting sequence number
1637  *
1638  * Return: 0 on success, error code on failure
1639  */
1640 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1641 				  ba_window_size, uint32_t start_seq)
1642 {
1643 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1644 	struct dp_soc *soc = peer->vdev->pdev->soc;
1645 	struct hal_reo_cmd_params params;
1646 
1647 	qdf_mem_zero(&params, sizeof(params));
1648 
1649 	params.std.need_status = 1;
1650 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1651 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1652 	params.u.upd_queue_params.update_ba_window_size = 1;
1653 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1654 
1655 	if (start_seq < IEEE80211_SEQ_MAX) {
1656 		params.u.upd_queue_params.update_ssn = 1;
1657 		params.u.upd_queue_params.ssn = start_seq;
1658 	}
1659 
1660 	dp_set_ssn_valid_flag(&params, 0);
1661 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1662 			dp_rx_tid_update_cb, rx_tid);
1663 
1664 	rx_tid->ba_win_size = ba_window_size;
1665 
1666 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1667 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1668 			peer->vdev->pdev->ctrl_pdev,
1669 			peer->vdev->vdev_id, peer->mac_addr.raw,
1670 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 /*
1677  * dp_reo_desc_free() - Callback free reo descriptor memory after
 * dp_reo_desc_free() - Callback to free reo descriptor memory after
1679  *
1680  * @soc: DP SOC handle
1681  * @cb_ctxt: Callback context
1682  * @reo_status: REO command status
1683  */
1684 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1685 	union hal_reo_status *reo_status)
1686 {
1687 	struct reo_desc_list_node *freedesc =
1688 		(struct reo_desc_list_node *)cb_ctxt;
1689 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1690 
1691 	if ((reo_status->fl_cache_status.header.status !=
1692 		HAL_REO_CMD_SUCCESS) &&
1693 		(reo_status->fl_cache_status.header.status !=
1694 		HAL_REO_CMD_DRAIN)) {
1695 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1696 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1697 			  __func__,
1698 			  reo_status->rx_queue_status.header.status,
1699 			  freedesc->rx_tid.tid);
1700 	}
1701 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1702 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1703 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1704 	qdf_mem_unmap_nbytes_single(soc->osdev,
1705 		rx_tid->hw_qdesc_paddr,
1706 		QDF_DMA_BIDIRECTIONAL,
1707 		rx_tid->hw_qdesc_alloc_size);
1708 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1709 	qdf_mem_free(freedesc);
1710 }
1711 
1712 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
1713 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1714 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1715 {
1716 	if (dma_addr < 0x50000000)
1717 		return QDF_STATUS_E_FAILURE;
1718 	else
1719 		return QDF_STATUS_SUCCESS;
1720 }
1721 #else
1722 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1723 {
1724 	return QDF_STATUS_SUCCESS;
1725 }
1726 #endif
1727 
1728 
1729 /*
1730  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1731  * @peer: Datapath peer handle
1732  * @tid: TID
1733  * @ba_window_size: BlockAck window size
1734  * @start_seq: Starting sequence number
1735  *
1736  * Return: 0 on success, error code on failure
1737  */
1738 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1739 	uint32_t ba_window_size, uint32_t start_seq)
1740 {
1741 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1742 	struct dp_vdev *vdev = peer->vdev;
1743 	struct dp_soc *soc = vdev->pdev->soc;
1744 	uint32_t hw_qdesc_size;
1745 	uint32_t hw_qdesc_align;
1746 	int hal_pn_type;
1747 	void *hw_qdesc_vaddr;
1748 	uint32_t alloc_tries = 0;
1749 	int err = QDF_STATUS_SUCCESS;
1750 
1751 	if (peer->delete_in_progress ||
1752 	    !qdf_atomic_read(&peer->is_default_route_set))
1753 		return QDF_STATUS_E_FAILURE;
1754 
1755 	rx_tid->ba_win_size = ba_window_size;
1756 	if (rx_tid->hw_qdesc_vaddr_unaligned)
1757 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1758 			start_seq);
1759 	rx_tid->delba_tx_status = 0;
1760 	rx_tid->ppdu_id_2k = 0;
1761 	rx_tid->num_of_addba_req = 0;
1762 	rx_tid->num_of_delba_req = 0;
1763 	rx_tid->num_of_addba_resp = 0;
1764 	rx_tid->num_addba_rsp_failed = 0;
1765 	rx_tid->num_addba_rsp_success = 0;
1766 	rx_tid->delba_tx_success_cnt = 0;
1767 	rx_tid->delba_tx_fail_cnt = 0;
1768 	rx_tid->statuscode = 0;
1769 
1770 	/* TODO: Allocating HW queue descriptors based on max BA window size
1771 	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
1773 	 * queue descriptors based on BA window size being negotiated (0 for
1774 	 * non BA cases), and reallocate when BA window size changes and also
1775 	 * send WMI message to FW to change the REO queue descriptor in Rx
1776 	 * peer entry as part of dp_rx_tid_update.
1777 	 */
1778 	if (tid != DP_NON_QOS_TID)
1779 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1780 			HAL_RX_MAX_BA_WINDOW, tid);
1781 	else
1782 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1783 			ba_window_size, tid);
1784 
1785 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1786 	/* To avoid unnecessary extra allocation for alignment, try allocating
1787 	 * the exact size first and check whether the address is already aligned.
1788 	 */
1789 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1790 
1791 try_desc_alloc:
1792 	rx_tid->hw_qdesc_vaddr_unaligned =
1793 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1794 
1795 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1796 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1797 			  "%s: Rx tid HW desc alloc failed: tid %d",
1798 			  __func__, tid);
1799 		return QDF_STATUS_E_NOMEM;
1800 	}
1801 
1802 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1803 		hw_qdesc_align) {
1804 		/* Address allocated above is not aligned. Allocate extra
1805 		 * memory for alignment.
1806 		 */
1807 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1808 		rx_tid->hw_qdesc_vaddr_unaligned =
1809 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1810 					hw_qdesc_align - 1);
1811 
1812 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1813 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1814 				  "%s: Rx tid HW desc alloc failed: tid %d",
1815 				  __func__, tid);
1816 			return QDF_STATUS_E_NOMEM;
1817 		}
1818 
1819 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1820 			rx_tid->hw_qdesc_vaddr_unaligned,
1821 			hw_qdesc_align);
1822 
1823 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1824 			  "%s: Total Size %d Aligned Addr %pK",
1825 			  __func__, rx_tid->hw_qdesc_alloc_size,
1826 			  hw_qdesc_vaddr);
1827 
1828 	} else {
1829 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1830 	}
1831 
1832 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1833 	 * Currently this is set based on htt indication
1834 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1835 	 */
1836 	switch (peer->security[dp_sec_ucast].sec_type) {
1837 	case cdp_sec_type_tkip_nomic:
1838 	case cdp_sec_type_aes_ccmp:
1839 	case cdp_sec_type_aes_ccmp_256:
1840 	case cdp_sec_type_aes_gcmp:
1841 	case cdp_sec_type_aes_gcmp_256:
1842 		hal_pn_type = HAL_PN_WPA;
1843 		break;
1844 	case cdp_sec_type_wapi:
1845 		if (vdev->opmode == wlan_op_mode_ap)
1846 			hal_pn_type = HAL_PN_WAPI_EVEN;
1847 		else
1848 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1849 		break;
1850 	default:
1851 		hal_pn_type = HAL_PN_NONE;
1852 		break;
1853 	}
1854 
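	/* Program the REO queue descriptor in host memory with the BA window
	 * size, start sequence number and PN type selected above, then
	 * DMA-map it so its physical address can be handed to the target.
	 */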
1855 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1856 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1857 
1858 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1859 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1860 		&(rx_tid->hw_qdesc_paddr));
1861 
1862 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1863 			QDF_STATUS_SUCCESS) {
1864 		if (alloc_tries++ < 10) {
1865 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1866 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1867 			goto try_desc_alloc;
1868 		} else {
1869 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1870 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1871 				  __func__, tid);
1872 			err = QDF_STATUS_E_NOMEM;
1873 			goto error;
1874 		}
1875 	}
1876 
1877 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1878 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1879 		    vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
1880 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1881 		    1, ba_window_size)) {
1882 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1883 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
1884 				  __func__, tid);
1885 			err = QDF_STATUS_E_FAILURE;
1886 			goto error;
1887 		}
1888 	}
1889 	return 0;
1890 error:
1891 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1892 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
1893 		    QDF_STATUS_SUCCESS)
1894 			qdf_mem_unmap_nbytes_single(
1895 				soc->osdev,
1896 				rx_tid->hw_qdesc_paddr,
1897 				QDF_DMA_BIDIRECTIONAL,
1898 				rx_tid->hw_qdesc_alloc_size);
1899 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1900 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1901 	}
1902 	return err;
1903 }
1904 
1905 /*
1906  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1907  * after deleting the entries (i.e., setting valid=0)
1908  *
1909  * @soc: DP SOC handle
1910  * @cb_ctxt: Callback context
1911  * @reo_status: REO command status
1912  */
1913 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1914 	union hal_reo_status *reo_status)
1915 {
1916 	struct reo_desc_list_node *freedesc =
1917 		(struct reo_desc_list_node *)cb_ctxt;
1918 	uint32_t list_size;
1919 	struct reo_desc_list_node *desc;
1920 	unsigned long curr_ts = qdf_get_system_timestamp();
1921 	uint32_t desc_size, tot_desc_size;
1922 	struct hal_reo_cmd_params params;
1923 
1924 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1925 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1926 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1927 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1928 		return;
1929 	} else if (reo_status->rx_queue_status.header.status !=
1930 		HAL_REO_CMD_SUCCESS) {
1931 		/* Should not happen normally. Just print error for now */
1932 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1933 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
1934 			  __func__,
1935 			  reo_status->rx_queue_status.header.status,
1936 			  freedesc->rx_tid.tid);
1937 	}
1938 
1939 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1940 		"%s: rx_tid: %d status: %d", __func__,
1941 		freedesc->rx_tid.tid,
1942 		reo_status->rx_queue_status.header.status);
1943 
1944 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1945 	freedesc->free_ts = curr_ts;
1946 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1947 		(qdf_list_node_t *)freedesc, &list_size);
1948 
1949 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1950 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1951 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1952 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1953 		struct dp_rx_tid *rx_tid;
1954 
1955 		qdf_list_remove_front(&soc->reo_desc_freelist,
1956 				(qdf_list_node_t **)&desc);
1957 		list_size--;
1958 		rx_tid = &desc->rx_tid;
1959 
1960 		/* Flush and invalidate REO descriptor from HW cache: Base and
1961 		 * extension descriptors should be flushed separately */
1962 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1963 		/* Get base descriptor size by passing non-qos TID */
1964 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1965 						   DP_NON_QOS_TID);
1966 
1967 		/* Flush reo extension descriptors */
1968 		while ((tot_desc_size -= desc_size) > 0) {
1969 			qdf_mem_zero(&params, sizeof(params));
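			/* The REO flush command takes the 64-bit queue
			 * descriptor address split into 32-bit low/high
			 * words; walk back from the end of the allocation
			 * one extension descriptor at a time.
			 */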
1970 			params.std.addr_lo =
1971 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1972 				tot_desc_size) & 0xffffffff;
1973 			params.std.addr_hi =
1974 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1975 
1976 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1977 							CMD_FLUSH_CACHE,
1978 							&params,
1979 							NULL,
1980 							NULL)) {
1981 				QDF_TRACE(QDF_MODULE_ID_DP,
1982 					QDF_TRACE_LEVEL_ERROR,
1983 					"%s: fail to send CMD_FLUSH_CACHE: tid %d desc %pK",
1984 					__func__,
1985 					rx_tid->tid,
1986 					(void *)(rx_tid->hw_qdesc_paddr));
1987 			}
1988 		}
1989 
1990 		/* Flush base descriptor */
1991 		qdf_mem_zero(&params, sizeof(params));
1992 		params.std.need_status = 1;
1993 		params.std.addr_lo =
1994 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1995 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1996 
1997 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1998 							  CMD_FLUSH_CACHE,
1999 							  &params,
2000 							  dp_reo_desc_free,
2001 							  (void *)desc)) {
2002 			union hal_reo_status reo_status;
2003 			/*
2004 			 * If dp_reo_send_cmd returns failure, the related TID
2005 			 * queue descriptor should be unmapped, and the local
2006 			 * desc node together with that descriptor must be freed.
2007 			 *
2008 			 * Invoke the desc_free function directly here to clean up.
2009 			 */
2010 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2011 				  "%s: fail to send REO cmd to flush cache: tid %d",
2012 				  __func__, rx_tid->tid);
2013 			qdf_mem_zero(&reo_status, sizeof(reo_status));
2014 			reo_status.fl_cache_status.header.status = 0;
2015 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
2016 		}
2017 	}
2018 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2019 }
2020 
2021 /*
2022  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
2023  * @peer: Datapath peer handle
2024  * @tid: TID
2025  *
2026  * Return: 0 on success, error code on failure
2027  */
2028 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2029 {
2030 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2031 	struct dp_soc *soc = peer->vdev->pdev->soc;
2032 	struct hal_reo_cmd_params params;
2033 	struct reo_desc_list_node *freedesc =
2034 		qdf_mem_malloc(sizeof(*freedesc));
2035 
2036 	if (!freedesc) {
2037 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2038 			  "%s: malloc failed for freedesc: tid %d",
2039 			  __func__, tid);
2040 		return -ENOMEM;
2041 	}
2042 
2043 	freedesc->rx_tid = *rx_tid;
2044 
2045 	qdf_mem_zero(&params, sizeof(params));
2046 
2047 	params.std.need_status = 1;
2048 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2049 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2050 	params.u.upd_queue_params.update_vld = 1;
2051 	params.u.upd_queue_params.vld = 0;
2052 
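	/* Invalidate the REO queue (vld = 0); dp_rx_tid_delete_cb will then
	 * flush it from the HW cache and free the descriptor memory.
	 */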
2053 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2054 		dp_rx_tid_delete_cb, (void *)freedesc);
2055 
2056 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2057 	rx_tid->hw_qdesc_alloc_size = 0;
2058 	rx_tid->hw_qdesc_paddr = 0;
2059 
2060 	return 0;
2061 }
2062 
2063 #ifdef DP_LFR
2064 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2065 {
2066 	int tid;
2067 
2068 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2069 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2070 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2071 			  "Setting up TID %d for peer %pK peer->local_id %d",
2072 			  tid, peer, peer->local_id);
2073 	}
2074 }
2075 #else
2076 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
2077 #endif
2078 
2079 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2080 /*
2081  * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
2082  * @peer: Datapath peer
2083  *
2084  */
2085 static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2086 {
2087 }
2088 
2089 /*
2090  * dp_peer_tid_queue_cleanup() - Remove ppdu stats queue per TID
2091  * @peer: Datapath peer
2092  *
2093  */
2094 static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2095 {
2096 }
2097 
2098 /*
2099  * dp_peer_update_80211_hdr() - Update the 802.11 header of the peer
2100  * @vdev: Datapath vdev
2101  * @peer: Datapath peer
2102  *
2103  */
2104 static inline void
2105 dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2106 {
2107 }
2108 #endif
2109 
2110 /*
2111  * dp_peer_tx_init() - Initialize transmit state of the peer
2112  * @pdev: Datapath pdev
2113  * @peer: Datapath peer
2114  *
2115  */
2116 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2117 {
2118 	dp_peer_tid_queue_init(peer);
2119 	dp_peer_update_80211_hdr(peer->vdev, peer);
2120 }
2121 
2122 /*
2123  * dp_peer_tx_cleanup() - Deinitialize transmit state of the peer
2124  * @vdev: Datapath vdev
2125  * @peer: Datapath peer
2126  *
2127  */
2128 static inline void
2129 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2130 {
2131 	dp_peer_tid_queue_cleanup(peer);
2132 }
2133 
2134 /*
2135  * dp_peer_rx_init() - Initialize receive TID state
2136  * @pdev: Datapath pdev
2137  * @peer: Datapath peer
2138  *
2139  */
2140 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2141 {
2142 	int tid;
2143 	struct dp_rx_tid *rx_tid;
2144 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2145 		rx_tid = &peer->rx_tid[tid];
2146 		rx_tid->array = &rx_tid->base;
2147 		rx_tid->base.head = rx_tid->base.tail = NULL;
2148 		rx_tid->tid = tid;
2149 		rx_tid->defrag_timeout_ms = 0;
2150 		rx_tid->ba_win_size = 0;
2151 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2152 
2153 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2154 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2155 	}
2156 
2157 	peer->active_ba_session_cnt = 0;
2158 	peer->hw_buffer_size = 0;
2159 	peer->kill_256_sessions = 0;
2160 
2161 	/* Setup default (non-qos) rx tid queue */
2162 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2163 
2164 	/* Setup rx tid queue for TID 0.
2165 	 * Other queues will be set up on receiving the first packet, which
2166 	 * will cause a NULL REO queue error.
2167 	 */
2168 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2169 
2170 	/*
2171 	 * Setup the rest of TID's to handle LFR
2172 	 */
2173 	dp_peer_setup_remaining_tids(peer);
2174 
2175 	/*
2176 	 * Set security defaults: no PN check, no security. The target may
2177 	 * send a HTT SEC_IND message to overwrite these defaults.
2178 	 */
2179 	peer->security[dp_sec_ucast].sec_type =
2180 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2181 }
2182 
2183 /*
2184  * dp_peer_rx_cleanup() - Cleanup receive TID state
2185  * @vdev: Datapath vdev
2186  * @peer: Datapath peer
2187  *
2188  */
2189 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2190 {
2191 	int tid;
2192 	uint32_t tid_delete_mask = 0;
2193 
2194 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
2195 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2196 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2197 
2198 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2199 		if (!peer->bss_peer && peer->vdev->opmode != wlan_op_mode_sta) {
2200 			/* Cleanup defrag related resource */
2201 			dp_rx_defrag_waitlist_remove(peer, tid);
2202 			dp_rx_reorder_flush_frag(peer, tid);
2203 		}
2204 
2205 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2206 			dp_rx_tid_delete_wifi3(peer, tid);
2207 
2208 			tid_delete_mask |= (1 << tid);
2209 		}
2210 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2211 	}
2212 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2213 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2214 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
2215 			peer->vdev->vdev_id, peer->mac_addr.raw,
2216 			tid_delete_mask);
2217 	}
2218 #endif
2219 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
2220 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2221 }
2222 
2223 /*
2224  * dp_peer_cleanup() - Cleanup peer information
2225  * @vdev: Datapath vdev
2226  * @peer: Datapath peer
2227  *
2228  */
2229 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2230 {
2231 	peer->last_assoc_rcvd = 0;
2232 	peer->last_disassoc_rcvd = 0;
2233 	peer->last_deauth_rcvd = 0;
2234 
2235 	dp_peer_tx_cleanup(vdev, peer);
2236 
2237 	/* cleanup the Rx reorder queues for this peer */
2238 	dp_peer_rx_cleanup(vdev, peer);
2239 }
2240 
2241 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2242  *                                 window size when a request with
2243  *                                 64 window size is received.
2244  *                                 This is done as a WAR since HW can
2245  *                                 have only one setting per peer (64 or 256).
2246  *                                 For HKv2, we use the per-TID buffersize
2247  *                                 setting for TIDs 0 to per_tid_basize_max_tid.
2248  *                                 For TIDs greater than per_tid_basize_max_tid,
2249  *                                 we use the HKv1 method.
2250  * @peer: Datapath peer
2251  *
2252  * Return: void
2253  */
2254 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2255 {
2256 	uint8_t delba_rcode = 0;
2257 	int tid;
2258 	struct dp_rx_tid *rx_tid = NULL;
2259 
2260 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2261 	for (; tid < DP_MAX_TIDS; tid++) {
2262 		rx_tid = &peer->rx_tid[tid];
2263 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2264 
2265 		if (rx_tid->ba_win_size <= 64) {
2266 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2267 			continue;
2268 		} else {
2269 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2270 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2271 				/* send delba */
2272 				if (!rx_tid->delba_tx_status) {
2273 					rx_tid->delba_tx_retry++;
2274 					rx_tid->delba_tx_status = 1;
2275 					rx_tid->delba_rcode =
2276 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2277 					delba_rcode = rx_tid->delba_rcode;
2278 
2279 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2280 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2281 							peer->vdev->pdev->ctrl_pdev,
2282 							peer->ctrl_peer,
2283 							peer->mac_addr.raw,
2284 							tid, peer->vdev->ctrl_vdev,
2285 							delba_rcode);
2286 				} else {
2287 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2288 				}
2289 			} else {
2290 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2291 			}
2292 		}
2293 	}
2294 }
2295 
2296 /*
2297  * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state
2298  *
2299  * @peer_handle: Datapath peer handle
2300  * @tid: TID number
2301  * @status: tx completion status
2302  * Return: 0 on success, error code on failure
2303  */
2304 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
2305 				      uint8_t tid, int status)
2306 {
2307 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2308 	struct dp_rx_tid *rx_tid = NULL;
2309 
2310 	if (!peer || peer->delete_in_progress) {
2311 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2312 			  "%s: Peer is NULL or delete in progress", __func__);
2313 		return QDF_STATUS_E_FAILURE;
2314 	}
2315 	rx_tid = &peer->rx_tid[tid];
2316 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2317 	if (status) {
2318 		rx_tid->num_addba_rsp_failed++;
2319 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2320 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2321 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2322 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2323 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
2324 			 __func__, tid);
2325 		return QDF_STATUS_SUCCESS;
2326 	}
2327 
2328 	rx_tid->num_addba_rsp_success++;
2329 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2330 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2331 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2332 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2333 			__func__, tid);
2334 		return QDF_STATUS_E_FAILURE;
2335 	}
2336 
2337 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2338 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2339 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2340 			  "%s: default route is not set for peer: %pM",
2341 			  __func__, peer->mac_addr.raw);
2342 		return QDF_STATUS_E_FAILURE;
2343 	}
2344 
2345 	/* First Session */
2346 	if (peer->active_ba_session_cnt == 0) {
2347 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2348 			peer->hw_buffer_size = 256;
2349 		else
2350 			peer->hw_buffer_size = 64;
2351 	}
2352 
2353 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2354 
2355 	peer->active_ba_session_cnt++;
2356 
2357 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2358 
2359 	/* Kill any session having 256 buffer size
2360 	 * when 64 buffer size request is received.
2361 	 * Also, latch on to 64 as new buffer size.
2362 	 */
2363 	if (peer->kill_256_sessions) {
2364 		dp_teardown_256_ba_sessions(peer);
2365 		peer->kill_256_sessions = 0;
2366 	}
2367 	return QDF_STATUS_SUCCESS;
2368 }
2369 
2370 /*
2371  * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
2372  *
2373  * @peer_handle: Datapath peer handle
2374  * @tid: TID number
2375  * @dialogtoken: output dialogtoken
2376  * @statuscode: output status code
2377  * @buffersize: Output BA window size
2378  * @batimeout: Output BA timeout
2379  */
2380 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2381 	uint8_t *dialogtoken, uint16_t *statuscode,
2382 	uint16_t *buffersize, uint16_t *batimeout)
2383 {
2384 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2385 	struct dp_rx_tid *rx_tid = NULL;
2386 
2387 	if (!peer || peer->delete_in_progress) {
2388 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2389 			  "%s: Peer is NULL or delete in progress", __func__);
2390 		return;
2391 	}
2392 	rx_tid = &peer->rx_tid[tid];
2393 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2394 	rx_tid->num_of_addba_resp++;
2395 	/* setup ADDBA response parameters */
2396 	*dialogtoken = rx_tid->dialogtoken;
2397 	*statuscode = rx_tid->statuscode;
2398 	*buffersize = rx_tid->ba_win_size;
2399 	*batimeout  = 0;
2400 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2401 }
2402 
2403 /* dp_check_ba_buffersize() - Check buffer size in request
2404  *                            and latch onto this size based on
2405  *                            size used in first active session.
2406  * @peer: Datapath peer
2407  * @tid: Tid
2408  * @buffersize: Block ack window size
2409  *
2410  * Return: void
2411  */
2412 static void dp_check_ba_buffersize(struct dp_peer *peer,
2413 				   uint16_t tid,
2414 				   uint16_t buffersize)
2415 {
2416 	struct dp_rx_tid *rx_tid = NULL;
2417 
2418 	rx_tid = &peer->rx_tid[tid];
2419 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2420 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2421 		rx_tid->ba_win_size = buffersize;
2422 		return;
2423 	} else {
2424 		if (peer->active_ba_session_cnt == 0) {
2425 			rx_tid->ba_win_size = buffersize;
2426 		} else {
2427 			if (peer->hw_buffer_size == 64) {
2428 				if (buffersize <= 64)
2429 					rx_tid->ba_win_size = buffersize;
2430 				else
2431 					rx_tid->ba_win_size = peer->hw_buffer_size;
2432 			} else if (peer->hw_buffer_size == 256) {
2433 				if (buffersize > 64) {
2434 					rx_tid->ba_win_size = buffersize;
2435 				} else {
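					/* A request of 64 or less while HW
					 * is latched to 256: fall back to 64
					 * and mark the existing 256-window
					 * sessions for teardown.
					 */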
2436 					rx_tid->ba_win_size = buffersize;
2437 					peer->hw_buffer_size = 64;
2438 					peer->kill_256_sessions = 1;
2439 				}
2440 			}
2441 		}
2442 	}
2443 }
2444 
2445 /*
2446  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2447  *
2448  * @peer: Datapath peer handle
2449  * @dialogtoken: dialogtoken from ADDBA frame
2450  * @tid: TID number
2451  * @batimeout: BA timeout
2452  * @buffersize: BA window size
2453  * @startseqnum: Start seq. number received in BA sequence control
2454  *
2455  * Return: 0 on success, error code on failure
2456  */
2457 int dp_addba_requestprocess_wifi3(void *peer_handle,
2458 				  uint8_t dialogtoken,
2459 				  uint16_t tid, uint16_t batimeout,
2460 				  uint16_t buffersize,
2461 				  uint16_t startseqnum)
2462 {
2463 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2464 	struct dp_rx_tid *rx_tid = NULL;
2465 
2466 	if (!peer || peer->delete_in_progress) {
2467 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2468 			  "%s: Peer is NULL or delete in progress", __func__);
2469 		return QDF_STATUS_E_FAILURE;
2470 	}
2471 	rx_tid = &peer->rx_tid[tid];
2472 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2473 	rx_tid->num_of_addba_req++;
2474 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2475 	    rx_tid->hw_qdesc_vaddr_unaligned) {
2476 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2477 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2478 		peer->active_ba_session_cnt--;
2479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2480 			  "%s: Addba recvd for Rx Tid-%d hw qdesc is already setup",
2481 			  __func__, tid);
2482 	}
2483 
2484 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2485 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2486 		return QDF_STATUS_E_FAILURE;
2487 	}
2488 	dp_check_ba_buffersize(peer, tid, buffersize);
2489 
2490 	if (dp_rx_tid_setup_wifi3(peer, tid,
2491 	    rx_tid->ba_win_size, startseqnum)) {
2492 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2493 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2494 		return QDF_STATUS_E_FAILURE;
2495 	}
2496 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2497 
2498 	rx_tid->dialogtoken = dialogtoken;
2499 	rx_tid->startseqnum = startseqnum;
2500 
2501 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2502 		rx_tid->statuscode = rx_tid->userstatuscode;
2503 	else
2504 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2505 
2506 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2507 
2508 	return QDF_STATUS_SUCCESS;
2509 }
2510 
2511 /*
2512  * dp_set_addba_response() - Set a user defined ADDBA response status code
2513  *
2514  * @peer_handle: Datapath peer handle
2515  * @tid: TID number
2516  * @statuscode: response status code to be set
2517  */
2518 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2519 	uint16_t statuscode)
2520 {
2521 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2522 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2523 
2524 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2525 	rx_tid->userstatuscode = statuscode;
2526 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2527 }
2528 
2529 /*
2530  * dp_delba_process_wifi3() - Process DELBA from peer
2531  * @peer_handle: Datapath peer handle
2532  * @tid: TID number
2533  * @reasoncode: Reason code received in DELBA frame
2534  *
2535  * Return: 0 on success, error code on failure
2536  */
2537 int dp_delba_process_wifi3(void *peer_handle,
2538 	int tid, uint16_t reasoncode)
2539 {
2540 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2541 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2542 
2543 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2544 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2545 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2546 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2547 		return QDF_STATUS_E_FAILURE;
2548 	}
2549 	/* TODO: See if we can delete the existing REO queue descriptor and
2550 	 * replace it with a new one without the queue extension descriptor
2551 	 * to save memory
2552 	 */
2553 	rx_tid->delba_rcode = reasoncode;
2554 	rx_tid->num_of_delba_req++;
2555 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2556 
2557 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2558 	peer->active_ba_session_cnt--;
2559 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2560 	return 0;
2561 }
2562 
2563 /*
2564  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2565  *
2566  * @peer_handle: Datapath peer handle
2567  * @tid: TID number
2568  * @status: tx completion status
2569  *
2570  * Return: 0 on success, error code on failure
2571  */
2572 int dp_delba_tx_completion_wifi3(void *peer_handle,
2573 				 uint8_t tid, int status)
2574 {
2575 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2576 	struct dp_rx_tid *rx_tid = NULL;
2577 
2578 	if (!peer || peer->delete_in_progress) {
2579 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2580 			  "%s: Peer is NULL or delete in progress", __func__);
2581 		return QDF_STATUS_E_FAILURE;
2582 	}
2583 	rx_tid = &peer->rx_tid[tid];
2584 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2585 	if (status) {
2586 		rx_tid->delba_tx_fail_cnt++;
2587 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2588 			rx_tid->delba_tx_retry = 0;
2589 			rx_tid->delba_tx_status = 0;
2590 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2591 		} else {
2592 			rx_tid->delba_tx_retry++;
2593 			rx_tid->delba_tx_status = 1;
2594 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2595 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2596 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2597 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2598 				rx_tid->delba_rcode);
2599 		}
2600 		return QDF_STATUS_SUCCESS;
2601 	} else {
2602 		rx_tid->delba_tx_success_cnt++;
2603 		rx_tid->delba_tx_retry = 0;
2604 		rx_tid->delba_tx_status = 0;
2605 	}
2606 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2607 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2608 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2609 		peer->active_ba_session_cnt--;
2610 	}
2611 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2612 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2613 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2614 	}
2615 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2616 
2617 	return QDF_STATUS_SUCCESS;
2618 }
2619 
2620 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2621 	qdf_nbuf_t msdu_list)
2622 {
2623 	while (msdu_list) {
2624 		qdf_nbuf_t msdu = msdu_list;
2625 
2626 		msdu_list = qdf_nbuf_next(msdu_list);
2627 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2628 			  "discard rx %pK from partly-deleted peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
2629 			  msdu, peer,
2630 			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2631 			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2632 			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2633 		qdf_nbuf_free(msdu);
2634 	}
2635 }
2636 
2637 
2638 /**
2639  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2640  * @vdev_handle: Datapath vdev handle
2641  * @peer_handle: Datapath peer handle
2642  * @sec_type: security type
2643  * @rx_pn: Receive PN starting number
2645  *
2646  */
2647 
2648 void
2649 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
2650 {
2651 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2652 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2653 	struct dp_pdev *pdev;
2654 	struct dp_soc *soc;
2655 	int i;
2656 	uint8_t pn_size;
2657 	struct hal_reo_cmd_params params;
2658 
2659 	/* preconditions */
2660 	qdf_assert(vdev);
2661 
2662 	pdev = vdev->pdev;
2663 	soc = pdev->soc;
2664 
2665 
2666 	qdf_mem_zero(&params, sizeof(params));
2667 
2668 	params.std.need_status = 1;
2669 	params.u.upd_queue_params.update_pn_valid = 1;
2670 	params.u.upd_queue_params.update_pn_size = 1;
2671 	params.u.upd_queue_params.update_pn = 1;
2672 	params.u.upd_queue_params.update_pn_check_needed = 1;
2673 	params.u.upd_queue_params.update_svld = 1;
2674 	params.u.upd_queue_params.svld = 0;
2675 
2676 	peer->security[dp_sec_ucast].sec_type = sec_type;
2677 
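	/* Select the PN size enforced by REO: 48-bit PN for TKIP/CCMP/GCMP
	 * ciphers, 128-bit PN for WAPI (even/odd PN depending on AP or STA
	 * role), and no PN check otherwise.
	 */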
2678 	switch (sec_type) {
2679 	case cdp_sec_type_tkip_nomic:
2680 	case cdp_sec_type_aes_ccmp:
2681 	case cdp_sec_type_aes_ccmp_256:
2682 	case cdp_sec_type_aes_gcmp:
2683 	case cdp_sec_type_aes_gcmp_256:
2684 		params.u.upd_queue_params.pn_check_needed = 1;
2685 		params.u.upd_queue_params.pn_size = 48;
2686 		pn_size = 48;
2687 		break;
2688 	case cdp_sec_type_wapi:
2689 		params.u.upd_queue_params.pn_check_needed = 1;
2690 		params.u.upd_queue_params.pn_size = 128;
2691 		pn_size = 128;
2692 		if (vdev->opmode == wlan_op_mode_ap) {
2693 			params.u.upd_queue_params.pn_even = 1;
2694 			params.u.upd_queue_params.update_pn_even = 1;
2695 		} else {
2696 			params.u.upd_queue_params.pn_uneven = 1;
2697 			params.u.upd_queue_params.update_pn_uneven = 1;
2698 		}
2699 		break;
2700 	default:
2701 		params.u.upd_queue_params.pn_check_needed = 0;
2702 		pn_size = 0;
2703 		break;
2704 	}
2705 
2706 
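	/* Push the updated PN parameters to every TID's REO queue
	 * descriptor; TIDs whose queue is not set up yet are skipped.
	 */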
2707 	for (i = 0; i < DP_MAX_TIDS; i++) {
2708 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2709 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2710 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2711 			params.std.addr_lo =
2712 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2713 			params.std.addr_hi =
2714 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2715 
2716 			if (pn_size) {
2717 				QDF_TRACE(QDF_MODULE_ID_DP,
2718 					  QDF_TRACE_LEVEL_INFO_HIGH,
2719 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
2720 					  __func__, i, rx_pn[3], rx_pn[2],
2721 					  rx_pn[1], rx_pn[0]);
2722 				params.u.upd_queue_params.update_pn_valid = 1;
2723 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2724 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2725 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2726 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2727 			}
2728 			rx_tid->pn_size = pn_size;
2729 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2730 				dp_rx_tid_update_cb, rx_tid);
2731 		} else {
2732 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2733 				  "PN Check not setup for TID:%d", i);
2734 		}
2735 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2736 	}
2737 }
2738 
2739 
2740 void
2741 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2742 	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2743 	u_int32_t *rx_pn)
2744 {
2745 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2746 	struct dp_peer *peer;
2747 	int sec_index;
2748 
2749 	peer = dp_peer_find_by_id(soc, peer_id);
2750 	if (!peer) {
2751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2752 			  "Couldn't find peer from ID %d - skipping security inits",
2753 			  peer_id);
2754 		return;
2755 	}
2756 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2757 		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
2758 		  peer,
2759 		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2760 		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2761 		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2762 		  is_unicast ? "ucast" : "mcast",
2763 		  sec_type);
2764 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2765 	peer->security[sec_index].sec_type = sec_type;
2766 #ifdef notyet /* TODO: See if this is required for defrag support */
2767 	/* michael key only valid for TKIP, but for simplicity,
2768 	 * copy it anyway
2769 	 */
2770 	qdf_mem_copy(
2771 		&peer->security[sec_index].michael_key[0],
2772 		michael_key,
2773 		sizeof(peer->security[sec_index].michael_key));
2774 #ifdef BIG_ENDIAN_HOST
2775 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2776 				 sizeof(peer->security[sec_index].michael_key));
2777 #endif /* BIG_ENDIAN_HOST */
2778 #endif
2779 
2780 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2781 	if (sec_type != cdp_sec_type_wapi) {
2782 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
2783 	} else {
2784 		for (i = 0; i < DP_MAX_TIDS; i++) {
2785 			/*
2786 			 * Setting PN valid bit for WAPI sec_type,
2787 			 * since WAPI PN has to be started with predefined value
2788 			 */
2789 			peer->tids_last_pn_valid[i] = 1;
2790 			qdf_mem_copy(
2791 				(u_int8_t *) &peer->tids_last_pn[i],
2792 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2793 			peer->tids_last_pn[i].pn128[1] =
2794 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2795 			peer->tids_last_pn[i].pn128[0] =
2796 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2797 		}
2798 	}
2799 #endif
2800 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2801 	 * all security types and last pn for WAPI) once REO command API
2802 	 * is available
2803 	 */
2804 
2805 	dp_peer_unref_del_find_by_id(peer);
2806 }
2807 
2808 #ifdef CONFIG_MCL
2809 /**
2810  * dp_register_peer() - Register peer into physical device
2811  * @pdev - data path device instance
2812  * @sta_desc - peer description
2813  *
2814  * Register peer into physical device
2815  *
2816  * Return: QDF_STATUS_SUCCESS registration success
2817  *         QDF_STATUS_E_FAULT peer not found
2818  */
2819 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2820 		struct ol_txrx_desc_type *sta_desc)
2821 {
2822 	struct dp_peer *peer;
2823 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2824 
2825 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2826 			sta_desc->sta_id);
2827 	if (!peer)
2828 		return QDF_STATUS_E_FAULT;
2829 
2830 	qdf_spin_lock_bh(&peer->peer_info_lock);
2831 	peer->state = OL_TXRX_PEER_STATE_CONN;
2832 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2833 
2834 	dp_rx_flush_rx_cached(peer, false);
2835 
2836 	return QDF_STATUS_SUCCESS;
2837 }
2838 
2839 /**
2840  * dp_clear_peer() - remove peer from physical device
2841  * @pdev - data path device instance
2842  * @sta_id - local peer id
2843  *
2844  * remove peer from physical device
2845  *
2846  * Return: QDF_STATUS_SUCCESS registration success
2847  *         QDF_STATUS_E_FAULT peer not found
2848  */
2849 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2850 {
2851 	struct dp_peer *peer;
2852 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2853 
2854 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2855 	if (!peer)
2856 		return QDF_STATUS_E_FAULT;
2857 
2858 	qdf_spin_lock_bh(&peer->peer_info_lock);
2859 	peer->state = OL_TXRX_PEER_STATE_DISC;
2860 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2861 
2862 	dp_rx_flush_rx_cached(peer, true);
2863 
2864 	return QDF_STATUS_SUCCESS;
2865 }
2866 
2867 /**
2868  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2869  * @pdev - data path device instance
2870  * @vdev - virtual interface instance
2871  * @peer_addr - peer mac address
2872  * @peer_id - local peer id with target mac address
2873  *
2874  * Find peer by peer mac address within vdev
2875  *
2876  * Return: peer instance void pointer
2877  *         NULL cannot find target peer
2878  */
2879 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2880 		struct cdp_vdev *vdev_handle,
2881 		uint8_t *peer_addr, uint8_t *local_id)
2882 {
2883 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2884 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2885 	struct dp_peer *peer;
2886 
2887 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2888 
2889 	if (!peer)
2890 		return NULL;
2891 
2892 	if (peer->vdev != vdev) {
2893 		dp_peer_unref_delete(peer);
2894 		return NULL;
2895 	}
2896 
2897 	*local_id = peer->local_id;
2898 
2899 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2900 	 * Decrement it here.
2901 	 */
2902 	dp_peer_unref_delete(peer);
2903 
2904 	return peer;
2905 }
2906 
2907 /**
2908  * dp_local_peer_id() - Find local peer id within peer instance
2909  * @peer - peer instance
2910  *
2911  * Find local peer id within peer instance
2912  *
2913  * Return: local peer id
2914  */
2915 uint16_t dp_local_peer_id(void *peer)
2916 {
2917 	return ((struct dp_peer *)peer)->local_id;
2918 }
2919 
2920 /**
2921  * dp_peer_find_by_local_id() - Find peer by local peer id
2922  * @pdev - data path device instance
2923  * @local_peer_id - local peer id want to find
2924  *
2925  * Find peer by local peer id within physical device
2926  *
2927  * Return: peer instance void pointer
2928  *         NULL cannot find target peer
2929  */
2930 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2931 {
2932 	struct dp_peer *peer;
2933 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2934 
2935 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2936 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2937 				   "Incorrect local id %u", local_id);
2938 		return NULL;
2939 	}
2940 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2941 	peer = pdev->local_peer_ids.map[local_id];
2942 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2943 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2944 	return peer;
2945 }
2946 
2947 /**
2948  * dp_peer_state_update() - update peer local state
2949  * @pdev - data path device instance
2950  * @peer_addr - peer mac address
2951  * @state - new peer local state
2952  *
2953  * update peer local state
2954  *
2955  * Return: QDF_STATUS_SUCCESS registration success
2956  */
2957 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2958 		enum ol_txrx_peer_state state)
2959 {
2960 	struct dp_peer *peer;
2961 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2962 
2963 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2964 	if (!peer) {
2965 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2966 			  "Failed to find peer for: [%pM]", peer_mac);
2967 		return QDF_STATUS_E_FAILURE;
2968 	}
2969 	peer->state = state;
2970 
2971 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2972 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2973 	 * Decrement it here.
2974 	 */
2975 	dp_peer_unref_delete(peer);
2976 
2977 	return QDF_STATUS_SUCCESS;
2978 }
2979 
2980 /**
2981  * dp_get_vdevid() - Get virtual interface id which peer registered
2982  * @peer - peer instance
2983  * @vdev_id - virtual interface id which peer registered
2984  *
2985  * Get virtual interface id which peer registered
2986  *
2987  * Return: QDF_STATUS_SUCCESS registration success
2988  */
2989 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2990 {
2991 	struct dp_peer *peer = peer_handle;
2992 
2993 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2994 			peer, peer->vdev, peer->vdev->vdev_id);
2995 	*vdev_id = peer->vdev->vdev_id;
2996 	return QDF_STATUS_SUCCESS;
2997 }
2998 
2999 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
3000 				       uint8_t sta_id)
3001 {
3002 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3003 	struct dp_peer *peer = NULL;
3004 
3005 	if (sta_id >= WLAN_MAX_STA_COUNT) {
3006 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3007 			  "Invalid sta id passed");
3008 		return NULL;
3009 	}
3010 
3011 	if (!pdev) {
3012 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3013 			  "PDEV not found for sta_id [%d]", sta_id);
3014 		return NULL;
3015 	}
3016 
3017 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
3018 	if (!peer) {
3019 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3020 			  "PEER [%d] not found", sta_id);
3021 		return NULL;
3022 	}
3023 
3024 	return (struct cdp_vdev *)peer->vdev;
3025 }
3026 
3027 /**
3028  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
3029  * @peer - peer instance
3030  *
3031  * Get virtual interface instance which peer belongs
3032  *
3033  * Return: virtual interface instance pointer
3034  *         NULL in case cannot find
3035  */
3036 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3037 {
3038 	struct dp_peer *peer = peer_handle;
3039 
3040 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3041 	return (struct cdp_vdev *)peer->vdev;
3042 }
3043 
3044 /**
3045  * dp_peer_get_peer_mac_addr() - Get peer mac address
3046  * @peer - peer instance
3047  *
3048  * Get peer mac address
3049  *
3050  * Return: peer mac address pointer
3051  *         NULL in case cannot find
3052  */
3053 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3054 {
3055 	struct dp_peer *peer = peer_handle;
3056 	uint8_t *mac;
3057 
3058 	mac = peer->mac_addr.raw;
3059 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3060 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3061 	return peer->mac_addr.raw;
3062 }
3063 
3064 /**
3065  * dp_get_peer_state() - Get local peer state
3066  * @peer - peer instance
3067  *
3068  * Get local peer state
3069  *
3070  * Return: peer status
3071  */
3072 int dp_get_peer_state(void *peer_handle)
3073 {
3074 	struct dp_peer *peer = peer_handle;
3075 
3076 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
3077 	return peer->state;
3078 }
3079 
3080 /**
3081  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3082  * @pdev - data path device instance
3083  *
3084  * local peer id pool alloc for physical device
3085  *
3086  * Return: none
3087  */
3088 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3089 {
3090 	int i;
3091 
3092 	/* point the freelist to the first ID */
3093 	pdev->local_peer_ids.freelist = 0;
3094 
3095 	/* link each ID to the next one */
3096 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3097 		pdev->local_peer_ids.pool[i] = i + 1;
3098 		pdev->local_peer_ids.map[i] = NULL;
3099 	}
3100 
3101 	/* link the last ID to itself, to mark the end of the list */
3102 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3103 	pdev->local_peer_ids.pool[i] = i;
3104 
3105 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3106 	DP_TRACE(INFO, "Peer pool init");
3107 }
3108 
3109 /**
3110  * dp_local_peer_id_alloc() - allocate local peer id
3111  * @pdev - data path device instance
3112  * @peer - new peer instance
3113  *
3114  * allocate local peer id
3115  *
3116  * Return: none
3117  */
3118 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3119 {
3120 	int i;
3121 
3122 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3123 	i = pdev->local_peer_ids.freelist;
3124 	if (pdev->local_peer_ids.pool[i] == i) {
3125 		/* the list is empty, except for the list-end marker */
3126 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3127 	} else {
3128 		/* take the head ID and advance the freelist */
3129 		peer->local_id = i;
3130 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3131 		pdev->local_peer_ids.map[i] = peer;
3132 	}
3133 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3134 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
3135 }
3136 
3137 /**
3138  * dp_local_peer_id_free() - remove local peer id
3139  * @pdev - data path device instance
3140  * @peer - peer instance should be removed
3141  *
3142  * remove local peer id
3143  *
3144  * Return: none
3145  */
3146 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3147 {
3148 	int i = peer->local_id;
3149 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3150 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3151 		return;
3152 	}
3153 
3154 	/* put this ID on the head of the freelist */
3155 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3156 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3157 	pdev->local_peer_ids.freelist = i;
3158 	pdev->local_peer_ids.map[i] = NULL;
3159 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3160 }
3161 #endif
3162 
3163 /**
3164  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3165  * @soc_handle: DP SOC handle
3166  * @peer_id: peer_id of the peer
3167  *
3168  * Return: vdev_id of the vap
3169  */
3170 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3171 		uint16_t peer_id, uint8_t *peer_mac)
3172 {
3173 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3174 	struct dp_peer *peer;
3175 	uint8_t vdev_id;
3176 
3177 	peer = dp_peer_find_by_id(soc, peer_id);
3178 
3179 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3180 		  "soc %pK peer_id %d", soc, peer_id);
3181 
3182 	if (!peer) {
3183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3184 			  "peer not found ");
3185 		return CDP_INVALID_VDEV_ID;
3186 	}
3187 
3188 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3189 	vdev_id = peer->vdev->vdev_id;
3190 
3191 	dp_peer_unref_del_find_by_id(peer);
3192 
3193 	return vdev_id;
3194 }
3195 
3196 /**
3197  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3198  * @peer: DP peer handle
3199  * @dp_stats_cmd_cb: REO command callback function
3200  * @cb_ctxt: Callback context
3201  *
3202  * Return: none
3203  */
3204 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3205 			void *cb_ctxt)
3206 {
3207 	struct dp_soc *soc = peer->vdev->pdev->soc;
3208 	struct hal_reo_cmd_params params;
3209 	int i;
3210 
3211 	if (!dp_stats_cmd_cb)
3212 		return;
3213 
3214 	qdf_mem_zero(&params, sizeof(params));
3215 	for (i = 0; i < DP_MAX_TIDS; i++) {
3216 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3217 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3218 			params.std.need_status = 1;
3219 			params.std.addr_lo =
3220 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3221 			params.std.addr_hi =
3222 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3223 
3224 			if (cb_ctxt) {
3225 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3226 					&params, dp_stats_cmd_cb, cb_ctxt);
3227 			} else {
3228 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3229 					&params, dp_stats_cmd_cb, rx_tid);
3230 			}
3231 
3232 			/* Flush REO descriptor from HW cache to update stats
3233 			 * in descriptor memory. This is to help debugging */
3234 			qdf_mem_zero(&params, sizeof(params));
3235 			params.std.need_status = 0;
3236 			params.std.addr_lo =
3237 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3238 			params.std.addr_hi =
3239 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3240 			params.u.fl_cache_params.flush_no_inval = 1;
3241 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3242 				NULL);
3243 		}
3244 	}
3245 }
3246 
3247 void dp_set_michael_key(struct cdp_peer *peer_handle,
3248 			bool is_unicast, uint32_t *key)
3249 {
3250 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3251 	uint8_t sec_index = is_unicast ? 1 : 0;
3252 
3253 	if (!peer) {
3254 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3255 			  "peer not found ");
3256 		return;
3257 	}
3258 
3259 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3260 		     key, IEEE80211_WEP_MICLEN);
3261 }
3262 
3263 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3264 {
3265 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3266 
3267 	if (peer) {
3268 		/*
3269 		 * Decrement the peer ref which is taken as part of
3270 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3271 		 */
3272 		dp_peer_unref_del_find_by_id(peer);
3273 
3274 		return true;
3275 	}
3276 
3277 	return false;
3278 }
3279