xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef WLAN_TX_PKT_CAPTURE_ENH
37 #include "dp_tx_capture.h"
38 #endif
39 
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 
44 #ifdef FEATURE_WDS
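/*
 * dp_peer_ast_free_in_unmap_supported() - Check whether the AST entry is
 *                                          freed in the unmap event path
 * @peer: peer handle
 * @ast_entry: AST entry of the node
 *
 * Return: true if the entry is freed in the unmap event path (all types
 *         except WDS_HM_SEC and SELF), false otherwise
 */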
45 static inline bool
46 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
47 				    struct dp_ast_entry *ast_entry)
48 {
49 	/* If peer map v2 is enabled, the AST entry is not freed here;
50 	 * it is supposed to be freed in the unmap event (after delete
51 	 * confirmation is received from the target).
52 	 *
53 	 * If peer_id is invalid, the peer map event was never received,
54 	 * so the AST entry is freed from here only in that case.
55 	 */
56 
57 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
58 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
59 		return true;
60 
61 	return false;
62 }
63 #else
64 static inline bool
65 dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
66 				    struct dp_ast_entry *ast_entry)
67 {
68 	return false;
69 }
70 #endif
71 
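/*
 * dp_set_ssn_valid_flag() - Set the SSN valid flag in the REO queue
 *                           update command parameters
 * @params: REO command parameters
 * @valid: SSN valid flag value to program
 *
 * Return: None
 */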
72 static inline void
73 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
74 					uint8_t valid)
75 {
76 	params->u.upd_queue_params.update_svld = 1;
77 	params->u.upd_queue_params.svld = valid;
78 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
79 		  "%s: Setting SSN valid bit to %d",
80 		  __func__, valid);
81 }
82 
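/*
 * dp_peer_find_mac_addr_cmp() - Compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the MAC addresses are equal, non-zero otherwise
 */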
83 static inline int dp_peer_find_mac_addr_cmp(
84 	union dp_align_mac_addr *mac_addr1,
85 	union dp_align_mac_addr *mac_addr2)
86 {
87 		/*
88 		 * Intentionally use & rather than &&.
89 		 * Because the operands are binary rather than generic booleans,
90 		 * the functionality is equivalent.
91 		 * Using && has the advantage of short-circuited evaluation,
92 		 * but using & has the advantage of no conditional branching,
93 		 * which is a more significant benefit.
94 		 */
95 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
96 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
97 }
98 
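/*
 * dp_peer_ast_table_attach() - Allocate the AST index to AST entry map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */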
99 static int dp_peer_ast_table_attach(struct dp_soc *soc)
100 {
101 	uint32_t max_ast_index;
102 
103 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
104 	/* allocate ast_table for ast entry to ast_index map */
105 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
106 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
107 	soc->ast_table = qdf_mem_malloc(max_ast_index *
108 					sizeof(struct dp_ast_entry *));
109 	if (!soc->ast_table) {
110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
111 			  "%s: ast_table memory allocation failed", __func__);
112 		return QDF_STATUS_E_NOMEM;
113 	}
114 	return 0; /* success */
115 }
116 
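/*
 * dp_peer_find_map_attach() - Allocate the peer ID to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */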
117 static int dp_peer_find_map_attach(struct dp_soc *soc)
118 {
119 	uint32_t max_peers, peer_map_size;
120 
121 	max_peers = soc->max_peers;
122 	/* allocate the peer ID -> peer object map */
123 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
124 		  "\n<=== cfg max peer id %d ====>", max_peers);
125 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
126 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
127 	if (!soc->peer_id_to_obj_map) {
128 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
129 			  "%s: peer map memory allocation failed", __func__);
130 		return QDF_STATUS_E_NOMEM;
131 	}
132 
133 	/*
134 	 * The peer_id_to_obj_map doesn't really need to be initialized,
135 	 * since elements are only used after they have been individually
136 	 * initialized.
137 	 * However, it is convenient for debugging to have all elements
138 	 * that are not in use set to 0.
139 	 */
140 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
141 	return 0; /* success */
142 }
143 
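/*
 * dp_log2_ceil() - Compute the ceiling of log2 of a value
 * @value: input value
 *
 * Return: smallest n such that (1 << n) >= value
 */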
144 static int dp_log2_ceil(unsigned int value)
145 {
146 	unsigned int tmp = value;
147 	int log2 = -1;
148 
149 	while (tmp) {
150 		log2++;
151 		tmp >>= 1;
152 	}
153 	if (1 << log2 != value)
154 		log2++;
155 	return log2;
156 }
157 
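/*
 * dp_peer_find_add_id_to_obj() - Assign a peer ID to a peer object
 * @peer: peer handle
 * @peer_id: peer ID to assign
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if the peer already has a
 *         valid peer ID
 */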
158 static int dp_peer_find_add_id_to_obj(
159 	struct dp_peer *peer,
160 	uint16_t peer_id)
161 {
162 
163 	if (peer->peer_id == HTT_INVALID_PEER) {
164 		peer->peer_id = peer_id;
165 		return 0; /* success */
166 	}
167 	return QDF_STATUS_E_FAILURE; /* failure */
168 }
169 
170 #define DP_PEER_HASH_LOAD_MULT  2
171 #define DP_PEER_HASH_LOAD_SHIFT 0
172 
173 #define DP_AST_HASH_LOAD_MULT  2
174 #define DP_AST_HASH_LOAD_SHIFT 0
175 
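/*
 * dp_peer_find_hash_attach() - Allocate and initialize the peer MAC address
 *                              to peer object hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */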
176 static int dp_peer_find_hash_attach(struct dp_soc *soc)
177 {
178 	int i, hash_elems, log2;
179 
180 	/* allocate the peer MAC address -> peer object hash table */
181 	hash_elems = soc->max_peers;
182 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
183 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
184 	log2 = dp_log2_ceil(hash_elems);
185 	hash_elems = 1 << log2;
186 
187 	soc->peer_hash.mask = hash_elems - 1;
188 	soc->peer_hash.idx_bits = log2;
189 	/* allocate an array of TAILQ peer object lists */
190 	soc->peer_hash.bins = qdf_mem_malloc(
191 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
192 	if (!soc->peer_hash.bins)
193 		return QDF_STATUS_E_NOMEM;
194 
195 	for (i = 0; i < hash_elems; i++)
196 		TAILQ_INIT(&soc->peer_hash.bins[i]);
197 
198 	return 0;
199 }
200 
201 static void dp_peer_find_hash_detach(struct dp_soc *soc)
202 {
203 	if (soc->peer_hash.bins) {
204 		qdf_mem_free(soc->peer_hash.bins);
205 		soc->peer_hash.bins = NULL;
206 	}
207 }
208 
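/*
 * dp_peer_find_hash_index() - Compute the peer hash index from a MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address of the peer
 *
 * Return: index into soc->peer_hash.bins
 */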
209 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
210 	union dp_align_mac_addr *mac_addr)
211 {
212 	unsigned index;
213 
214 	index =
215 		mac_addr->align2.bytes_ab ^
216 		mac_addr->align2.bytes_cd ^
217 		mac_addr->align2.bytes_ef;
218 	index ^= index >> soc->peer_hash.idx_bits;
219 	index &= soc->peer_hash.mask;
220 	return index;
221 }
222 
223 
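/*
 * dp_peer_find_hash_add() - Add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer handle
 *
 * Return: None
 */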
224 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
225 {
226 	unsigned index;
227 
228 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
229 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
230 	/*
231 	 * It is important to add the new peer at the tail of the peer list
232 	 * with the bin index.  Together with having the hash_find function
233 	 * search from head to tail, this ensures that if two entries with
234 	 * the same MAC address are stored, the one added first will be
235 	 * found first.
236 	 */
237 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
238 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
239 }
240 
241 /*
242  * dp_peer_exist_on_pdev() - check if a peer with the given mac address exists on the pdev
243  *
244  * @soc: Datapath SOC handle
245  * @peer_mac_addr: peer mac address
246  * @mac_addr_is_aligned: is mac address aligned
247  * @pdev: Datapath PDEV handle
248  *
249  * Return: true if peer found else return false
250  */
251 static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
252 				  uint8_t *peer_mac_addr,
253 				  int mac_addr_is_aligned,
254 				  struct dp_pdev *pdev)
255 {
256 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
257 	unsigned int index;
258 	struct dp_peer *peer;
259 	bool found = false;
260 
261 	if (mac_addr_is_aligned) {
262 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
263 	} else {
264 		qdf_mem_copy(
265 			&local_mac_addr_aligned.raw[0],
266 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
267 		mac_addr = &local_mac_addr_aligned;
268 	}
269 	index = dp_peer_find_hash_index(soc, mac_addr);
270 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
271 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
272 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
273 		    (peer->vdev->pdev == pdev)) {
274 			found = true;
275 			break;
276 		}
277 	}
278 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
279 	return found;
280 }
281 
282 #ifdef FEATURE_AST
283 /*
284  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
285  * @soc: SoC handle
286  *
287  * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
288  */
289 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
290 {
291 	int i, hash_elems, log2;
292 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
293 
294 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
295 		DP_AST_HASH_LOAD_SHIFT);
296 
297 	log2 = dp_log2_ceil(hash_elems);
298 	hash_elems = 1 << log2;
299 
300 	soc->ast_hash.mask = hash_elems - 1;
301 	soc->ast_hash.idx_bits = log2;
302 
303 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
304 		  "ast hash_elems: %d, max_ast_idx: %d",
305 		  hash_elems, max_ast_idx);
306 
307 	/* allocate an array of TAILQ peer object lists */
308 	soc->ast_hash.bins = qdf_mem_malloc(
309 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
310 				dp_ast_entry)));
311 
312 	if (!soc->ast_hash.bins)
313 		return QDF_STATUS_E_NOMEM;
314 
315 	for (i = 0; i < hash_elems; i++)
316 		TAILQ_INIT(&soc->ast_hash.bins[i]);
317 
318 	return 0;
319 }
320 
321 /*
322  * dp_peer_ast_cleanup() - cleanup the references
323  * @soc: SoC handle
324  * @ast: ast entry
325  *
326  * Return: None
327  */
328 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
329 				       struct dp_ast_entry *ast)
330 {
331 	txrx_ast_free_cb cb = ast->callback;
332 	void *cookie = ast->cookie;
333 
334 	/* Call the callbacks to free up the cookie */
335 	if (cb) {
336 		ast->callback = NULL;
337 		ast->cookie = NULL;
338 		cb(soc->ctrl_psoc,
339 		   dp_soc_to_cdp_soc(soc),
340 		   cookie,
341 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
342 	}
343 }
344 
345 /*
346  * dp_peer_ast_hash_detach() - Free AST Hash table
347  * @soc: SoC handle
348  *
349  * Return: None
350  */
351 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
352 {
353 	unsigned int index;
354 	struct dp_ast_entry *ast, *ast_next;
355 
356 	if (!soc->ast_hash.mask)
357 		return;
358 
359 	if (!soc->ast_hash.bins)
360 		return;
361 
362 	qdf_spin_lock_bh(&soc->ast_lock);
363 	for (index = 0; index <= soc->ast_hash.mask; index++) {
364 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
365 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
366 					   hash_list_elem, ast_next) {
367 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
368 					     hash_list_elem);
369 				dp_peer_ast_cleanup(soc, ast);
370 				soc->num_ast_entries--;
371 				qdf_mem_free(ast);
372 			}
373 		}
374 	}
375 	qdf_spin_unlock_bh(&soc->ast_lock);
376 
377 	qdf_mem_free(soc->ast_hash.bins);
378 	soc->ast_hash.bins = NULL;
379 }
380 
381 /*
382  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
383  * @soc: SoC handle
384  *
385  * Return: AST hash
386  */
387 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
388 	union dp_align_mac_addr *mac_addr)
389 {
390 	uint32_t index;
391 
392 	index =
393 		mac_addr->align2.bytes_ab ^
394 		mac_addr->align2.bytes_cd ^
395 		mac_addr->align2.bytes_ef;
396 	index ^= index >> soc->ast_hash.idx_bits;
397 	index &= soc->ast_hash.mask;
398 	return index;
399 }
400 
401 /*
402  * dp_peer_ast_hash_add() - Add AST entry into hash table
403  * @soc: SoC handle
404  *
405  * This function adds the AST entry into SoC AST hash table
406  * It assumes caller has taken the ast lock to protect the access to this table
407  *
408  * Return: None
409  */
410 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
411 		struct dp_ast_entry *ase)
412 {
413 	uint32_t index;
414 
415 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
416 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
417 }
418 
419 /*
420  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
421  * @soc: SoC handle
422  *
423  * This function removes the AST entry from soc AST hash table
424  * It assumes caller has taken the ast lock to protect the access to this table
425  *
426  * Return: None
427  */
428 void dp_peer_ast_hash_remove(struct dp_soc *soc,
429 			     struct dp_ast_entry *ase)
430 {
431 	unsigned index;
432 	struct dp_ast_entry *tmpase;
433 	int found = 0;
434 
435 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
436 	/* Check if tail is not empty before delete*/
437 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
438 
439 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
440 		if (tmpase == ase) {
441 			found = 1;
442 			break;
443 		}
444 	}
445 
446 	QDF_ASSERT(found);
447 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
448 }
449 
450 /*
451  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
452  * @soc: SoC handle
453  * @peer: peer handle
454  * @ast_mac_addr: mac address
455  *
456  * It assumes caller has taken the ast lock to protect the access to ast list
457  *
458  * Return: AST entry
459  */
460 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
461 					   struct dp_peer *peer,
462 					   uint8_t *ast_mac_addr)
463 {
464 	struct dp_ast_entry *ast_entry = NULL;
465 	union dp_align_mac_addr *mac_addr =
466 		(union dp_align_mac_addr *)ast_mac_addr;
467 
468 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
469 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
470 					       &ast_entry->mac_addr)) {
471 			return ast_entry;
472 		}
473 	}
474 
475 	return NULL;
476 }
477 
478 /*
479  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
480  * @soc: SoC handle
481  *
482  * It assumes caller has taken the ast lock to protect the access to
483  * AST hash table
484  *
485  * Return: AST entry
486  */
487 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
488 						     uint8_t *ast_mac_addr,
489 						     uint8_t pdev_id)
490 {
491 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
492 	uint32_t index;
493 	struct dp_ast_entry *ase;
494 
495 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
496 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
497 	mac_addr = &local_mac_addr_aligned;
498 
499 	index = dp_peer_ast_hash_index(soc, mac_addr);
500 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
501 		if ((pdev_id == ase->pdev_id) &&
502 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
503 			return ase;
504 		}
505 	}
506 
507 	return NULL;
508 }
509 
510 /*
511  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
512  * @soc: SoC handle
513  *
514  * It assumes caller has taken the ast lock to protect the access to
515  * AST hash table
516  *
517  * Return: AST entry
518  */
519 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
520 					       uint8_t *ast_mac_addr)
521 {
522 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
523 	unsigned index;
524 	struct dp_ast_entry *ase;
525 
526 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
527 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
528 	mac_addr = &local_mac_addr_aligned;
529 
530 	index = dp_peer_ast_hash_index(soc, mac_addr);
531 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
532 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
533 			return ase;
534 		}
535 	}
536 
537 	return NULL;
538 }
539 
540 /*
541  * dp_peer_map_ast() - Map the ast entry with HW AST Index
542  * @soc: SoC handle
543  * @peer: peer to which ast node belongs
544  * @mac_addr: MAC address of ast node
545  * @hw_peer_id: HW AST Index returned by target in peer map event
546  * @vdev_id: vdev id of the VAP to which the peer belongs
547  * @ast_hash: ast hash value in HW
548  * @is_wds: flag to indicate peer map event for WDS ast entry
549  *
550  * Return: QDF_STATUS code
551  */
552 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
553 					 struct dp_peer *peer,
554 					 uint8_t *mac_addr,
555 					 uint16_t hw_peer_id,
556 					 uint8_t vdev_id,
557 					 uint16_t ast_hash,
558 					 uint8_t is_wds)
559 {
560 	struct dp_ast_entry *ast_entry = NULL;
561 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
562 	void *cookie = NULL;
563 	txrx_ast_free_cb cb = NULL;
564 	QDF_STATUS err = QDF_STATUS_SUCCESS;
565 
566 	if (!peer) {
567 		return QDF_STATUS_E_INVAL;
568 	}
569 
570 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
571 		  "%s: peer %pK ID %d vid %d mac %pM",
572 		  __func__, peer, hw_peer_id, vdev_id, mac_addr);
573 
574 	qdf_spin_lock_bh(&soc->ast_lock);
575 
576 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
577 
578 	if (is_wds) {
579 		/*
580 		 * In certain cases, such as an auth attack on a
581 		 * repeater, the number of ast_entries falling in the
582 		 * same hash bucket can exceed the max_skid length
583 		 * supported by HW in the root AP. In these cases the
584 		 * FW will return the hw_peer_id (ast_index) as 0xffff,
585 		 * indicating HW could not add the entry to its table.
586 		 * The host has to delete the entry from its table in
587 		 * these cases.
588 		 */
589 		if (hw_peer_id == HTT_INVALID_PEER) {
590 			DP_STATS_INC(soc, ast.map_err, 1);
591 			if (ast_entry) {
592 				if (ast_entry->is_mapped) {
593 					soc->ast_table[ast_entry->ast_idx] =
594 						NULL;
595 				}
596 
597 				cb = ast_entry->callback;
598 				cookie = ast_entry->cookie;
599 				peer_type = ast_entry->type;
600 
601 				dp_peer_unlink_ast_entry(soc, ast_entry);
602 				dp_peer_free_ast_entry(soc, ast_entry);
603 
604 				qdf_spin_unlock_bh(&soc->ast_lock);
605 
606 				if (cb) {
607 					cb(soc->ctrl_psoc,
608 					   dp_soc_to_cdp_soc(soc),
609 					   cookie,
610 					   CDP_TXRX_AST_DELETED);
611 				}
612 			} else {
613 				qdf_spin_unlock_bh(&soc->ast_lock);
614 				dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
615 					 peer, peer->peer_id,
616 					 peer->mac_addr.raw, mac_addr,
617 					 vdev_id, is_wds);
618 			}
619 			err = QDF_STATUS_E_INVAL;
620 
621 			dp_hmwds_ast_add_notify(peer, mac_addr,
622 						peer_type, err, true);
623 
624 			return err;
625 		}
626 	}
627 
628 	if (ast_entry) {
629 		ast_entry->ast_idx = hw_peer_id;
630 		soc->ast_table[hw_peer_id] = ast_entry;
631 		ast_entry->is_active = TRUE;
632 		peer_type = ast_entry->type;
633 		ast_entry->ast_hash_value = ast_hash;
634 		ast_entry->is_mapped = TRUE;
635 	}
636 
637 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
638 		if (soc->cdp_soc.ol_ops->peer_map_event) {
639 			soc->cdp_soc.ol_ops->peer_map_event(
640 			soc->ctrl_psoc, peer->peer_id,
641 			hw_peer_id, vdev_id,
642 			mac_addr, peer_type, ast_hash);
643 		}
644 	} else {
645 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
646 			  "AST entry not found");
647 		err = QDF_STATUS_E_NOENT;
648 	}
649 
650 	qdf_spin_unlock_bh(&soc->ast_lock);
651 
652 	dp_hmwds_ast_add_notify(peer, mac_addr,
653 				peer_type, err, true);
654 
655 	return err;
656 }
657 
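/*
 * dp_peer_free_hmwds_cb() - AST free callback used to re-add an HMWDS entry
 *                           once delete confirmation is received from target
 * @ctrl_psoc: control path SoC handle
 * @dp_soc: datapath SoC handle
 * @cookie: dp_ast_free_cb_params with the parameters of the entry to re-add
 * @status: AST free status
 *
 * Return: None
 */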
658 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
659 			   struct cdp_soc *dp_soc,
660 			   void *cookie,
661 			   enum cdp_ast_free_status status)
662 {
663 	struct dp_ast_free_cb_params *param =
664 		(struct dp_ast_free_cb_params *)cookie;
665 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
666 	struct dp_peer *peer = NULL;
667 	QDF_STATUS err = QDF_STATUS_SUCCESS;
668 
669 	if (status != CDP_TXRX_AST_DELETED) {
670 		qdf_mem_free(cookie);
671 		return;
672 	}
673 
674 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
675 				      0, param->vdev_id);
676 	if (peer) {
677 		err = dp_peer_add_ast(soc, peer,
678 				      &param->mac_addr.raw[0],
679 				      param->type,
680 				      param->flags);
681 
682 		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
683 					param->type, err, false);
684 
685 		dp_peer_unref_delete(peer);
686 	}
687 	qdf_mem_free(cookie);
688 }
689 
690 /*
691  * dp_peer_add_ast() - Allocate and add AST entry into peer list
692  * @soc: SoC handle
693  * @peer: peer to which ast node belongs
694  * @mac_addr: MAC address of ast node
695  * @type: AST entry type
 * @flags: AST entry flags
696  *
697  * This API is used by WDS source port learning function to
698  * add a new AST entry into peer AST list
699  *
700  * Return: QDF_STATUS code
701  */
702 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
703 			   struct dp_peer *peer,
704 			   uint8_t *mac_addr,
705 			   enum cdp_txrx_ast_entry_type type,
706 			   uint32_t flags)
707 {
708 	struct dp_ast_entry *ast_entry = NULL;
709 	struct dp_vdev *vdev = NULL;
710 	struct dp_pdev *pdev = NULL;
711 	uint8_t next_node_mac[6];
712 	txrx_ast_free_cb cb = NULL;
713 	void *cookie = NULL;
714 	struct dp_peer *vap_bss_peer = NULL;
715 	bool is_peer_found = false;
716 
717 	vdev = peer->vdev;
718 	if (!vdev) {
719 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
720 			  FL("Peers vdev is NULL"));
721 		QDF_ASSERT(0);
722 		return QDF_STATUS_E_INVAL;
723 	}
724 
725 	pdev = vdev->pdev;
726 
727 	is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
728 
729 	qdf_spin_lock_bh(&soc->ast_lock);
730 	if (peer->delete_in_progress) {
731 		qdf_spin_unlock_bh(&soc->ast_lock);
732 		return QDF_STATUS_E_BUSY;
733 	}
734 
735 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
736 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
737 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
738 		  peer->mac_addr.raw, peer, mac_addr);
739 
740 
741 	/* fw supports only 2 times the max_peers ast entries */
742 	if (soc->num_ast_entries >=
743 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
744 		qdf_spin_unlock_bh(&soc->ast_lock);
745 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
746 			  FL("Max ast entries reached"));
747 		return QDF_STATUS_E_RESOURCES;
748 	}
749 
750 	/* If the AST entry already exists, just return from here.
751 	 * An AST entry with the same mac address can exist on different
752 	 * radios; if ast_override support is enabled, use the search by
753 	 * pdev in this case.
754 	 */
755 	if (soc->ast_override_support) {
756 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
757 							    pdev->pdev_id);
758 		if (ast_entry) {
759 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
760 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
761 				ast_entry->is_active = TRUE;
762 
763 			qdf_spin_unlock_bh(&soc->ast_lock);
764 			return QDF_STATUS_E_ALREADY;
765 		}
766 		if (is_peer_found) {
767 			/* During WDS to static roaming, peer is added
768 			 * to the list before static AST entry create.
769 			 * So, allow AST entry for STATIC type
770 			 * even if peer is present
771 			 */
772 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
773 				qdf_spin_unlock_bh(&soc->ast_lock);
774 				return QDF_STATUS_E_ALREADY;
775 			}
776 		}
777 	} else {
778 		/* For HMWDS_SEC, entries can be added for the same mac
779 		 * address, so do not check for an existing entry
780 		 */
781 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
782 			goto add_ast_entry;
783 
784 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
785 
786 		if (ast_entry) {
787 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
788 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
789 				ast_entry->is_active = TRUE;
790 
791 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
792 			    !ast_entry->delete_in_progress) {
793 				qdf_spin_unlock_bh(&soc->ast_lock);
794 				return QDF_STATUS_E_ALREADY;
795 			}
796 
797 			/* An add for an HMWDS entry cannot be ignored if
798 			 * an AST entry with the same mac address exists.
799 			 *
800 			 * If an AST entry exists with the requested mac
801 			 * address, send a delete command and register a
802 			 * callback which can take care of adding the HMWDS
803 			 * AST entry on delete confirmation from the target.
804 			 */
805 			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
806 				struct dp_ast_free_cb_params *param = NULL;
807 
808 				if (ast_entry->type ==
809 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
810 					goto add_ast_entry;
811 
812 				/* save existing callback */
813 				if (ast_entry->callback) {
814 					cb = ast_entry->callback;
815 					cookie = ast_entry->cookie;
816 				}
817 
818 				param = qdf_mem_malloc(sizeof(*param));
819 				if (!param) {
820 					QDF_TRACE(QDF_MODULE_ID_TXRX,
821 						  QDF_TRACE_LEVEL_ERROR,
822 						  "Allocation failed");
823 					qdf_spin_unlock_bh(&soc->ast_lock);
824 					return QDF_STATUS_E_NOMEM;
825 				}
826 
827 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
828 					     QDF_MAC_ADDR_SIZE);
829 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
830 					     &peer->mac_addr.raw[0],
831 					     QDF_MAC_ADDR_SIZE);
832 				param->type = type;
833 				param->flags = flags;
834 				param->vdev_id = vdev->vdev_id;
835 				ast_entry->callback = dp_peer_free_hmwds_cb;
836 				ast_entry->pdev_id = vdev->pdev->pdev_id;
837 				ast_entry->type = type;
838 				ast_entry->cookie = (void *)param;
839 				if (!ast_entry->delete_in_progress)
840 					dp_peer_del_ast(soc, ast_entry);
841 
842 				qdf_spin_unlock_bh(&soc->ast_lock);
843 
844 				/* Call the saved callback*/
845 				if (cb) {
846 					cb(soc->ctrl_psoc,
847 					   dp_soc_to_cdp_soc(soc),
848 					   cookie,
849 					   CDP_TXRX_AST_DELETE_IN_PROGRESS);
850 				}
851 				return QDF_STATUS_E_AGAIN;
852 			}
853 
854 			/* Modify an already existing AST entry from type
855 			 * WDS to MEC on promotion. This serves as a fix when
856 			 * the backbone of interfaces is interchanged, wherein
857 			 * the wds entry becomes its own MEC. The entry should
858 			 * be replaced only when the ast_entry peer matches the
859 			 * peer received in the mec event. This additional
860 			 * check is needed in wds repeater cases where a
861 			 * multicast packet from a station to the root via the
862 			 * repeater should not remove the wds entry.
863 			 */
864 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
865 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
866 			    (ast_entry->peer == peer)) {
867 				ast_entry->is_active = FALSE;
868 				dp_peer_del_ast(soc, ast_entry);
869 			}
870 			qdf_spin_unlock_bh(&soc->ast_lock);
871 			return QDF_STATUS_E_ALREADY;
872 		}
873 	}
874 
875 add_ast_entry:
876 	ast_entry = (struct dp_ast_entry *)
877 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
878 
879 	if (!ast_entry) {
880 		qdf_spin_unlock_bh(&soc->ast_lock);
881 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
882 			  FL("fail to allocate ast_entry"));
883 		QDF_ASSERT(0);
884 		return QDF_STATUS_E_NOMEM;
885 	}
886 
887 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
888 	ast_entry->pdev_id = vdev->pdev->pdev_id;
889 	ast_entry->is_mapped = false;
890 	ast_entry->delete_in_progress = false;
891 
892 	switch (type) {
893 	case CDP_TXRX_AST_TYPE_STATIC:
894 		peer->self_ast_entry = ast_entry;
895 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
896 		if (peer->vdev->opmode == wlan_op_mode_sta)
897 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
898 		break;
899 	case CDP_TXRX_AST_TYPE_SELF:
900 		peer->self_ast_entry = ast_entry;
901 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
902 		break;
903 	case CDP_TXRX_AST_TYPE_WDS:
904 		ast_entry->next_hop = 1;
905 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
906 		break;
907 	case CDP_TXRX_AST_TYPE_WDS_HM:
908 		ast_entry->next_hop = 1;
909 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
910 		break;
911 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
912 		ast_entry->next_hop = 1;
913 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
914 		break;
915 	case CDP_TXRX_AST_TYPE_MEC:
916 		ast_entry->next_hop = 1;
917 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
918 		break;
919 	case CDP_TXRX_AST_TYPE_DA:
920 		vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev);
921 		if (!vap_bss_peer) {
922 			qdf_spin_unlock_bh(&soc->ast_lock);
923 			qdf_mem_free(ast_entry);
924 			return QDF_STATUS_E_FAILURE;
925 		}
926 		peer = vap_bss_peer;
927 		ast_entry->next_hop = 1;
928 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
929 		break;
930 	default:
931 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
932 			FL("Incorrect AST entry type"));
933 	}
934 
935 	ast_entry->is_active = TRUE;
936 	DP_STATS_INC(soc, ast.added, 1);
937 	soc->num_ast_entries++;
938 	dp_peer_ast_hash_add(soc, ast_entry);
939 
940 	ast_entry->peer = peer;
941 
942 	if (type == CDP_TXRX_AST_TYPE_MEC)
943 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
944 	else
945 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
946 
947 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
948 
949 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
950 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
951 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
952 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
953 		if (QDF_STATUS_SUCCESS ==
954 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
955 				soc->ctrl_psoc,
956 				peer->vdev->vdev_id,
957 				peer->mac_addr.raw,
958 				peer->peer_id,
959 				mac_addr,
960 				next_node_mac,
961 				flags,
962 				ast_entry->type)) {
963 			if (vap_bss_peer)
964 				dp_peer_unref_delete(vap_bss_peer);
965 			qdf_spin_unlock_bh(&soc->ast_lock);
966 			return QDF_STATUS_SUCCESS;
967 		}
968 	}
969 
970 	if (vap_bss_peer)
971 		dp_peer_unref_delete(vap_bss_peer);
972 
973 	qdf_spin_unlock_bh(&soc->ast_lock);
974 	return QDF_STATUS_E_FAILURE;
975 }
976 
977 /*
978  * dp_peer_free_ast_entry() - Free up the ast entry memory
979  * @soc: SoC handle
980  * @ast_entry: Address search entry
981  *
982  * This API is used to free up the memory associated with
983  * AST entry.
984  *
985  * Return: None
986  */
987 void dp_peer_free_ast_entry(struct dp_soc *soc,
988 			    struct dp_ast_entry *ast_entry)
989 {
990 	/*
991 	 * NOTE: Ensure that call to this API is done
992 	 * after soc->ast_lock is taken
993 	 */
994 	ast_entry->callback = NULL;
995 	ast_entry->cookie = NULL;
996 
997 	DP_STATS_INC(soc, ast.deleted, 1);
998 	dp_peer_ast_hash_remove(soc, ast_entry);
999 	dp_peer_ast_cleanup(soc, ast_entry);
1000 	qdf_mem_free(ast_entry);
1001 	soc->num_ast_entries--;
1002 }
1003 
1004 /*
1005  * dp_peer_unlink_ast_entry() - Unlink the AST entry from the peer list
1006  * @soc: SoC handle
1007  * @ast_entry: Address search entry
1008  *
1009  * This API is used to remove/unlink the AST entry from the peer list
1010  * and clear its reference in the SoC AST table.
1011  *
1012  * Return: None
1013  */
1014 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1015 			      struct dp_ast_entry *ast_entry)
1016 {
1017 	/*
1018 	 * NOTE: Ensure that call to this API is done
1019 	 * after soc->ast_lock is taken
1020 	 */
1021 	struct dp_peer *peer = ast_entry->peer;
1022 
1023 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1024 
1025 	if (ast_entry == peer->self_ast_entry)
1026 		peer->self_ast_entry = NULL;
1027 
1028 	/*
1029 	 * release the reference only if it is mapped
1030 	 * to ast_table
1031 	 */
1032 	if (ast_entry->is_mapped)
1033 		soc->ast_table[ast_entry->ast_idx] = NULL;
1034 
1035 	ast_entry->peer = NULL;
1036 }
1037 
1038 /*
1039  * dp_peer_del_ast() - Delete and free AST entry
1040  * @soc: SoC handle
1041  * @ast_entry: AST entry of the node
1042  *
1043  * This function removes the AST entry from peer and soc tables
1044  * It assumes caller has taken the ast lock to protect the access to these
1045  * tables
1046  *
1047  * Return: None
1048  */
1049 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1050 {
1051 	struct dp_peer *peer;
1052 
1053 	if (!ast_entry)
1054 		return;
1055 
1056 	if (ast_entry->delete_in_progress)
1057 		return;
1058 
1059 	ast_entry->delete_in_progress = true;
1060 
1061 	peer = ast_entry->peer;
1062 	dp_peer_ast_send_wds_del(soc, ast_entry);
1063 
1064 	/* Remove SELF and STATIC entries in teardown itself */
1065 	if (!ast_entry->next_hop)
1066 		dp_peer_unlink_ast_entry(soc, ast_entry);
1067 
1068 	if (ast_entry->is_mapped)
1069 		soc->ast_table[ast_entry->ast_idx] = NULL;
1070 
1071 	/* if peer map v2 is enabled we are not freeing ast entry
1072 	 * here and it is supposed to be freed in unmap event (after
1073 	 * we receive delete confirmation from target)
1074 	 *
1075 	 * if peer_id is invalid we did not get the peer map event
1076 	 * for the peer free ast entry from here only in this case
1077 	 */
1078 	if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry))
1079 		return;
1080 
1081 	/* For a WDS secondary entry ast_entry->next_hop would be set, so
1082 	 * unlinking has to be done explicitly here.
1083 	 * As this entry is not a mapped entry, an unmap notification from
1084 	 * FW will not come. Hence unlinking is done right here.
1085 	 */
1086 	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1087 		dp_peer_unlink_ast_entry(soc, ast_entry);
1088 
1089 	dp_peer_free_ast_entry(soc, ast_entry);
1090 }
1091 
1092 /*
1093  * dp_peer_update_ast() - Update the AST entry to the roamed peer
1094  * @soc: SoC handle
1095  * @peer: peer to which ast node belongs
1096  * @ast_entry: AST entry of the node
1097  * @flags: wds or hmwds
1098  *
1099  * This function updates the AST entry to the roamed peer in the peer and soc tables.
1100  * It assumes caller has taken the ast lock to protect the access to these
1101  * tables
1102  *
1103  * Return: 0 if ast entry is updated successfully
1104  *         -1 failure
1105  */
1106 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1107 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1108 {
1109 	int ret = -1;
1110 	struct dp_peer *old_peer;
1111 
1112 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1113 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
1114 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1115 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
1116 		  peer->mac_addr.raw);
1117 
1118 	/* Do not send an AST update in the below cases:
1119 	 *  1) AST entry delete has already been triggered
1120 	 *  2) Peer delete has already been triggered
1121 	 *  3) We did not get the HTT map for the create event
1122 	 */
1123 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
1124 	    !ast_entry->is_mapped)
1125 		return ret;
1126 
1127 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
1128 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
1129 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
1130 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1131 		return 0;
1132 
1133 	/*
1134 	 * Avoids flood of WMI update messages sent to FW for same peer.
1135 	 */
1136 	if (qdf_unlikely(ast_entry->peer == peer) &&
1137 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
1138 	    (ast_entry->peer->vdev == peer->vdev) &&
1139 	    (ast_entry->is_active))
1140 		return 0;
1141 
1142 	old_peer = ast_entry->peer;
1143 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
1144 
1145 	ast_entry->peer = peer;
1146 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1147 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
1148 	ast_entry->is_active = TRUE;
1149 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
1150 
1151 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
1152 				soc->ctrl_psoc,
1153 				peer->vdev->vdev_id,
1154 				ast_entry->mac_addr.raw,
1155 				peer->mac_addr.raw,
1156 				flags);
1157 
1158 	return ret;
1159 }
1160 
1161 /*
1162  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
1163  * @soc: SoC handle
1164  * @ast_entry: AST entry of the node
1165  *
1166  * This function gets the pdev_id from the ast entry.
1167  *
1168  * Return: (uint8_t) pdev_id
1169  */
1170 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1171 				struct dp_ast_entry *ast_entry)
1172 {
1173 	return ast_entry->pdev_id;
1174 }
1175 
1176 /*
1177  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
1178  * @soc: SoC handle
1179  * @ast_entry: AST entry of the node
1180  *
1181  * This function gets the next hop from the ast entry.
1182  *
1183  * Return: (uint8_t) next_hop
1184  */
1185 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1186 				struct dp_ast_entry *ast_entry)
1187 {
1188 	return ast_entry->next_hop;
1189 }
1190 
1191 /*
1192  * dp_peer_ast_set_type() - set type in the ast entry
1193  * @soc: SoC handle
1194  * @ast_entry: AST entry of the node
1195  *
1196  * This function sets the type in the ast entry.
1197  *
1198  * Return: None
1199  */
1200 void dp_peer_ast_set_type(struct dp_soc *soc,
1201 				struct dp_ast_entry *ast_entry,
1202 				enum cdp_txrx_ast_entry_type type)
1203 {
1204 	ast_entry->type = type;
1205 }
1206 
1207 #else
1208 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1209 			   struct dp_peer *peer,
1210 			   uint8_t *mac_addr,
1211 			   enum cdp_txrx_ast_entry_type type,
1212 			   uint32_t flags)
1213 {
1214 	return QDF_STATUS_E_FAILURE;
1215 }
1216 
1217 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1218 {
1219 }
1220 
1221 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1222 			struct dp_ast_entry *ast_entry, uint32_t flags)
1223 {
1224 	return 1;
1225 }
1226 
1227 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1228 					       uint8_t *ast_mac_addr)
1229 {
1230 	return NULL;
1231 }
1232 
1233 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1234 						     uint8_t *ast_mac_addr,
1235 						     uint8_t pdev_id)
1236 {
1237 	return NULL;
1238 }
1239 
1240 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1241 {
1242 	return 0;
1243 }
1244 
1245 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
1246 					 struct dp_peer *peer,
1247 					 uint8_t *mac_addr,
1248 					 uint16_t hw_peer_id,
1249 					 uint8_t vdev_id,
1250 					 uint16_t ast_hash,
1251 					 uint8_t is_wds)
1252 {
1253 	return QDF_STATUS_SUCCESS;
1254 }
1255 
1256 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1257 {
1258 }
1259 
1260 void dp_peer_ast_set_type(struct dp_soc *soc,
1261 				struct dp_ast_entry *ast_entry,
1262 				enum cdp_txrx_ast_entry_type type)
1263 {
1264 }
1265 
1266 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1267 				struct dp_ast_entry *ast_entry)
1268 {
1269 	return 0xff;
1270 }
1271 
1272 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1273 				struct dp_ast_entry *ast_entry)
1274 {
1275 	return 0xff;
1276 }
1277 
1278 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1279 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1280 {
1281 	return 1;
1282 }
1283 
1284 #endif
1285 
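/*
 * dp_peer_ast_send_wds_del() - Send a WDS AST entry delete to the target
 * @soc: SoC handle
 * @ast_entry: AST entry of the node to delete
 *
 * The delete is sent only for next_hop (WDS) entries.
 *
 * Return: None
 */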
1286 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1287 			      struct dp_ast_entry *ast_entry)
1288 {
1289 	struct dp_peer *peer = ast_entry->peer;
1290 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1291 
1292 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1293 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1294 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1295 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1296 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1297 
1298 	/*
1299 	 * If peer delete_in_progress is set, the peer is about to be
1300 	 * torn down with a peer delete command to firmware,
1301 	 * which will clean up all the wds ast entries.
1302 	 * So, there is no need to send an explicit wds ast delete to firmware.
1303 	 */
1304 	if (ast_entry->next_hop) {
1305 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1306 						    peer->vdev->vdev_id,
1307 						    ast_entry->mac_addr.raw,
1308 						    ast_entry->type,
1309 						    !peer->delete_in_progress);
1310 	}
1311 
1312 }
1313 
1314 #ifdef FEATURE_WDS
1315 /**
1316  * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
1317  * @soc: soc handle
1318  * @peer: peer handle
1319  *
1320  * Free all the wds ast entries associated with peer
1321  *
1322  * Return: Number of wds ast entries freed
1323  */
1324 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
1325 					     struct dp_peer *peer)
1326 {
1327 	TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
1328 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1329 	uint32_t num_ast = 0;
1330 
1331 	TAILQ_INIT(&ast_local_list);
1332 	qdf_spin_lock_bh(&soc->ast_lock);
1333 
1334 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
1335 		if (ast_entry->next_hop) {
1336 			if (ast_entry->is_mapped)
1337 				soc->ast_table[ast_entry->ast_idx] = NULL;
1338 
1339 			dp_peer_unlink_ast_entry(soc, ast_entry);
1340 			DP_STATS_INC(soc, ast.deleted, 1);
1341 			dp_peer_ast_hash_remove(soc, ast_entry);
1342 			TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
1343 					  ase_list_elem);
1344 			soc->num_ast_entries--;
1345 			num_ast++;
1346 		}
1347 	}
1348 
1349 	qdf_spin_unlock_bh(&soc->ast_lock);
1350 
1351 	TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
1352 			   temp_ast_entry) {
1353 		if (ast_entry->callback)
1354 			ast_entry->callback(soc->ctrl_psoc,
1355 					    dp_soc_to_cdp_soc(soc),
1356 					    ast_entry->cookie,
1357 					    CDP_TXRX_AST_DELETED);
1358 
1359 		qdf_mem_free(ast_entry);
1360 	}
1361 
1362 	return num_ast;
1363 }
1364 /**
1365  * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
1366  * @soc: soc handle
1367  * @peer: peer handle
1368  * @free_wds_count - number of wds entries freed by FW with peer delete
1369  *
1370  * Free all the wds ast entries associated with peer and compare with
1371  * the value received from firmware
1372  *
1373  * Return: None
1374  */
1375 static void
1376 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1377 			  uint32_t free_wds_count)
1378 {
1379 	uint32_t wds_deleted = 0;
1380 
1381 	wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
1382 	if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
1383 	    (free_wds_count != wds_deleted)) {
1384 		DP_STATS_INC(soc, ast.ast_mismatch, 1);
1385 		dp_alert("For peer %pK (mac: %pM) number of wds entries deleted by fw = %d during peer delete is not the same as the number deleted by host = %d",
1386 			 peer, peer->mac_addr.raw, free_wds_count,
1387 			 wds_deleted);
1388 	}
1389 }
1390 
1391 #else
1392 static void
1393 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
1394 			  uint32_t free_wds_count)
1395 {
1396 }
1397 #endif
1398 
1399 /**
1400  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1401  * @soc: soc handle
1402  * @peer: peer handle
1403  * @mac_addr: mac address of the AST entry to search and delete
1404  *
1405  * find the ast entry from the peer list using the mac address and free
1406  * the entry.
1407  *
1408  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1409  */
1410 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1411 					 struct dp_peer *peer,
1412 					 uint8_t *mac_addr)
1413 {
1414 	struct dp_ast_entry *ast_entry;
1415 	void *cookie = NULL;
1416 	txrx_ast_free_cb cb = NULL;
1417 
1418 	/*
1419 	 * release the reference only if it is mapped
1420 	 * to ast_table
1421 	 */
1422 
1423 	qdf_spin_lock_bh(&soc->ast_lock);
1424 
1425 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1426 	if (!ast_entry) {
1427 		qdf_spin_unlock_bh(&soc->ast_lock);
1428 		return QDF_STATUS_E_NOENT;
1429 	} else if (ast_entry->is_mapped) {
1430 		soc->ast_table[ast_entry->ast_idx] = NULL;
1431 	}
1432 
1433 	cb = ast_entry->callback;
1434 	cookie = ast_entry->cookie;
1435 
1436 
1437 	dp_peer_unlink_ast_entry(soc, ast_entry);
1438 	dp_peer_free_ast_entry(soc, ast_entry);
1439 
1440 	qdf_spin_unlock_bh(&soc->ast_lock);
1441 
1442 	if (cb) {
1443 		cb(soc->ctrl_psoc,
1444 		   dp_soc_to_cdp_soc(soc),
1445 		   cookie,
1446 		   CDP_TXRX_AST_DELETED);
1447 	}
1448 
1449 	return QDF_STATUS_SUCCESS;
1450 }
1451 
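/*
 * dp_peer_find_hash_find() - Find a peer by MAC address and vdev ID
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @mac_addr_is_aligned: is the MAC address already 2-byte aligned
 * @vdev_id: vdev ID, or DP_VDEV_ALL to match a peer on any vdev
 *
 * The reference taken on the returned peer must be released with
 * dp_peer_unref_delete().
 *
 * Return: peer object with its reference count incremented, or NULL if
 *         no matching peer is found
 */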
1452 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1453 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1454 {
1455 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1456 	unsigned index;
1457 	struct dp_peer *peer;
1458 
1459 	if (mac_addr_is_aligned) {
1460 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1461 	} else {
1462 		qdf_mem_copy(
1463 			&local_mac_addr_aligned.raw[0],
1464 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1465 		mac_addr = &local_mac_addr_aligned;
1466 	}
1467 	index = dp_peer_find_hash_index(soc, mac_addr);
1468 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1469 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1470 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1471 			((peer->vdev->vdev_id == vdev_id) ||
1472 			 (vdev_id == DP_VDEV_ALL))) {
1473 			/* found it - increment the ref count before releasing
1474 			 * the lock
1475 			 */
1476 			qdf_atomic_inc(&peer->ref_cnt);
1477 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1478 			return peer;
1479 		}
1480 	}
1481 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1482 	return NULL; /* failure */
1483 }
1484 
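/*
 * dp_peer_find_hash_remove() - Remove a peer object from the MAC address
 *                              hash table
 * @soc: SoC handle
 * @peer: peer handle
 *
 * The caller must hold peer_ref_mutex (see the note in the function body).
 *
 * Return: None
 */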
1485 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1486 {
1487 	unsigned index;
1488 	struct dp_peer *tmppeer = NULL;
1489 	int found = 0;
1490 
1491 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1492 	/* Check if tail is not empty before delete*/
1493 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1494 	/*
1495 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1496 	 * by the caller.
1497 	 * The caller needs to hold the lock from the time the peer object's
1498 	 * reference count is decremented and tested up through the time the
1499 	 * reference to the peer object is removed from the hash table, by
1500 	 * this function.
1501 	 * Holding the lock only while removing the peer object reference
1502 	 * from the hash table keeps the hash table consistent, but does not
1503 	 * protect against a new HL tx context starting to use the peer object
1504 	 * if it looks up the peer object from its MAC address just after the
1505 	 * peer ref count is decremented to zero, but just before the peer
1506 	 * object reference is removed from the hash table.
1507 	 */
1508 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1509 		if (tmppeer == peer) {
1510 			found = 1;
1511 			break;
1512 		}
1513 	}
1514 	QDF_ASSERT(found);
1515 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1516 }
1517 
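/*
 * dp_peer_find_hash_erase() - Release the reference of every peer still in
 *                             the hash table during SoC teardown
 * @soc: SoC handle
 *
 * Return: None
 */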
1518 void dp_peer_find_hash_erase(struct dp_soc *soc)
1519 {
1520 	int i;
1521 
1522 	/*
1523 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1524 	 * it's known that the soc is no longer in use.
1525 	 */
1526 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1527 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1528 			struct dp_peer *peer, *peer_next;
1529 
1530 			/*
1531 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1532 			 * memory access violation after peer is freed
1533 			 */
1534 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1535 				hash_list_elem, peer_next) {
1536 				/*
1537 				 * Don't remove the peer from the hash table -
1538 				 * that would modify the list we are currently
1539 				 * traversing, and it's not necessary anyway.
1540 				 */
1541 				/*
1542 				 * Artificially adjust the peer's ref count to
1543 				 * 1, so it will get deleted by
1544 				 * dp_peer_unref_delete.
1545 				 */
1546 				/* set to zero */
1547 				qdf_atomic_init(&peer->ref_cnt);
1548 				/* incr to one */
1549 				qdf_atomic_inc(&peer->ref_cnt);
1550 				dp_peer_unref_delete(peer);
1551 			}
1552 		}
1553 	}
1554 }
1555 
1556 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1557 {
1558 	if (soc->ast_table) {
1559 		qdf_mem_free(soc->ast_table);
1560 		soc->ast_table = NULL;
1561 	}
1562 }
1563 
1564 static void dp_peer_find_map_detach(struct dp_soc *soc)
1565 {
1566 	if (soc->peer_id_to_obj_map) {
1567 		qdf_mem_free(soc->peer_id_to_obj_map);
1568 		soc->peer_id_to_obj_map = NULL;
1569 	}
1570 }
1571 
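/*
 * dp_peer_find_attach() - Allocate the peer ID map, peer hash table,
 *                         AST table and AST hash table
 * @soc: SoC handle
 *
 * On failure, allocations done so far are rolled back.
 *
 * Return: 0 on success, 1 on failure
 */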
1572 int dp_peer_find_attach(struct dp_soc *soc)
1573 {
1574 	if (dp_peer_find_map_attach(soc))
1575 		return 1;
1576 
1577 	if (dp_peer_find_hash_attach(soc)) {
1578 		dp_peer_find_map_detach(soc);
1579 		return 1;
1580 	}
1581 
1582 	if (dp_peer_ast_table_attach(soc)) {
1583 		dp_peer_find_hash_detach(soc);
1584 		dp_peer_find_map_detach(soc);
1585 		return 1;
1586 	}
1587 
1588 	if (dp_peer_ast_hash_attach(soc)) {
1589 		dp_peer_ast_table_detach(soc);
1590 		dp_peer_find_hash_detach(soc);
1591 		dp_peer_find_map_detach(soc);
1592 		return 1;
1593 	}
1594 
1595 	return 0; /* success */
1596 }
1597 
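/*
 * dp_rx_tid_stats_cb() - REO command callback that prints the REO queue
 *                        statistics of an RX TID
 * @soc: SoC handle
 * @cb_ctxt: callback context (dp_rx_tid of the queue)
 * @reo_status: REO command status returned by HW
 *
 * Return: None
 */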
1598 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1599 	union hal_reo_status *reo_status)
1600 {
1601 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1602 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1603 
1604 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1605 		return;
1606 
1607 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1608 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1609 			       queue_status->header.status, rx_tid->tid);
1610 		return;
1611 	}
1612 
1613 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1614 		       "ssn: %d\n"
1615 		       "curr_idx  : %d\n"
1616 		       "pn_31_0   : %08x\n"
1617 		       "pn_63_32  : %08x\n"
1618 		       "pn_95_64  : %08x\n"
1619 		       "pn_127_96 : %08x\n"
1620 		       "last_rx_enq_tstamp : %08x\n"
1621 		       "last_rx_deq_tstamp : %08x\n"
1622 		       "rx_bitmap_31_0     : %08x\n"
1623 		       "rx_bitmap_63_32    : %08x\n"
1624 		       "rx_bitmap_95_64    : %08x\n"
1625 		       "rx_bitmap_127_96   : %08x\n"
1626 		       "rx_bitmap_159_128  : %08x\n"
1627 		       "rx_bitmap_191_160  : %08x\n"
1628 		       "rx_bitmap_223_192  : %08x\n"
1629 		       "rx_bitmap_255_224  : %08x\n",
1630 		       rx_tid->tid,
1631 		       queue_status->ssn, queue_status->curr_idx,
1632 		       queue_status->pn_31_0, queue_status->pn_63_32,
1633 		       queue_status->pn_95_64, queue_status->pn_127_96,
1634 		       queue_status->last_rx_enq_tstamp,
1635 		       queue_status->last_rx_deq_tstamp,
1636 		       queue_status->rx_bitmap_31_0,
1637 		       queue_status->rx_bitmap_63_32,
1638 		       queue_status->rx_bitmap_95_64,
1639 		       queue_status->rx_bitmap_127_96,
1640 		       queue_status->rx_bitmap_159_128,
1641 		       queue_status->rx_bitmap_191_160,
1642 		       queue_status->rx_bitmap_223_192,
1643 		       queue_status->rx_bitmap_255_224);
1644 
1645 	DP_PRINT_STATS(
1646 		       "curr_mpdu_cnt      : %d\n"
1647 		       "curr_msdu_cnt      : %d\n"
1648 		       "fwd_timeout_cnt    : %d\n"
1649 		       "fwd_bar_cnt        : %d\n"
1650 		       "dup_cnt            : %d\n"
1651 		       "frms_in_order_cnt  : %d\n"
1652 		       "bar_rcvd_cnt       : %d\n"
1653 		       "mpdu_frms_cnt      : %d\n"
1654 		       "msdu_frms_cnt      : %d\n"
1655 		       "total_byte_cnt     : %d\n"
1656 		       "late_recv_mpdu_cnt : %d\n"
1657 		       "win_jump_2k        : %d\n"
1658 		       "hole_cnt           : %d\n",
1659 		       queue_status->curr_mpdu_cnt,
1660 		       queue_status->curr_msdu_cnt,
1661 		       queue_status->fwd_timeout_cnt,
1662 		       queue_status->fwd_bar_cnt,
1663 		       queue_status->dup_cnt,
1664 		       queue_status->frms_in_order_cnt,
1665 		       queue_status->bar_rcvd_cnt,
1666 		       queue_status->mpdu_frms_cnt,
1667 		       queue_status->msdu_frms_cnt,
1668 		       queue_status->total_cnt,
1669 		       queue_status->late_recv_mpdu_cnt,
1670 		       queue_status->win_jump_2k,
1671 		       queue_status->hole_cnt);
1672 
1673 	DP_PRINT_STATS("Addba Req          : %d\n"
1674 			"Addba Resp         : %d\n"
1675 			"Addba Resp success : %d\n"
1676 			"Addba Resp failed  : %d\n"
1677 			"Delba Req received : %d\n"
1678 			"Delba Tx success   : %d\n"
1679 			"Delba Tx Fail      : %d\n"
1680 			"BA window size     : %d\n"
1681 			"Pn size            : %d\n",
1682 			rx_tid->num_of_addba_req,
1683 			rx_tid->num_of_addba_resp,
1684 			rx_tid->num_addba_rsp_success,
1685 			rx_tid->num_addba_rsp_failed,
1686 			rx_tid->num_of_delba_req,
1687 			rx_tid->delba_tx_success_cnt,
1688 			rx_tid->delba_tx_fail_cnt,
1689 			rx_tid->ba_win_size,
1690 			rx_tid->pn_size);
1691 }
1692 
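/*
 * dp_peer_find_add_id() - Map a firmware-assigned peer ID to the peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: HW AST index for this peer
 * @vdev_id: vdev ID
 *
 * Return: peer object (with the reference taken by dp_peer_find_hash_find),
 *         or NULL if no peer with this MAC address exists on the vdev
 */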
1693 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1694 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1695 	uint8_t vdev_id)
1696 {
1697 	struct dp_peer *peer;
1698 
1699 	QDF_ASSERT(peer_id <= soc->max_peers);
1700 	/* check if there's already a peer object with this MAC address */
1701 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1702 		0 /* is aligned */, vdev_id);
1703 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1704 		  "%s: peer %pK ID %d vid %d mac %pM",
1705 		  __func__, peer, peer_id, vdev_id, peer_mac_addr);
1706 
1707 	if (peer) {
1708 		/* peer's ref count was already incremented by
1709 		 * peer_find_hash_find
1710 		 */
1711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1712 			  "%s: ref_cnt: %d", __func__,
1713 			   qdf_atomic_read(&peer->ref_cnt));
1714 		if (!soc->peer_id_to_obj_map[peer_id])
1715 			soc->peer_id_to_obj_map[peer_id] = peer;
1716 		else {
1717 			/* Peer map event came for peer_id which
1718 			 * is already mapped, this is not expected
1719 			 */
1720 			QDF_ASSERT(0);
1721 		}
1722 
1723 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1724 			/* TBDXXX: assert for now */
1725 			QDF_ASSERT(0);
1726 		} else {
1727 			dp_peer_tid_peer_id_update(peer, peer->peer_id);
1728 		}
1729 
1730 		return peer;
1731 	}
1732 
1733 	return NULL;
1734 }
1735 
1736 /**
1737  * dp_rx_peer_map_handler() - handle peer map event from firmware
1738  * @soc - generic soc handle
1739  * @peer_id - peer_id from firmware
1740  * @hw_peer_id - ast index for this peer
1741  * @vdev_id - vdev ID
1742  * @peer_mac_addr - mac address of the peer
1743  * @ast_hash - ast hash value
1744  * @is_wds - flag to indicate peer map event for WDS ast entry
1745  *
1746  * associate the peer_id that firmware provided with peer entry
1747  * and update the ast table in the host with the hw_peer_id.
1748  *
1749  * Return: QDF_STATUS code
1750  */
1751 
1752 QDF_STATUS
1753 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1754 		       uint16_t hw_peer_id, uint8_t vdev_id,
1755 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1756 		       uint8_t is_wds)
1757 {
1758 	struct dp_peer *peer = NULL;
1759 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1760 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1761 
1762 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
1763 		soc, peer_id, hw_peer_id,
1764 		  peer_mac_addr, vdev_id);
1765 
1766 	/* Peer map event for WDS ast entry get the peer from
1767 	 * obj map
1768 	 */
1769 	if (is_wds) {
1770 		peer = soc->peer_id_to_obj_map[peer_id];
1771 	} else {
1772 		/*
1773 		 * It's the responsibility of the CP and FW to ensure
1774 		 * that peer is created successfully. Ideally DP should
1775 		 * not hit the below condition for directly associated
1776 		 * peers.
1777 		 */
1778 		if ((hw_peer_id < 0) ||
1779 		    (hw_peer_id >=
1780 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1781 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1782 				  "invalid hw_peer_id: %d", hw_peer_id);
1783 			qdf_assert_always(0);
1784 		}
1785 
1786 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1787 					   hw_peer_id, vdev_id);
1788 
1789 		if (peer) {
1790 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1791 			    qdf_mem_cmp(peer->mac_addr.raw,
1792 					peer->vdev->mac_addr.raw,
1793 					QDF_MAC_ADDR_SIZE) != 0) {
1794 				dp_info("STA vdev bss_peer!!!!");
1795 				peer->bss_peer = 1;
1796 			}
1797 
1798 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1799 				peer->vdev->bss_ast_hash = ast_hash;
1800 				peer->vdev->bss_ast_idx = hw_peer_id;
1801 			}
1802 
			/* Add ast entry in case the self ast entry was
			 * deleted due to a DP/CP sync issue
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map; no lock is needed before
			 * referring to it
1810 			 */
1811 			if (!peer->self_ast_entry) {
1812 				dp_info("Add self ast from map %pM",
1813 					peer_mac_addr);
1814 				dp_peer_add_ast(soc, peer,
1815 						peer_mac_addr,
1816 						type, 0);
1817 			}
1818 
1819 		}
1820 	}
1821 	err = dp_peer_map_ast(soc, peer, peer_mac_addr,
1822 			      hw_peer_id, vdev_id, ast_hash, is_wds);
1823 
1824 	return err;
1825 }
1826 
1827 /**
1828  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer unmap event for WDS ast entry
1834  * @free_wds_count - number of wds entries freed by FW with peer delete
1835  *
1836  * Return: none
1837  */
1838 void
1839 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1840 			 uint8_t vdev_id, uint8_t *mac_addr,
1841 			 uint8_t is_wds, uint32_t free_wds_count)
1842 {
1843 	struct dp_peer *peer;
1844 
1845 	peer = __dp_peer_find_by_id(soc, peer_id);
1846 
1847 	/*
1848 	 * Currently peer IDs are assigned for vdevs as well as peers.
1849 	 * If the peer ID is for a vdev, then the peer pointer stored
1850 	 * in peer_id_to_obj_map will be NULL.
1851 	 */
1852 	if (!peer) {
1853 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1854 		return;
1855 	}
1856 
	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here
	 */
1859 	if (is_wds) {
1860 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1861 			return;
1862 
1863 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1864 			 peer, peer->peer_id,
1865 			 peer->mac_addr.raw, mac_addr, vdev_id,
1866 			 is_wds);
1867 
1868 		return;
1869 	} else {
1870 		dp_peer_clean_wds_entries(soc, peer, free_wds_count);
1871 	}
1872 
1873 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1874 		soc, peer_id, peer);
1875 
1876 	soc->peer_id_to_obj_map[peer_id] = NULL;
1877 	peer->peer_id = HTT_INVALID_PEER;
1878 
1879 	/*
1880 	 * Reset ast flow mapping table
1881 	 */
1882 	dp_peer_reset_flowq_map(peer);
1883 
1884 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1885 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1886 				peer_id, vdev_id);
1887 	}
1888 
1889 	/*
1890 	 * Remove a reference to the peer.
1891 	 * If there are no more references, delete the peer object.
1892 	 */
1893 	dp_peer_unref_delete(peer);
1894 }
1895 
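/*
 * dp_peer_find_detach() - Detach the peer ID map, peer hash table,
 * AST hash table and AST index table attached to the soc
 * @soc: SoC handle
 *
 * Return: None
 */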
1896 void
1897 dp_peer_find_detach(struct dp_soc *soc)
1898 {
1899 	dp_peer_find_map_detach(soc);
1900 	dp_peer_find_hash_detach(soc);
1901 	dp_peer_ast_hash_detach(soc);
1902 	dp_peer_ast_table_detach(soc);
1903 }
1904 
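/*
 * dp_rx_tid_update_cb() - REO command status callback for an Rx TID
 * queue update; logs an error if the HW descriptor update failed
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context (the dp_rx_tid being updated)
 * @reo_status: REO command status
 */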
1905 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1906 	union hal_reo_status *reo_status)
1907 {
1908 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1909 
1910 	if ((reo_status->rx_queue_status.header.status !=
1911 		HAL_REO_CMD_SUCCESS) &&
1912 		(reo_status->rx_queue_status.header.status !=
1913 		HAL_REO_CMD_DRAIN)) {
1914 		/* Should not happen normally. Just print error for now */
1915 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1916 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1917 			  __func__,
1918 			  reo_status->rx_queue_status.header.status,
1919 			  rx_tid->tid);
1920 	}
1921 }
1922 
1923 /*
1924  * dp_find_peer_by_addr - find peer instance by mac address
1925  * @dev: physical device instance
1926  * @peer_mac_addr: peer mac address
1927  *
1928  * Return: peer instance pointer
1929  */
1930 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
1931 {
1932 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1933 	struct dp_peer *peer;
1934 
1935 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1936 
1937 	if (!peer)
1938 		return NULL;
1939 
1940 	dp_verbose_debug("peer %pK mac: %pM", peer,
1941 			 peer->mac_addr.raw);
1942 
1943 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1944 	 * Decrement it here.
1945 	 */
1946 	dp_peer_unref_delete(peer);
1947 
1948 	return peer;
1949 }
1950 
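/*
 * dp_get_peer_vdev_roaming_in_progress() - Check via the ol_ops callback
 * whether roaming is in progress on the vdev this peer belongs to
 * @peer: Datapath peer handle
 *
 * Return: true if roaming is in progress, false otherwise
 */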
1951 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1952 {
1953 	struct ol_if_ops *ol_ops = NULL;
1954 	bool is_roaming = false;
1955 	uint8_t vdev_id = -1;
1956 	struct cdp_soc_t *soc;
1957 
1958 	if (!peer) {
1959 		dp_info("Peer is NULL. No roaming possible");
1960 		return false;
1961 	}
1962 
1963 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
1964 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1965 
1966 	if (ol_ops && ol_ops->is_roam_inprogress) {
1967 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
1968 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1969 	}
1970 
1971 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1972 		peer->mac_addr.raw, vdev_id, is_roaming);
1973 
1974 	return is_roaming;
1975 }
1976 
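/*
 * dp_rx_tid_update_wifi3() - Update an already allocated Rx TID REO queue
 * descriptor with a new BA window size and starting sequence number, and
 * notify the target of the reorder queue update
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: QDF_STATUS code
 */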
1977 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1978 					 ba_window_size, uint32_t start_seq)
1979 {
1980 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1981 	struct dp_soc *soc = peer->vdev->pdev->soc;
1982 	struct hal_reo_cmd_params params;
1983 
1984 	qdf_mem_zero(&params, sizeof(params));
1985 
1986 	params.std.need_status = 1;
1987 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1988 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1989 	params.u.upd_queue_params.update_ba_window_size = 1;
1990 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1991 
1992 	if (start_seq < IEEE80211_SEQ_MAX) {
1993 		params.u.upd_queue_params.update_ssn = 1;
1994 		params.u.upd_queue_params.ssn = start_seq;
1995 	} else {
1996 	    dp_set_ssn_valid_flag(&params, 0);
1997 	}
1998 
1999 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2000 			    dp_rx_tid_update_cb, rx_tid)) {
2001 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2002 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2003 	}
2004 
2005 	rx_tid->ba_win_size = ba_window_size;
2006 
2007 	if (dp_get_peer_vdev_roaming_in_progress(peer))
2008 		return QDF_STATUS_E_PERM;
2009 
2010 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
2011 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2012 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
2013 			peer->vdev->vdev_id, peer->mac_addr.raw,
2014 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
2015 
2016 	return QDF_STATUS_SUCCESS;
2017 }
2018 
2019 /*
 * dp_reo_desc_free() - Callback to free REO descriptor memory after
 * HW cache flush
2022  *
2023  * @soc: DP SOC handle
2024  * @cb_ctxt: Callback context
2025  * @reo_status: REO command status
2026  */
2027 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
2028 	union hal_reo_status *reo_status)
2029 {
2030 	struct reo_desc_list_node *freedesc =
2031 		(struct reo_desc_list_node *)cb_ctxt;
2032 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
2033 	unsigned long curr_ts = qdf_get_system_timestamp();
2034 
2035 	if ((reo_status->fl_cache_status.header.status !=
2036 		HAL_REO_CMD_SUCCESS) &&
2037 		(reo_status->fl_cache_status.header.status !=
2038 		HAL_REO_CMD_DRAIN)) {
2039 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2040 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
2041 			  __func__,
2042 			  reo_status->rx_queue_status.header.status,
2043 			  freedesc->rx_tid.tid);
2044 	}
2045 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2046 		  "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
2047 		  curr_ts,
2048 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
2049 	qdf_mem_unmap_nbytes_single(soc->osdev,
2050 		rx_tid->hw_qdesc_paddr,
2051 		QDF_DMA_BIDIRECTIONAL,
2052 		rx_tid->hw_qdesc_alloc_size);
2053 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2054 	qdf_mem_free(freedesc);
2055 }
2056 
2057 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
2058 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
2059 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2060 {
2061 	if (dma_addr < 0x50000000)
2062 		return QDF_STATUS_E_FAILURE;
2063 	else
2064 		return QDF_STATUS_SUCCESS;
2065 }
2066 #else
2067 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
2068 {
2069 	return QDF_STATUS_SUCCESS;
2070 }
2071 #endif
2072 
2073 
2074 /*
2075  * dp_rx_tid_setup_wifi3() – Setup receive TID state
2076  * @peer: Datapath peer handle
2077  * @tid: TID
2078  * @ba_window_size: BlockAck window size
2079  * @start_seq: Starting sequence number
2080  *
2081  * Return: QDF_STATUS code
2082  */
2083 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
2084 				 uint32_t ba_window_size, uint32_t start_seq)
2085 {
2086 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2087 	struct dp_vdev *vdev = peer->vdev;
2088 	struct dp_soc *soc = vdev->pdev->soc;
2089 	uint32_t hw_qdesc_size;
2090 	uint32_t hw_qdesc_align;
2091 	int hal_pn_type;
2092 	void *hw_qdesc_vaddr;
2093 	uint32_t alloc_tries = 0;
2094 	QDF_STATUS err = QDF_STATUS_SUCCESS;
2095 
2096 	if (peer->delete_in_progress ||
2097 	    !qdf_atomic_read(&peer->is_default_route_set))
2098 		return QDF_STATUS_E_FAILURE;
2099 
2100 	rx_tid->ba_win_size = ba_window_size;
2101 	if (rx_tid->hw_qdesc_vaddr_unaligned)
2102 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
2103 			start_seq);
2104 	rx_tid->delba_tx_status = 0;
2105 	rx_tid->ppdu_id_2k = 0;
2106 	rx_tid->num_of_addba_req = 0;
2107 	rx_tid->num_of_delba_req = 0;
2108 	rx_tid->num_of_addba_resp = 0;
2109 	rx_tid->num_addba_rsp_failed = 0;
2110 	rx_tid->num_addba_rsp_success = 0;
2111 	rx_tid->delba_tx_success_cnt = 0;
2112 	rx_tid->delba_tx_fail_cnt = 0;
2113 	rx_tid->statuscode = 0;
2114 
2115 	/* TODO: Allocating HW queue descriptors based on max BA window size
2116 	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
2118 	 * queue descriptors based on BA window size being negotiated (0 for
2119 	 * non BA cases), and reallocate when BA window size changes and also
2120 	 * send WMI message to FW to change the REO queue descriptor in Rx
2121 	 * peer entry as part of dp_rx_tid_update.
2122 	 */
2123 	if (tid != DP_NON_QOS_TID)
2124 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2125 			HAL_RX_MAX_BA_WINDOW, tid);
2126 	else
2127 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
2128 			ba_window_size, tid);
2129 
2130 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
2131 	/* To avoid unnecessary extra allocation for alignment, try allocating
2132 	 * exact size and see if we already have aligned address.
2133 	 */
2134 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
2135 
2136 try_desc_alloc:
2137 	rx_tid->hw_qdesc_vaddr_unaligned =
2138 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
2139 
2140 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2141 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2142 			  "%s: Rx tid HW desc alloc failed: tid %d",
2143 			  __func__, tid);
2144 		return QDF_STATUS_E_NOMEM;
2145 	}
2146 
2147 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
2148 		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
2150 		 * memory for alignment
2151 		 */
2152 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2153 		rx_tid->hw_qdesc_vaddr_unaligned =
2154 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
2155 					hw_qdesc_align - 1);
2156 
2157 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
2158 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2159 				  "%s: Rx tid HW desc alloc failed: tid %d",
2160 				  __func__, tid);
2161 			return QDF_STATUS_E_NOMEM;
2162 		}
2163 
2164 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
2165 			rx_tid->hw_qdesc_vaddr_unaligned,
2166 			hw_qdesc_align);
2167 
2168 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2169 			  "%s: Total Size %d Aligned Addr %pK",
2170 			  __func__, rx_tid->hw_qdesc_alloc_size,
2171 			  hw_qdesc_vaddr);
2172 
2173 	} else {
2174 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
2175 	}
2176 
2177 	/* TODO: Ensure that sec_type is set before ADDBA is received.
2178 	 * Currently this is set based on htt indication
2179 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
2180 	 */
2181 	switch (peer->security[dp_sec_ucast].sec_type) {
2182 	case cdp_sec_type_tkip_nomic:
2183 	case cdp_sec_type_aes_ccmp:
2184 	case cdp_sec_type_aes_ccmp_256:
2185 	case cdp_sec_type_aes_gcmp:
2186 	case cdp_sec_type_aes_gcmp_256:
2187 		hal_pn_type = HAL_PN_WPA;
2188 		break;
2189 	case cdp_sec_type_wapi:
2190 		if (vdev->opmode == wlan_op_mode_ap)
2191 			hal_pn_type = HAL_PN_WAPI_EVEN;
2192 		else
2193 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
2194 		break;
2195 	default:
2196 		hal_pn_type = HAL_PN_NONE;
2197 		break;
2198 	}
2199 
2200 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
2201 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
2202 
2203 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
2204 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
2205 		&(rx_tid->hw_qdesc_paddr));
2206 
2207 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
2208 			QDF_STATUS_SUCCESS) {
2209 		if (alloc_tries++ < 10) {
2210 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2211 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2212 			goto try_desc_alloc;
2213 		} else {
2214 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2215 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
2216 				  __func__, tid);
2217 			err = QDF_STATUS_E_NOMEM;
2218 			goto error;
2219 		}
2220 	}
2221 
2222 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2223 		err = QDF_STATUS_E_PERM;
2224 		goto error;
2225 	}
2226 
2227 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
2228 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
2229 		    soc->ctrl_psoc,
2230 		    peer->vdev->pdev->pdev_id,
2231 		    peer->vdev->vdev_id,
2232 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2233 		    1, ba_window_size)) {
2234 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2235 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
2236 				  __func__, tid);
2237 			err = QDF_STATUS_E_FAILURE;
2238 			goto error;
2239 		}
2240 	}
	return QDF_STATUS_SUCCESS;
2242 error:
2243 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
2244 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2245 		    QDF_STATUS_SUCCESS)
2246 			qdf_mem_unmap_nbytes_single(
2247 				soc->osdev,
2248 				rx_tid->hw_qdesc_paddr,
2249 				QDF_DMA_BIDIRECTIONAL,
2250 				rx_tid->hw_qdesc_alloc_size);
2251 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2252 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2253 	}
2254 	return err;
2255 }
2256 
2257 #ifdef REO_DESC_DEFER_FREE
2258 /*
 * dp_reo_desc_clean_up() - If the command to flush the base descriptor
 * fails, add the descriptor back to the freelist and defer the deletion
2261  *
2262  * @soc: DP SOC handle
2263  * @desc: Base descriptor to be freed
2264  * @reo_status: REO command status
2265  */
2266 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2267 				 struct reo_desc_list_node *desc,
2268 				 union hal_reo_status *reo_status)
2269 {
2270 	desc->free_ts = qdf_get_system_timestamp();
2271 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2272 	qdf_list_insert_back(&soc->reo_desc_freelist,
2273 			     (qdf_list_node_t *)desc);
2274 }
2275 
2276 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2279  *
2280  * @list_size: REO desc list size to be cleaned
2281  */
2282 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2283 {
2284 	unsigned long curr_ts = qdf_get_system_timestamp();
2285 
2286 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
2287 		dp_err_log("%lu:freedesc number %d in freelist",
2288 			   curr_ts, *list_size);
2289 		/* limit the batch queue size */
2290 		*list_size = REO_DESC_FREELIST_SIZE;
2291 	}
2292 }
2293 #else
2294 /*
 * dp_reo_desc_clean_up() - If sending the REO command to flush the
 * cache fails, free the base REO descriptor anyway
2297  *
2298  * @soc: DP SOC handle
2299  * @desc: Base descriptor to be freed
2300  * @reo_status: REO command status
2301  */
2302 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2303 				 struct reo_desc_list_node *desc,
2304 				 union hal_reo_status *reo_status)
2305 {
2306 	if (reo_status) {
2307 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2308 		reo_status->fl_cache_status.header.status = 0;
2309 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2310 	}
2311 }
2312 
2313 /*
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO commands queued
 * to the cmd ring to avoid a REO hang
2316  *
2317  * @list_size: REO desc list size to be cleaned
2318  */
2319 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
2320 {
2321 }
2322 #endif
2323 
2324 /*
2325  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2326  * cmd and re-insert desc into free list if send fails.
2327  *
2328  * @soc: DP SOC handle
2329  * @desc: desc with resend update cmd flag set
2330  * @rx_tid: Desc RX tid associated with update cmd for resetting
2331  * valid field to 0 in h/w
2332  *
2333  * Return: QDF status
2334  */
2335 static QDF_STATUS
2336 dp_resend_update_reo_cmd(struct dp_soc *soc,
2337 			 struct reo_desc_list_node *desc,
2338 			 struct dp_rx_tid *rx_tid)
2339 {
2340 	struct hal_reo_cmd_params params;
2341 
2342 	qdf_mem_zero(&params, sizeof(params));
2343 	params.std.need_status = 1;
2344 	params.std.addr_lo =
2345 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2346 	params.std.addr_hi =
2347 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2348 	params.u.upd_queue_params.update_vld = 1;
2349 	params.u.upd_queue_params.vld = 0;
2350 	desc->resend_update_reo_cmd = false;
2351 	/*
2352 	 * If the cmd send fails then set resend_update_reo_cmd flag
2353 	 * and insert the desc at the end of the free list to retry.
2354 	 */
2355 	if (dp_reo_send_cmd(soc,
2356 			    CMD_UPDATE_RX_REO_QUEUE,
2357 			    &params,
2358 			    dp_rx_tid_delete_cb,
2359 			    (void *)desc)
2360 	    != QDF_STATUS_SUCCESS) {
2361 		desc->resend_update_reo_cmd = true;
2362 		desc->free_ts = qdf_get_system_timestamp();
2363 		qdf_list_insert_back(&soc->reo_desc_freelist,
2364 				     (qdf_list_node_t *)desc);
2365 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2366 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2367 		return QDF_STATUS_E_FAILURE;
2368 	}
2369 
2370 	return QDF_STATUS_SUCCESS;
2371 }
2372 
2373 /*
2374  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
2376  *
2377  * @soc: DP SOC handle
2378  * @cb_ctxt: Callback context
2379  * @reo_status: REO command status
2380  */
2381 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2382 			 union hal_reo_status *reo_status)
2383 {
2384 	struct reo_desc_list_node *freedesc =
2385 		(struct reo_desc_list_node *)cb_ctxt;
2386 	uint32_t list_size;
2387 	struct reo_desc_list_node *desc;
2388 	unsigned long curr_ts = qdf_get_system_timestamp();
2389 	uint32_t desc_size, tot_desc_size;
2390 	struct hal_reo_cmd_params params;
2391 	bool flush_failure = false;
2392 
2393 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2394 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2395 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2396 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2397 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
2398 		return;
2399 	} else if (reo_status->rx_queue_status.header.status !=
2400 		HAL_REO_CMD_SUCCESS) {
2401 		/* Should not happen normally. Just print error for now */
2402 		dp_info_rl("%s: Rx tid HW desc deletion failed(%d): tid %d",
2403 			   __func__,
2404 			   reo_status->rx_queue_status.header.status,
2405 			   freedesc->rx_tid.tid);
2406 	}
2407 
2408 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2409 		"%s: rx_tid: %d status: %d", __func__,
2410 		freedesc->rx_tid.tid,
2411 		reo_status->rx_queue_status.header.status);
2412 
2413 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2414 	freedesc->free_ts = curr_ts;
2415 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2416 		(qdf_list_node_t *)freedesc, &list_size);
2417 
	/* On the MCL path the descriptor is added back to reo_desc_freelist
	 * when a REO flush fails. This can make the number of pending REO
	 * queue descriptors in the free list grow beyond the REO_CMD_RING
	 * size, flooding the REO command ring and leaving REO HW in an
	 * unexpected state. So the number of REO commands issued in one
	 * batch operation needs to be limited.
	 */
2424 	dp_reo_limit_clean_batch_sz(&list_size);
2425 
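	/* Drain the freelist: process a descriptor when the list has grown
	 * to REO_DESC_FREELIST_SIZE or beyond, the head descriptor has aged
	 * past REO_DESC_FREE_DEFER_MS, or it is marked for an
	 * UPDATE_RX_REO_QUEUE command resend.
	 */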
2426 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2427 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2428 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2429 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2430 		(desc->resend_update_reo_cmd && list_size))) {
2431 		struct dp_rx_tid *rx_tid;
2432 
2433 		qdf_list_remove_front(&soc->reo_desc_freelist,
2434 				(qdf_list_node_t **)&desc);
2435 		list_size--;
2436 		rx_tid = &desc->rx_tid;
2437 
2438 		/* First process descs with resend_update_reo_cmd set */
2439 		if (desc->resend_update_reo_cmd) {
2440 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
2441 			    QDF_STATUS_SUCCESS)
2442 				break;
2443 			else
2444 				continue;
2445 		}
2446 
2447 		/* Flush and invalidate REO descriptor from HW cache: Base and
2448 		 * extension descriptors should be flushed separately */
2449 		if (desc->pending_ext_desc_size)
2450 			tot_desc_size = desc->pending_ext_desc_size;
2451 		else
2452 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2453 		/* Get base descriptor size by passing non-qos TID */
2454 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2455 						   DP_NON_QOS_TID);
2456 
2457 		/* Flush reo extension descriptors */
2458 		while ((tot_desc_size -= desc_size) > 0) {
2459 			qdf_mem_zero(&params, sizeof(params));
2460 			params.std.addr_lo =
2461 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2462 				tot_desc_size) & 0xffffffff;
2463 			params.std.addr_hi =
2464 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2465 
2466 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2467 							CMD_FLUSH_CACHE,
2468 							&params,
2469 							NULL,
2470 							NULL)) {
				dp_info_rl("fail to send CMD_FLUSH_CACHE:"
2472 					   "tid %d desc %pK", rx_tid->tid,
2473 					   (void *)(rx_tid->hw_qdesc_paddr));
2474 				desc->pending_ext_desc_size = tot_desc_size +
2475 								      desc_size;
2476 				dp_reo_desc_clean_up(soc, desc, reo_status);
2477 				flush_failure = true;
2478 				break;
2479 			}
2480 		}
2481 
2482 		if (flush_failure)
2483 			break;
2484 		else
2485 			desc->pending_ext_desc_size = desc_size;
2486 
2487 		/* Flush base descriptor */
2488 		qdf_mem_zero(&params, sizeof(params));
2489 		params.std.need_status = 1;
2490 		params.std.addr_lo =
2491 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2492 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2493 
2494 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2495 							  CMD_FLUSH_CACHE,
2496 							  &params,
2497 							  dp_reo_desc_free,
2498 							  (void *)desc)) {
2499 			union hal_reo_status reo_status;
2500 			/*
			 * If dp_reo_send_cmd returns failure, the related TID
			 * queue descriptor should be unmapped. The local
			 * reo_desc, together with the TID queue descriptor,
			 * also needs to be freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 *
			 * On the MCL path, add the desc back to the free
			 * desc list and defer deletion.
2509 			 */
2510 			dp_info_rl("%s: fail to send REO cmd to flush cache: tid %d",
2511 				   __func__, rx_tid->tid);
2512 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2513 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2514 			break;
2515 		}
2516 	}
2517 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2518 }
2519 
2520 /*
2521  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2522  * @peer: Datapath peer handle
2523  * @tid: TID
2524  *
2525  * Return: 0 on success, error code on failure
2526  */
2527 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2528 {
2529 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2530 	struct dp_soc *soc = peer->vdev->pdev->soc;
2531 	struct hal_reo_cmd_params params;
2532 	struct reo_desc_list_node *freedesc =
2533 		qdf_mem_malloc(sizeof(*freedesc));
2534 
2535 	if (!freedesc) {
2536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2537 			  "%s: malloc failed for freedesc: tid %d",
2538 			  __func__, tid);
2539 		return -ENOMEM;
2540 	}
2541 
2542 	freedesc->rx_tid = *rx_tid;
2543 	freedesc->resend_update_reo_cmd = false;
2544 
2545 	qdf_mem_zero(&params, sizeof(params));
2546 
2547 	params.std.need_status = 1;
2548 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2549 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2550 	params.u.upd_queue_params.update_vld = 1;
2551 	params.u.upd_queue_params.vld = 0;
2552 
2553 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2554 			    dp_rx_tid_delete_cb, (void *)freedesc)
2555 		!= QDF_STATUS_SUCCESS) {
2556 		/* Defer the clean up to the call back context */
2557 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2558 		freedesc->free_ts = qdf_get_system_timestamp();
2559 		freedesc->resend_update_reo_cmd = true;
2560 		qdf_list_insert_front(&soc->reo_desc_freelist,
2561 				      (qdf_list_node_t *)freedesc);
2562 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2563 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2564 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2565 	}
2566 
2567 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2568 	rx_tid->hw_qdesc_alloc_size = 0;
2569 	rx_tid->hw_qdesc_paddr = 0;
2570 
2571 	return 0;
2572 }
2573 
2574 #ifdef DP_LFR
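/*
 * dp_peer_setup_remaining_tids() - Set up Rx TID queues for TIDs
 * 1 to DP_MAX_TIDS - 2 up front so that LFR roaming does not find
 * missing REO queues
 * @peer: Datapath peer handle
 */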
2575 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2576 {
2577 	int tid;
2578 
2579 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2580 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2582 			  "Setting up TID %d for peer %pK peer->local_id %d",
2583 			  tid, peer, peer->local_id);
2584 	}
2585 }
2586 #else
2587 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
2588 #endif
2589 
2590 /*
 * dp_peer_tx_init() – Initialize transmit TID state
2592  * @pdev: Datapath pdev
2593  * @peer: Datapath peer
2594  *
2595  */
2596 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2597 {
2598 	dp_peer_tid_queue_init(peer);
2599 	dp_peer_update_80211_hdr(peer->vdev, peer);
2600 }
2601 
2602 /*
 * dp_peer_tx_cleanup() – Deinitialize transmit TID state
2604  * @vdev: Datapath vdev
2605  * @peer: Datapath peer
2606  *
2607  */
2608 static inline void
2609 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2610 {
2611 	dp_peer_tid_queue_cleanup(peer);
2612 }
2613 
2614 /*
2615  * dp_peer_rx_init() – Initialize receive TID state
2616  * @pdev: Datapath pdev
2617  * @peer: Datapath peer
2618  *
2619  */
2620 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2621 {
2622 	int tid;
2623 	struct dp_rx_tid *rx_tid;
2624 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2625 		rx_tid = &peer->rx_tid[tid];
2626 		rx_tid->array = &rx_tid->base;
2627 		rx_tid->base.head = rx_tid->base.tail = NULL;
2628 		rx_tid->tid = tid;
2629 		rx_tid->defrag_timeout_ms = 0;
2630 		rx_tid->ba_win_size = 0;
2631 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2632 
2633 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2634 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2635 	}
2636 
2637 	peer->active_ba_session_cnt = 0;
2638 	peer->hw_buffer_size = 0;
2639 	peer->kill_256_sessions = 0;
2640 
2641 	/* Setup default (non-qos) rx tid queue */
2642 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2643 
	/* Set up the rx tid queue for TID 0.
	 * Other queues will be set up on receiving the first packet, which
	 * will cause a NULL REO queue error
2647 	 */
2648 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2649 
2650 	/*
2651 	 * Setup the rest of TID's to handle LFR
2652 	 */
2653 	dp_peer_setup_remaining_tids(peer);
2654 
2655 	/*
2656 	 * Set security defaults: no PN check, no security. The target may
2657 	 * send a HTT SEC_IND message to overwrite these defaults.
2658 	 */
2659 	peer->security[dp_sec_ucast].sec_type =
2660 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2661 }
2662 
2663 /*
2664  * dp_peer_rx_cleanup() – Cleanup receive TID state
2665  * @vdev: Datapath vdev
2666  * @peer: Datapath peer
2667  * @reuse: Peer reference reuse
2668  *
2669  */
2670 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2671 {
2672 	int tid;
2673 	uint32_t tid_delete_mask = 0;
2674 
2675 	dp_info("Remove tids for peer: %pK", peer);
2676 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2677 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2678 
2679 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2680 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2681 			/* Cleanup defrag related resource */
2682 			dp_rx_defrag_waitlist_remove(peer, tid);
2683 			dp_rx_reorder_flush_frag(peer, tid);
2684 		}
2685 
2686 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2687 			dp_rx_tid_delete_wifi3(peer, tid);
2688 
2689 			tid_delete_mask |= (1 << tid);
2690 		}
2691 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2692 	}
2693 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2694 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2695 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2696 			peer->vdev->pdev->pdev_id,
2697 			peer->vdev->vdev_id, peer->mac_addr.raw,
2698 			tid_delete_mask);
2699 	}
2700 #endif
2701 	if (!reuse)
2702 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2703 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2704 }
2705 
2706 #ifdef FEATURE_PERPKT_INFO
2707 /*
 * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2709  * @peer: Datapath peer
2710  *
2711  * return: void
2712  */
2713 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2714 {
2715 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2716 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2717 	peer->last_delayed_ba = false;
2718 	peer->last_delayed_ba_ppduid = 0;
2719 }
2720 #else
2721 /*
 * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA ppdu stats in peer
2723  * @peer: Datapath peer
2724  *
2725  * return: void
2726  */
2727 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2728 {
2729 }
2730 #endif
2731 
2732 /*
2733  * dp_peer_cleanup() – Cleanup peer information
2734  * @vdev: Datapath vdev
2735  * @peer: Datapath peer
2736  * @reuse: Peer reference reuse
2737  *
2738  */
2739 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2740 {
2741 	dp_peer_tx_cleanup(vdev, peer);
2742 
2743 	/* cleanup the Rx reorder queues for this peer */
2744 	dp_peer_rx_cleanup(vdev, peer, reuse);
2745 }
2746 
/* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2748  *                                window size when a request with
2749  *                                64 window size is received.
2750  *                                This is done as a WAR since HW can
2751  *                                have only one setting per peer (64 or 256).
2752  *                                For HKv2, we use per tid buffersize setting
2753  *                                for 0 to per_tid_basize_max_tid. For tid
2754  *                                more than per_tid_basize_max_tid we use HKv1
2755  *                                method.
2756  * @peer: Datapath peer
2757  *
2758  * Return: void
2759  */
2760 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2761 {
2762 	uint8_t delba_rcode = 0;
2763 	int tid;
2764 	struct dp_rx_tid *rx_tid = NULL;
2765 
2766 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2767 	for (; tid < DP_MAX_TIDS; tid++) {
2768 		rx_tid = &peer->rx_tid[tid];
2769 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2770 
2771 		if (rx_tid->ba_win_size <= 64) {
2772 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2773 			continue;
2774 		} else {
2775 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2776 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2777 				/* send delba */
2778 				if (!rx_tid->delba_tx_status) {
2779 					rx_tid->delba_tx_retry++;
2780 					rx_tid->delba_tx_status = 1;
2781 					rx_tid->delba_rcode =
2782 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2783 					delba_rcode = rx_tid->delba_rcode;
2784 
2785 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2786 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2787 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2788 							peer->vdev->pdev->soc->ctrl_psoc,
2789 							peer->vdev->vdev_id,
2790 							peer->mac_addr.raw,
2791 							tid, delba_rcode);
2792 				} else {
2793 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2794 				}
2795 			} else {
2796 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2797 			}
2798 		}
2799 	}
2800 }
2801 
2802 /*
* dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
*
* @soc: Datapath soc handle
* @peer_mac: Datapath peer mac address
* @vdev_id: id of datapath vdev
2808 * @tid: TID number
2809 * @status: tx completion status
2810 * Return: 0 on success, error code on failure
2811 */
2812 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
2813 				      uint8_t *peer_mac,
2814 				      uint16_t vdev_id,
2815 				      uint8_t tid, int status)
2816 {
2817 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2818 						       peer_mac, 0, vdev_id);
2819 	struct dp_rx_tid *rx_tid = NULL;
2820 
2821 	if (!peer || peer->delete_in_progress) {
2822 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2823 			  "%s: Peer is NULL!\n", __func__);
2824 		goto fail;
2825 	}
2826 	rx_tid = &peer->rx_tid[tid];
2827 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2828 	if (status) {
2829 		rx_tid->num_addba_rsp_failed++;
2830 		dp_rx_tid_update_wifi3(peer, tid, 1,
2831 				       IEEE80211_SEQ_MAX);
2832 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2833 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2834 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2835 
2836 		goto success;
2837 	}
2838 
2839 	rx_tid->num_addba_rsp_success++;
2840 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2841 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2842 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2843 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2844 			__func__, tid);
2845 		goto fail;
2846 	}
2847 
2848 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2849 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2850 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2851 			  "%s: default route is not set for peer: %pM",
2852 			  __func__, peer->mac_addr.raw);
2853 		goto fail;
2854 	}
2855 
2856 	if (dp_rx_tid_update_wifi3(peer, tid,
2857 				   rx_tid->ba_win_size,
2858 				   rx_tid->startseqnum)) {
		dp_err("%s: failed to update REO SSN", __func__);
2860 	}
2861 
2862 	dp_info("%s: tid %u window_size %u start_seq_num %u",
2863 		__func__, tid, rx_tid->ba_win_size,
2864 		rx_tid->startseqnum);
2865 
2866 	/* First Session */
2867 	if (peer->active_ba_session_cnt == 0) {
2868 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2869 			peer->hw_buffer_size = 256;
2870 		else
2871 			peer->hw_buffer_size = 64;
2872 	}
2873 
2874 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2875 
2876 	peer->active_ba_session_cnt++;
2877 
2878 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2879 
2880 	/* Kill any session having 256 buffer size
2881 	 * when 64 buffer size request is received.
2882 	 * Also, latch on to 64 as new buffer size.
2883 	 */
2884 	if (peer->kill_256_sessions) {
2885 		dp_teardown_256_ba_sessions(peer);
2886 		peer->kill_256_sessions = 0;
2887 	}
2888 
2889 success:
2890 	dp_peer_unref_delete(peer);
2891 	return QDF_STATUS_SUCCESS;
2892 
2893 fail:
2894 	if (peer)
2895 		dp_peer_unref_delete(peer);
2896 
2897 	return QDF_STATUS_E_FAILURE;
2898 }
2899 
2900 /*
* dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
*
* @soc: Datapath soc handle
* @peer_mac: Datapath peer mac address
* @vdev_id: id of datapath vdev
* @tid: TID number
* @dialogtoken: output dialogtoken
* @statuscode: output status code
2909 * @buffersize: Output BA window size
2910 * @batimeout: Output BA timeout
2911 */
2912 QDF_STATUS
2913 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2914 			     uint16_t vdev_id, uint8_t tid,
2915 			     uint8_t *dialogtoken, uint16_t *statuscode,
2916 			     uint16_t *buffersize, uint16_t *batimeout)
2917 {
2918 	struct dp_rx_tid *rx_tid = NULL;
2919 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2920 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2921 						       peer_mac, 0, vdev_id);
2922 
2923 	if (!peer || peer->delete_in_progress) {
2924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2925 			  "%s: Peer is NULL!\n", __func__);
2926 		status = QDF_STATUS_E_FAILURE;
2927 		goto fail;
2928 	}
2929 	rx_tid = &peer->rx_tid[tid];
2930 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2931 	rx_tid->num_of_addba_resp++;
2932 	/* setup ADDBA response parameters */
2933 	*dialogtoken = rx_tid->dialogtoken;
2934 	*statuscode = rx_tid->statuscode;
2935 	*buffersize = rx_tid->ba_win_size;
2936 	*batimeout  = 0;
2937 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2938 
2939 fail:
2940 	if (peer)
2941 		dp_peer_unref_delete(peer);
2942 
2943 	return status;
2944 }
2945 
2946 /* dp_check_ba_buffersize() - Check buffer size in request
2947  *                            and latch onto this size based on
2948  *                            size used in first active session.
2949  * @peer: Datapath peer
2950  * @tid: Tid
2951  * @buffersize: Block ack window size
2952  *
2953  * Return: void
2954  */
2955 static void dp_check_ba_buffersize(struct dp_peer *peer,
2956 				   uint16_t tid,
2957 				   uint16_t buffersize)
2958 {
2959 	struct dp_rx_tid *rx_tid = NULL;
2960 
2961 	rx_tid = &peer->rx_tid[tid];
2962 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2963 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2964 		rx_tid->ba_win_size = buffersize;
2965 		return;
2966 	} else {
2967 		if (peer->active_ba_session_cnt == 0) {
2968 			rx_tid->ba_win_size = buffersize;
2969 		} else {
2970 			if (peer->hw_buffer_size == 64) {
2971 				if (buffersize <= 64)
2972 					rx_tid->ba_win_size = buffersize;
2973 				else
2974 					rx_tid->ba_win_size = peer->hw_buffer_size;
2975 			} else if (peer->hw_buffer_size == 256) {
2976 				if (buffersize > 64) {
2977 					rx_tid->ba_win_size = buffersize;
2978 				} else {
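					/* A request of 64 or less on a peer
					 * latched to 256: fall back to 64 and
					 * tear down the existing 256 sessions,
					 * since HW supports only one buffer
					 * size per peer
					 */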
2979 					rx_tid->ba_win_size = buffersize;
2980 					peer->hw_buffer_size = 64;
2981 					peer->kill_256_sessions = 1;
2982 				}
2983 			}
2984 		}
2985 	}
2986 }
2987 
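/* Window size override value indicating that the Rx BA session
 * for a TID must be disabled (ADDBA refused)
 */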
2988 #define DP_RX_BA_SESSION_DISABLE  1
2989 
2990 /*
2991  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2992  *
2993  * @soc: Datapath soc handle
2994  * @peer_mac: Datapath peer mac address
2995  * @vdev_id: id of atapath vdev
 * @vdev_id: id of datapath vdev
2997  * @tid: TID number
2998  * @batimeout: BA timeout
2999  * @buffersize: BA window size
3000  * @startseqnum: Start seq. number received in BA sequence control
3001  *
3002  * Return: 0 on success, error code on failure
3003  */
3004 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
3005 				  uint8_t *peer_mac,
3006 				  uint16_t vdev_id,
3007 				  uint8_t dialogtoken,
3008 				  uint16_t tid, uint16_t batimeout,
3009 				  uint16_t buffersize,
3010 				  uint16_t startseqnum)
3011 {
3012 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3013 	struct dp_rx_tid *rx_tid = NULL;
3014 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3015 						       peer_mac, 0, vdev_id);
3016 
3017 	if (!peer || peer->delete_in_progress) {
3018 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3019 			  "%s: Peer is NULL!\n", __func__);
3020 		status = QDF_STATUS_E_FAILURE;
3021 		goto fail;
3022 	}
3023 	rx_tid = &peer->rx_tid[tid];
3024 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3025 	rx_tid->num_of_addba_req++;
3026 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
3027 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
3028 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3029 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3030 		peer->active_ba_session_cnt--;
3031 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3032 			  "%s: Rx Tid- %d hw qdesc is already setup",
3033 			__func__, tid);
3034 	}
3035 
3036 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3037 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3038 		status = QDF_STATUS_E_FAILURE;
3039 		goto fail;
3040 	}
3041 
3042 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
3043 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3044 			  "%s disable BA session",
3045 			    __func__);
3046 
3047 		buffersize = 1;
3048 	} else if (rx_tid->rx_ba_win_size_override) {
3049 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3050 			  "%s override BA win to %d", __func__,
3051 			      rx_tid->rx_ba_win_size_override);
3052 
3053 		buffersize = rx_tid->rx_ba_win_size_override;
3054 	} else {
3055 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3056 			  "%s restore BA win %d based on addba req",
3057 			    __func__, buffersize);
3058 	}
3059 
3060 	dp_check_ba_buffersize(peer, tid, buffersize);
3061 
3062 	if (dp_rx_tid_setup_wifi3(peer, tid,
3063 	    rx_tid->ba_win_size, startseqnum)) {
3064 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3065 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3066 		status = QDF_STATUS_E_FAILURE;
3067 		goto fail;
3068 	}
3069 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
3070 
3071 	rx_tid->dialogtoken = dialogtoken;
3072 	rx_tid->startseqnum = startseqnum;
3073 
3074 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
3075 		rx_tid->statuscode = rx_tid->userstatuscode;
3076 	else
3077 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
3078 
3079 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
3080 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
3081 
3082 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3083 
3084 fail:
3085 	if (peer)
3086 		dp_peer_unref_delete(peer);
3087 
3088 	return status;
3089 }
3090 
3091 /*
3092 * dp_set_addba_response() – Set a user defined ADDBA response status code
3093 *
3094 * @soc: Datapath soc handle
3095 * @peer_mac: Datapath peer mac address
* @vdev_id: id of datapath vdev
3097 * @tid: TID number
3098 * @statuscode: response status code to be set
3099 */
3100 QDF_STATUS
3101 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3102 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
3103 {
3104 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3105 						       peer_mac, 0, vdev_id);
3106 	struct dp_rx_tid *rx_tid;
3107 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3108 
3109 	if (!peer || peer->delete_in_progress) {
3110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3111 			  "%s: Peer is NULL!\n", __func__);
3112 		status = QDF_STATUS_E_FAILURE;
3113 		goto fail;
3114 	}
3115 
3116 	rx_tid = &peer->rx_tid[tid];
3117 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3118 	rx_tid->userstatuscode = statuscode;
3119 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3120 fail:
3121 	if (peer)
3122 		dp_peer_unref_delete(peer);
3123 
3124 	return status;
3125 }
3126 
3127 /*
* dp_delba_process_wifi3() – Process DELBA from peer
* @soc: Datapath soc handle
* @peer_mac: Datapath peer mac address
* @vdev_id: id of datapath vdev
3132 * @tid: TID number
3133 * @reasoncode: Reason code received in DELBA frame
3134 *
3135 * Return: 0 on success, error code on failure
3136 */
3137 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3138 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
3139 {
3140 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3141 	struct dp_rx_tid *rx_tid;
3142 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3143 						      peer_mac, 0, vdev_id);
3144 
3145 	if (!peer || peer->delete_in_progress) {
3146 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3147 			  "%s: Peer is NULL!\n", __func__);
3148 		status = QDF_STATUS_E_FAILURE;
3149 		goto fail;
3150 	}
3151 	rx_tid = &peer->rx_tid[tid];
3152 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3153 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
3154 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3155 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3156 		status = QDF_STATUS_E_FAILURE;
3157 		goto fail;
3158 	}
3159 	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace it with a new one without the queue extension descriptor to save
3161 	 * memory
3162 	 */
3163 	rx_tid->delba_rcode = reasoncode;
3164 	rx_tid->num_of_delba_req++;
3165 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3166 
3167 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
3168 	peer->active_ba_session_cnt--;
3169 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3170 fail:
3171 	if (peer)
3172 		dp_peer_unref_delete(peer);
3173 
3174 	return status;
3175 }
3176 
3177 /*
 * dp_delba_tx_completion_wifi3() – Handle DELBA tx completion status
 *
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
3183  * @tid: TID number
3184  * @status: tx completion status
3185  * Return: 0 on success, error code on failure
3186  */
3187 
3188 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
3189 				 uint16_t vdev_id,
3190 				 uint8_t tid, int status)
3191 {
3192 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
3193 	struct dp_rx_tid *rx_tid = NULL;
3194 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
3195 						      peer_mac, 0, vdev_id);
3196 
3197 	if (!peer || peer->delete_in_progress) {
3198 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3199 			  "%s: Peer is NULL!", __func__);
3200 		ret = QDF_STATUS_E_FAILURE;
3201 		goto end;
3202 	}
3203 	rx_tid = &peer->rx_tid[tid];
3204 	qdf_spin_lock_bh(&rx_tid->tid_lock);
3205 	if (status) {
3206 		rx_tid->delba_tx_fail_cnt++;
3207 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
3208 			rx_tid->delba_tx_retry = 0;
3209 			rx_tid->delba_tx_status = 0;
3210 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3211 		} else {
3212 			rx_tid->delba_tx_retry++;
3213 			rx_tid->delba_tx_status = 1;
3214 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3215 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
3216 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
3217 					peer->vdev->pdev->soc->ctrl_psoc,
3218 					peer->vdev->vdev_id,
3219 					peer->mac_addr.raw, tid,
3220 					rx_tid->delba_rcode);
3221 		}
3222 		goto end;
3223 	} else {
3224 		rx_tid->delba_tx_success_cnt++;
3225 		rx_tid->delba_tx_retry = 0;
3226 		rx_tid->delba_tx_status = 0;
3227 	}
3228 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
3229 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3230 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3231 		peer->active_ba_session_cnt--;
3232 	}
3233 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
3234 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
3235 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
3236 	}
3237 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
3238 
3239 end:
3240 	if (peer)
3241 		dp_peer_unref_delete(peer);
3242 
3243 	return ret;
3244 }
3245 
3246 /**
3247  * dp_set_pn_check_wifi3() - enable PN check in REO for security
3248  * @soc: Datapath soc handle
3249  * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
3255  *
3256  */
3257 
3258 QDF_STATUS
3259 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3260 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
3261 		      uint32_t *rx_pn)
3262 {
3263 	struct dp_pdev *pdev;
3264 	int i;
3265 	uint8_t pn_size;
3266 	struct hal_reo_cmd_params params;
3267 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3268 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3269 				peer_mac, 0, vdev_id);
3270 	struct dp_vdev *vdev =
3271 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
3272 						   vdev_id);
3273 
3274 	if (!vdev || !peer || peer->delete_in_progress) {
3275 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3276 			  "%s: Peer is NULL!\n", __func__);
3277 		status = QDF_STATUS_E_FAILURE;
3278 		goto fail;
3279 	}
3280 
3281 	pdev = vdev->pdev;
3282 	qdf_mem_zero(&params, sizeof(params));
3283 
3284 	params.std.need_status = 1;
3285 	params.u.upd_queue_params.update_pn_valid = 1;
3286 	params.u.upd_queue_params.update_pn_size = 1;
3287 	params.u.upd_queue_params.update_pn = 1;
3288 	params.u.upd_queue_params.update_pn_check_needed = 1;
3289 	params.u.upd_queue_params.update_svld = 1;
3290 	params.u.upd_queue_params.svld = 0;
3291 
3292 	switch (sec_type) {
3293 	case cdp_sec_type_tkip_nomic:
3294 	case cdp_sec_type_aes_ccmp:
3295 	case cdp_sec_type_aes_ccmp_256:
3296 	case cdp_sec_type_aes_gcmp:
3297 	case cdp_sec_type_aes_gcmp_256:
3298 		params.u.upd_queue_params.pn_check_needed = 1;
3299 		params.u.upd_queue_params.pn_size = 48;
3300 		pn_size = 48;
3301 		break;
3302 	case cdp_sec_type_wapi:
3303 		params.u.upd_queue_params.pn_check_needed = 1;
3304 		params.u.upd_queue_params.pn_size = 128;
3305 		pn_size = 128;
3306 		if (vdev->opmode == wlan_op_mode_ap) {
3307 			params.u.upd_queue_params.pn_even = 1;
3308 			params.u.upd_queue_params.update_pn_even = 1;
3309 		} else {
3310 			params.u.upd_queue_params.pn_uneven = 1;
3311 			params.u.upd_queue_params.update_pn_uneven = 1;
3312 		}
3313 		break;
3314 	default:
3315 		params.u.upd_queue_params.pn_check_needed = 0;
3316 		pn_size = 0;
3317 		break;
3318 	}
3319 
3320 
3321 	for (i = 0; i < DP_MAX_TIDS; i++) {
3322 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3323 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3324 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3325 			params.std.addr_lo =
3326 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3327 			params.std.addr_hi =
3328 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3329 
3330 			if (pn_size) {
3331 				QDF_TRACE(QDF_MODULE_ID_DP,
3332 					  QDF_TRACE_LEVEL_INFO_HIGH,
3333 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3334 					  __func__, i, rx_pn[3], rx_pn[2],
3335 					  rx_pn[1], rx_pn[0]);
3336 				params.u.upd_queue_params.update_pn_valid = 1;
3337 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3338 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3339 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3340 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3341 			}
3342 			rx_tid->pn_size = pn_size;
3343 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3344 					    CMD_UPDATE_RX_REO_QUEUE,
3345 					    &params, dp_rx_tid_update_cb,
3346 					    rx_tid)) {
3347 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3348 					   "tid %d desc %pK", rx_tid->tid,
3349 					   (void *)(rx_tid->hw_qdesc_paddr));
3350 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3351 					     rx.err.reo_cmd_send_fail, 1);
3352 			}
3353 		} else {
3354 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3355 				  "PN Check not setup for TID :%d ", i);
3356 		}
3357 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3358 	}
3359 fail:
3360 	if (peer)
3361 		dp_peer_unref_delete(peer);
3362 
3363 	return status;
3364 }
3365 
3366 
3367 /**
3368  * dp_set_key_sec_type_wifi3() - set security mode of key
3369  * @soc: Datapath soc handle
3370  * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @sec_type: security type
 * @is_unicast: key type (unicast or group)
3376  *
3377  */
3378 
3379 QDF_STATUS
3380 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3381 			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
3382 			  bool is_unicast)
3383 {
3384 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3385 				peer_mac, 0, vdev_id);
3386 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3387 	int sec_index;
3388 
3389 	if (!peer || peer->delete_in_progress) {
3390 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3391 			  "%s: Peer is NULL!\n", __func__);
3392 		status = QDF_STATUS_E_FAILURE;
3393 		goto fail;
3394 	}
3395 
3396 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3397 		  "key sec spec for peer %pK %pM: %s key of type %d",
3398 		  peer,
3399 		  peer->mac_addr.raw,
3400 		  is_unicast ? "ucast" : "mcast",
3401 		  sec_type);
3402 
3403 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3404 	peer->security[sec_index].sec_type = sec_type;
3405 
3406 fail:
3407 	if (peer)
3408 		dp_peer_unref_delete(peer);
3409 
3410 	return status;
3411 }
3412 
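/*
 * dp_rx_sec_ind_handler() - Handle the security indication from the target
 * (HTT_T2H_MSG_TYPE_SEC_IND) and record the negotiated security type for
 * the peer
 * @soc: Datapath soc handle
 * @peer_id: peer id from firmware
 * @sec_type: security type
 * @is_unicast: key type (unicast or group)
 * @michael_key: TKIP michael key
 * @rx_pn: Receive pn starting number
 */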
3413 void
3414 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3415 		      enum cdp_sec_type sec_type, int is_unicast,
3416 		      u_int32_t *michael_key,
3417 		      u_int32_t *rx_pn)
3418 {
3419 	struct dp_peer *peer;
3420 	int sec_index;
3421 
3422 	peer = dp_peer_find_by_id(soc, peer_id);
3423 	if (!peer) {
3424 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3425 			  "Couldn't find peer from ID %d - skipping security inits",
3426 			  peer_id);
3427 		return;
3428 	}
3429 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3430 		  "sec spec for peer %pK %pM: %s key of type %d",
3431 		  peer,
3432 		  peer->mac_addr.raw,
3433 		  is_unicast ? "ucast" : "mcast",
3434 		  sec_type);
3435 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3436 	peer->security[sec_index].sec_type = sec_type;
3437 #ifdef notyet /* TODO: See if this is required for defrag support */
3438 	/* michael key only valid for TKIP, but for simplicity,
3439 	 * copy it anyway
3440 	 */
3441 	qdf_mem_copy(
3442 		&peer->security[sec_index].michael_key[0],
3443 		michael_key,
3444 		sizeof(peer->security[sec_index].michael_key));
3445 #ifdef BIG_ENDIAN_HOST
3446 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3447 				 sizeof(peer->security[sec_index].michael_key));
3448 #endif /* BIG_ENDIAN_HOST */
3449 #endif
3450 
3451 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3452 	if (sec_type != cdp_sec_type_wapi) {
3453 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3454 	} else {
3455 		for (i = 0; i < DP_MAX_TIDS; i++) {
3456 			/*
3457 			 * Setting PN valid bit for WAPI sec_type,
3458 			 * since WAPI PN has to be started with predefined value
3459 			 */
3460 			peer->tids_last_pn_valid[i] = 1;
3461 			qdf_mem_copy(
3462 				(u_int8_t *) &peer->tids_last_pn[i],
3463 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3464 			peer->tids_last_pn[i].pn128[1] =
3465 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3466 			peer->tids_last_pn[i].pn128[0] =
3467 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3468 		}
3469 	}
3470 #endif
3471 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3472 	 * all security types and last pn for WAPI) once REO command API
3473 	 * is available
3474 	 */
3475 
3476 	dp_peer_unref_del_find_by_id(peer);
3477 }
3478 
3479 #ifdef QCA_PEER_EXT_STATS
3480 /*
3481  * dp_peer_ext_stats_ctx_alloc() - Allocate peer ext
 *                                 stats context
3483  * @soc: DP SoC context
3484  * @peer: DP peer context
3485  *
3486  * Allocate the peer extended stats context
3487  *
3488  * Return: QDF_STATUS_SUCCESS if allocation is
3489  *	   successful
3490  */
3491 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
3492 				       struct dp_peer *peer)
3493 {
3494 	uint8_t tid, ctx_id;
3495 
3496 	if (!soc || !peer) {
		dp_warn("Null soc%pK or peer%pK", soc, peer);
3498 		return QDF_STATUS_E_INVAL;
3499 	}
3500 
3501 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3502 		return QDF_STATUS_SUCCESS;
3503 
3504 	/*
3505 	 * Allocate memory for peer extended stats.
3506 	 */
3507 	peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
3508 	if (!peer->pext_stats) {
3509 		dp_err("Peer extended stats obj alloc failed!!");
3510 		return QDF_STATUS_E_NOMEM;
3511 	}
3512 
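	/*
	 * Initialize the sw enqueue delay, hw completion delay and
	 * reap-to-stack delay histograms for every data TID on each
	 * tx/rx ring context.
	 */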
3513 	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3514 		for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3515 			struct cdp_delay_tx_stats *tx_delay =
3516 			&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
3517 			struct cdp_delay_rx_stats *rx_delay =
3518 			&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
3519 
3520 			dp_hist_init(&tx_delay->tx_swq_delay,
3521 				     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3522 			dp_hist_init(&tx_delay->hwtx_delay,
3523 				     CDP_HIST_TYPE_HW_COMP_DELAY);
3524 			dp_hist_init(&rx_delay->to_stack_delay,
3525 				     CDP_HIST_TYPE_REAP_STACK);
3526 		}
3527 	}
3528 
3529 	return QDF_STATUS_SUCCESS;
3530 }
3531 
/**
 * dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer extended stats context
 * @soc: DP SoC context
 * @peer: DP peer context
 *
 * Free the peer extended stats context
 *
 * Return: Void
 */
3540 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
3541 {
3542 	if (!peer) {
3543 		dp_warn("peer_ext dealloc failed due to NULL peer object");
3544 		return;
3545 	}
3546 
3547 	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3548 		return;
3549 
3550 	if (!peer->pext_stats)
3551 		return;
3552 
3553 	qdf_mem_free(peer->pext_stats);
3554 	peer->pext_stats = NULL;
3555 }
3556 #endif
3557 
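/**
 * dp_rx_delba_ind_handler() - handle a DELBA indication from the target
 * @soc_handle: DP SoC handle
 * @peer_id: peer id of the BA session
 * @tid: TID of the BA session
 * @win_sz: BA window size indicated by the target
 *
 * Record the window size (capped at 63) to be used when the BA session
 * is renegotiated and request the control path to send a DELBA with
 * reason code IEEE80211_REASON_QOS_SETUP_REQUIRED.
 *
 * Return: QDF_STATUS_SUCCESS if the indication was handled,
 *	   QDF_STATUS_E_FAILURE otherwise
 */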
3558 QDF_STATUS
3559 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
3560 			uint8_t tid, uint16_t win_sz)
3561 {
3562 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3563 	struct dp_peer *peer;
3564 	struct dp_rx_tid *rx_tid;
3565 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3566 
3567 	peer = dp_peer_find_by_id(soc, peer_id);
3568 
3569 	if (!peer) {
3570 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3571 			  "Couldn't find peer from ID %d",
3572 			  peer_id);
3573 		return QDF_STATUS_E_FAILURE;
3574 	}
3575 
3576 	qdf_assert_always(tid < DP_MAX_TIDS);
3577 
3578 	rx_tid = &peer->rx_tid[tid];
3579 
3580 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
3581 		if (!rx_tid->delba_tx_status) {
3582 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3583 				  "%s: PEER_ID: %d TID: %d, BA win: %d ",
3584 				  __func__, peer_id, tid, win_sz);
3585 
3586 			qdf_spin_lock_bh(&rx_tid->tid_lock);
3587 
3588 			rx_tid->delba_tx_status = 1;
3589 
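			/* Remember the window size to use when the BA
			 * session is re-established, capped at 63.
			 */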
3590 			rx_tid->rx_ba_win_size_override =
3591 			    qdf_min((uint16_t)63, win_sz);
3592 
3593 			rx_tid->delba_rcode =
3594 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
3595 
3596 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
3597 
3598 			if (soc->cdp_soc.ol_ops->send_delba)
3599 				soc->cdp_soc.ol_ops->send_delba(
3600 					peer->vdev->pdev->soc->ctrl_psoc,
3601 					peer->vdev->vdev_id,
3602 					peer->mac_addr.raw,
3603 					tid,
3604 					rx_tid->delba_rcode);
3605 		}
3606 	} else {
3607 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "BA session is not set up for TID:%d", tid);
3609 		status = QDF_STATUS_E_FAILURE;
3610 	}
3611 
3612 	dp_peer_unref_del_find_by_id(peer);
3613 
3614 	return status;
3615 }
3616 
3617 #ifdef DP_PEER_EXTENDED_API
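/**
 * dp_register_peer() - register a peer with the datapath
 * @soc_hdl - datapath soc handle
 * @pdev_id - physical device instance id
 * @sta_desc - peer descriptor holding the peer mac address
 *
 * Look up the peer by mac address, move it to the connected state and
 * flush any rx frames cached for it.
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAULT if the pdev or peer cannot be found
 */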
3618 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3619 			    struct ol_txrx_desc_type *sta_desc)
3620 {
3621 	struct dp_peer *peer;
3622 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3623 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3624 
3625 	if (!pdev)
3626 		return QDF_STATUS_E_FAULT;
3627 
3628 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
3629 				    sta_desc->peer_addr.bytes);
3630 
3631 	if (!peer)
3632 		return QDF_STATUS_E_FAULT;
3633 
3634 	qdf_spin_lock_bh(&peer->peer_info_lock);
3635 	peer->state = OL_TXRX_PEER_STATE_CONN;
3636 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3637 
3638 	dp_rx_flush_rx_cached(peer, false);
3639 
3640 	return QDF_STATUS_SUCCESS;
3641 }
3642 
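/**
 * dp_clear_peer() - clear the datapath state of a peer
 * @soc_hdl - datapath soc handle
 * @pdev_id - physical device instance id
 * @peer_addr - peer mac address
 *
 * Look up a valid peer by mac address and clear its per-peer datapath
 * state via dp_clear_peer_internal().
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAULT if the pdev or a valid peer cannot be found
 */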
3643 QDF_STATUS
3644 dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3645 	      struct qdf_mac_addr peer_addr)
3646 {
3647 	struct dp_peer *peer;
3648 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3649 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3650 
3651 	if (!pdev)
3652 		return QDF_STATUS_E_FAULT;
3653 
3654 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3655 	if (!peer || !peer->valid)
3656 		return QDF_STATUS_E_FAULT;
3657 
3658 	dp_clear_peer_internal(soc, peer);
3659 	return QDF_STATUS_SUCCESS;
3660 }
3661 
/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle - data path device instance
 * @vdev_handle - virtual interface instance
 * @peer_addr - peer mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL if the target peer cannot be found
 */
3673 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3674 		struct cdp_vdev *vdev_handle,
3675 		uint8_t *peer_addr)
3676 {
3677 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3678 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3679 	struct dp_peer *peer;
3680 
3681 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);
3682 
3683 	if (!peer)
3684 		return NULL;
3685 
3686 	if (peer->vdev != vdev) {
3687 		dp_peer_unref_delete(peer);
3688 		return NULL;
3689 	}
3690 
3691 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3692 	 * Decrement it here.
3693 	 */
3694 	dp_peer_unref_delete(peer);
3695 
3696 	return peer;
3697 }
3698 
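/**
 * dp_peer_state_update() - update the state of a peer
 * @soc_hdl - datapath soc handle
 * @peer_mac - peer mac address
 * @state - new ol_txrx_peer_state to move the peer to
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found
 */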
3699 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3700 				enum ol_txrx_peer_state state)
3701 {
3702 	struct dp_peer *peer;
3703 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3704 
3705 	peer =  dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3706 	if (!peer) {
3707 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3708 			  "Failed to find peer for: [%pM]", peer_mac);
3709 		return QDF_STATUS_E_FAILURE;
3710 	}
3711 	peer->state = state;
3712 
3713 	dp_info("peer %pK state %d", peer, peer->state);
3714 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3715 	 * Decrement it here.
3716 	 */
3717 	dp_peer_unref_delete(peer);
3718 
3719 	return QDF_STATUS_SUCCESS;
3720 }
3721 
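/**
 * dp_get_vdevid() - get the id of the vdev a peer belongs to
 * @soc_hdl - datapath soc handle
 * @peer_mac - peer mac address
 * @vdev_id - buffer to hold the vdev id
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found
 */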
3722 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
3723 			 uint8_t *vdev_id)
3724 {
3725 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3726 	struct dp_peer *peer =
3727 		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
3728 
3729 	if (!peer)
3730 		return QDF_STATUS_E_FAILURE;
3731 
3732 	dp_info("peer %pK vdev %pK vdev id %d",
3733 		peer, peer->vdev, peer->vdev->vdev_id);
3734 	*vdev_id = peer->vdev->vdev_id;
3735 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3736 	 * Decrement it here.
3737 	 */
3738 	dp_peer_unref_delete(peer);
3739 
3740 	return QDF_STATUS_SUCCESS;
3741 }
3742 
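/**
 * dp_get_vdev_by_peer_addr() - get the vdev instance a peer belongs to
 * @pdev_handle - physical device instance
 * @peer_addr - peer mac address
 *
 * Return: virtual interface instance pointer
 *         NULL if the pdev or the peer cannot be found
 */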
3743 struct cdp_vdev *
3744 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3745 			 struct qdf_mac_addr peer_addr)
3746 {
3747 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3748 	struct dp_peer *peer = NULL;
3749 
3750 	if (!pdev) {
3751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3752 			  "PDEV not found for peer_addr: %pM",
3753 			  peer_addr.bytes);
3754 		return NULL;
3755 	}
3756 
3757 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3758 	if (!peer) {
3759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: %pM",
3761 			  peer_addr.bytes);
3762 		return NULL;
3763 	}
3764 
3765 	return (struct cdp_vdev *)peer->vdev;
3766 }
3767 
/**
 * dp_get_vdev_for_peer() - Get virtual interface instance a peer belongs to
 * @peer - peer instance
 *
 * Get the virtual interface instance to which the peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL if it cannot be found
 */
3777 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3778 {
3779 	struct dp_peer *peer = peer_handle;
3780 
3781 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3782 	return (struct cdp_vdev *)peer->vdev;
3783 }
3784 
/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer - peer instance
 *
 * Get the peer's mac address
 *
 * Return: peer mac address pointer
 *         NULL if it cannot be found
 */
3794 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3795 {
3796 	struct dp_peer *peer = peer_handle;
3797 	uint8_t *mac;
3798 
3799 	mac = peer->mac_addr.raw;
3800 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3801 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3802 	return peer->mac_addr.raw;
3803 }
3804 
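/**
 * dp_get_peer_state() - get the current state of a peer
 * @soc_hdl - datapath soc handle
 * @vdev_id - vdev id the peer is looked up on
 * @peer_mac - peer mac address
 *
 * Return: peer state (enum ol_txrx_peer_state)
 *         QDF_STATUS_E_FAILURE if the peer cannot be found
 */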
3805 int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3806 		      uint8_t *peer_mac)
3807 {
3808 	enum ol_txrx_peer_state peer_state;
3809 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3810 	struct dp_peer *peer =  dp_peer_find_hash_find(soc, peer_mac, 0,
3811 						       vdev_id);
3812 
3813 	if (!peer)
3814 		return QDF_STATUS_E_FAILURE;
3815 
	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3817 	peer_state = peer->state;
3818 	dp_peer_unref_delete(peer);
3819 
3820 	return peer_state;
3821 }
3822 
/**
 * dp_local_peer_id_pool_init() - initialize the local peer id pool for a physical device
 * @pdev - data path device instance
 *
 * Set up the local peer id freelist for the physical device
 *
 * Return: none
 */
3831 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3832 {
3833 	int i;
3834 
3835 	/* point the freelist to the first ID */
3836 	pdev->local_peer_ids.freelist = 0;
3837 
3838 	/* link each ID to the next one */
3839 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3840 		pdev->local_peer_ids.pool[i] = i + 1;
3841 		pdev->local_peer_ids.map[i] = NULL;
3842 	}
3843 
3844 	/* link the last ID to itself, to mark the end of the list */
3845 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3846 	pdev->local_peer_ids.pool[i] = i;
3847 
3848 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3849 	DP_TRACE(INFO, "Peer pool init");
3850 }
3851 
3852 /**
3853  * dp_local_peer_id_alloc() - allocate local peer id
3854  * @pdev - data path device instance
3855  * @peer - new peer instance
3856  *
3857  * allocate local peer id
3858  *
3859  * Return: none
3860  */
3861 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3862 {
3863 	int i;
3864 
3865 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3866 	i = pdev->local_peer_ids.freelist;
3867 	if (pdev->local_peer_ids.pool[i] == i) {
3868 		/* the list is empty, except for the list-end marker */
3869 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3870 	} else {
3871 		/* take the head ID and advance the freelist */
3872 		peer->local_id = i;
3873 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3874 		pdev->local_peer_ids.map[i] = peer;
3875 	}
3876 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3877 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3878 }
3879 
/**
 * dp_local_peer_id_free() - free local peer id
 * @pdev - data path device instance
 * @peer - peer instance whose local id should be freed
 *
 * Return the peer's local id to the freelist
 *
 * Return: none
 */
3889 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3890 {
	int i = peer->local_id;

3892 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3893 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3894 		return;
3895 	}
3896 
3897 	/* put this ID on the head of the freelist */
3898 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3899 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3900 	pdev->local_peer_ids.freelist = i;
3901 	pdev->local_peer_ids.map[i] = NULL;
3902 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3903 }
3904 
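/**
 * dp_find_peer_exist_on_vdev() - check whether a peer exists on a given vdev
 * @soc_hdl - datapath soc handle
 * @vdev_id - id of the vdev to search
 * @peer_addr - peer mac address
 *
 * Return: true if a peer with the given mac address exists on the vdev
 *         false otherwise
 */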
3905 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
3906 				uint8_t vdev_id, uint8_t *peer_addr)
3907 {
3908 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3909 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
3910 
3911 	if (!vdev)
3912 		return false;
3913 
3914 	return !!dp_find_peer_by_addr_and_vdev(
3915 					dp_pdev_to_cdp_pdev(vdev->pdev),
3916 					dp_vdev_to_cdp_vdev(vdev),
3917 					peer_addr);
3918 }
3919 
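/**
 * dp_find_peer_exist_on_other_vdev() - check whether a peer exists on any other vdev
 * @soc_hdl - datapath soc handle
 * @vdev_id - vdev id to exclude from the search
 * @peer_addr - peer mac address
 * @max_bssid - number of vdev ids to scan
 *
 * Return: true if a duplicate peer exists on another vdev
 *         false otherwise
 */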
3920 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
3921 				      uint8_t vdev_id, uint8_t *peer_addr,
3922 				      uint16_t max_bssid)
3923 {
3924 	int i;
3925 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3926 	struct dp_vdev *vdev;
3927 
3928 	for (i = 0; i < max_bssid; i++) {
3929 		vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
3930 		/* Need to check vdevs other than the vdev_id */
3931 		if (vdev_id == i || !vdev)
3932 			continue;
3933 		if (dp_find_peer_by_addr_and_vdev(
3934 					dp_pdev_to_cdp_pdev(vdev->pdev),
3935 					dp_vdev_to_cdp_vdev(vdev),
3936 					peer_addr)) {
			dp_err("%s: Duplicate peer %pM already exists on vdev %d",
3938 			       __func__, peer_addr, i);
3939 			return true;
3940 		}
3941 	}
3942 
3943 	return false;
3944 }
3945 
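/**
 * dp_find_peer_exist() - check whether a peer exists on a given pdev
 * @soc_hdl - datapath soc handle
 * @pdev_id - physical device instance id
 * @peer_addr - peer mac address
 *
 * Return: true if the peer exists, false otherwise
 */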
3946 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3947 			uint8_t *peer_addr)
3948 {
3949 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3950 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3951 
3952 	if (!pdev)
3953 		return false;
3954 
3955 	return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
3956 }
3957 #endif
3958 
/**
 * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: count of TID stats commands sent successfully
 */
3967 int dp_peer_rxtid_stats(struct dp_peer *peer,
3968 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
3969 			void *cb_ctxt)
3970 {
3971 	struct dp_soc *soc = peer->vdev->pdev->soc;
3972 	struct hal_reo_cmd_params params;
3973 	int i;
3974 	int stats_cmd_sent_cnt = 0;
3975 	QDF_STATUS status;
3976 
3977 	if (!dp_stats_cmd_cb)
3978 		return stats_cmd_sent_cnt;
3979 
3980 	qdf_mem_zero(&params, sizeof(params));
3981 	for (i = 0; i < DP_MAX_TIDS; i++) {
3982 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3983 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3984 			params.std.need_status = 1;
3985 			params.std.addr_lo =
3986 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3987 			params.std.addr_hi =
3988 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3989 
3990 			if (cb_ctxt) {
3991 				status = dp_reo_send_cmd(
3992 						soc, CMD_GET_QUEUE_STATS,
3993 						&params, dp_stats_cmd_cb,
3994 						cb_ctxt);
3995 			} else {
3996 				status = dp_reo_send_cmd(
3997 						soc, CMD_GET_QUEUE_STATS,
3998 						&params, dp_stats_cmd_cb,
3999 						rx_tid);
4000 			}
4001 
4002 			if (QDF_IS_STATUS_SUCCESS(status))
4003 				stats_cmd_sent_cnt++;
4004 
			/* Flush the REO descriptor from the HW cache so the
			 * stats in descriptor memory are updated. This is to
			 * help debugging.
			 */
4007 			qdf_mem_zero(&params, sizeof(params));
4008 			params.std.need_status = 0;
4009 			params.std.addr_lo =
4010 				rx_tid->hw_qdesc_paddr & 0xffffffff;
4011 			params.std.addr_hi =
4012 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
4013 			params.u.fl_cache_params.flush_no_inval = 1;
4014 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
4015 				NULL);
4016 		}
4017 	}
4018 
4019 	return stats_cmd_sent_cnt;
4020 }
4021 
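/**
 * dp_set_michael_key() - set the TKIP michael key for a peer
 * @soc: datapath soc handle
 * @vdev_id: vdev id of the peer
 * @peer_mac: peer mac address
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: michael key of IEEE80211_WEP_MICLEN bytes
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE if the peer cannot be found or is being
 *	   deleted
 */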
4022 QDF_STATUS
4023 dp_set_michael_key(struct cdp_soc_t *soc,
4024 		   uint8_t vdev_id,
4025 		   uint8_t *peer_mac,
4026 		   bool is_unicast, uint32_t *key)
4027 {
4028 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4029 	uint8_t sec_index = is_unicast ? 1 : 0;
4030 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4031 						      peer_mac, 0, vdev_id);
4032 
4033 	if (!peer || peer->delete_in_progress) {
4034 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
4036 		status = QDF_STATUS_E_FAILURE;
4037 		goto fail;
4038 	}
4039 
4040 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
4041 		     key, IEEE80211_WEP_MICLEN);
4042 
4043 fail:
4044 	if (peer)
4045 		dp_peer_unref_delete(peer);
4046 
4047 	return status;
4048 }
4049 
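/**
 * dp_peer_find_by_id_valid() - check whether a peer with the given id exists
 * @soc: DP soc
 * @peer_id: peer id to look up
 *
 * Return: true if a peer for the id is found, false otherwise
 */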
4050 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
4051 {
4052 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
4053 
4054 	if (peer) {
4055 		/*
4056 		 * Decrement the peer ref which is taken as part of
4057 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
4058 		 */
4059 		dp_peer_unref_del_find_by_id(peer);
4060 
4061 		return true;
4062 	}
4063 
4064 	return false;
4065 }
4066 
/**
 * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
 * @soc: DP soc
 * @vdev: vdev
 *
 * Return: VDEV BSS peer with a reference held, NULL if not found
 */
4074 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
4075 					   struct dp_vdev *vdev)
4076 {
4077 	struct dp_peer *peer;
4078 
4079 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4080 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4081 		if (peer->bss_peer)
4082 			break;
4083 	}
4084 
4085 	if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
4086 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4087 		return NULL;
4088 	}
4089 
4090 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4091 	return peer;
4092 }
4093 
/**
 * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
 * @soc: DP soc
 * @vdev: vdev
 *
 * Return: VDEV self peer with a reference held, NULL if not found
 */
4101 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
4102 						struct dp_vdev *vdev)
4103 {
4104 	struct dp_peer *peer;
4105 
4106 	if (vdev->opmode != wlan_op_mode_sta)
4107 		return NULL;
4108 
4109 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4110 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4111 		if (peer->sta_self_peer)
4112 			break;
4113 	}
4114 
4115 	if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
4116 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4117 		return NULL;
4118 	}
4119 
4120 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4121 	return peer;
4122 }
4123