xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include "dp_rx.h"
28 #include <hal_api.h>
29 #include <hal_reo.h>
30 #include <cdp_txrx_handle.h>
31 #include <wlan_cfg.h>
32 
33 #ifdef WLAN_TX_PKT_CAPTURE_ENH
34 #include "dp_tx_capture.h"
35 #endif
36 
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
44 		  "%s: Setting SSN valid bit to %d",
45 		  __func__, valid);
46 }
47 
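/*
 * dp_peer_find_mac_addr_cmp() - compare two word-aligned MAC addresses
 *
 * Returns 0 when the two addresses match and a non-zero value otherwise
 * (memcmp-style), so callers typically test !dp_peer_find_mac_addr_cmp().
 */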
48 static inline int dp_peer_find_mac_addr_cmp(
49 	union dp_align_mac_addr *mac_addr1,
50 	union dp_align_mac_addr *mac_addr2)
51 {
52 		/*
53 		 * Intentionally use & rather than &&:
54 		 * because the operands are binary rather than generic boolean,
55 		 * the functionality is equivalent.
56 		 * Using && has the advantage of short-circuited evaluation,
57 		 * but using & has the advantage of no conditional branching,
58 		 * which is a more significant benefit.
59 		 */
60 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
61 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
62 }
63 
64 static int dp_peer_ast_table_attach(struct dp_soc *soc)
65 {
66 	uint32_t max_ast_index;
67 
68 	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
69 	/* allocate ast_table for ast entry to ast_index map */
70 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
71 		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
72 	soc->ast_table = qdf_mem_malloc(max_ast_index *
73 					sizeof(struct dp_ast_entry *));
74 	if (!soc->ast_table) {
75 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
76 			  "%s: ast_table memory allocation failed", __func__);
77 		return QDF_STATUS_E_NOMEM;
78 	}
79 	return 0; /* success */
80 }
81 
82 static int dp_peer_find_map_attach(struct dp_soc *soc)
83 {
84 	uint32_t max_peers, peer_map_size;
85 
86 	max_peers = soc->max_peers;
87 	/* allocate the peer ID -> peer object map */
88 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
89 		  "\n<=== cfg max peer id %d ====>", max_peers);
90 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
91 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
92 	if (!soc->peer_id_to_obj_map) {
93 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
94 			  "%s: peer map memory allocation failed", __func__);
95 		return QDF_STATUS_E_NOMEM;
96 	}
97 
98 	/*
99 	 * The peer_id_to_obj_map doesn't really need to be initialized,
100 	 * since elements are only used after they have been individually
101 	 * initialized.
102 	 * However, it is convenient for debugging to have all elements
103 	 * that are not in use set to 0.
104 	 */
105 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
106 	return 0; /* success */
107 }
108 
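/*
 * dp_log2_ceil() - compute ceil(log2(value))
 *
 * For example, dp_log2_ceil(5) = 3 and dp_log2_ceil(8) = 3; the result is
 * used below to round hash table sizes up to a power of two.
 */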
109 static int dp_log2_ceil(unsigned int value)
110 {
111 	unsigned int tmp = value;
112 	int log2 = -1;
113 
114 	while (tmp) {
115 		log2++;
116 		tmp >>= 1;
117 	}
118 	if (1 << log2 != value)
119 		log2++;
120 	return log2;
121 }
122 
123 static int dp_peer_find_add_id_to_obj(
124 	struct dp_peer *peer,
125 	uint16_t peer_id)
126 {
127 	int i;
128 
129 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
130 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
131 			peer->peer_ids[i] = peer_id;
132 			return 0; /* success */
133 		}
134 	}
135 	return QDF_STATUS_E_FAILURE; /* failure */
136 }
137 
138 #define DP_PEER_HASH_LOAD_MULT  2
139 #define DP_PEER_HASH_LOAD_SHIFT 0
140 
141 #define DP_AST_HASH_LOAD_MULT  2
142 #define DP_AST_HASH_LOAD_SHIFT 0
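/*
 * The number of hash bins is derived as (num_entries * LOAD_MULT) >>
 * LOAD_SHIFT, rounded up to the next power of two. For example, if
 * max_peers were 1024, the defaults above would give the peer hash
 * table 2048 bins (mask 0x7ff, 11 index bits).
 */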
143 
144 static int dp_peer_find_hash_attach(struct dp_soc *soc)
145 {
146 	int i, hash_elems, log2;
147 
148 	/* allocate the peer MAC address -> peer object hash table */
149 	hash_elems = soc->max_peers;
150 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
151 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
152 	log2 = dp_log2_ceil(hash_elems);
153 	hash_elems = 1 << log2;
154 
155 	soc->peer_hash.mask = hash_elems - 1;
156 	soc->peer_hash.idx_bits = log2;
157 	/* allocate an array of TAILQ peer object lists */
158 	soc->peer_hash.bins = qdf_mem_malloc(
159 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
160 	if (!soc->peer_hash.bins)
161 		return QDF_STATUS_E_NOMEM;
162 
163 	for (i = 0; i < hash_elems; i++)
164 		TAILQ_INIT(&soc->peer_hash.bins[i]);
165 
166 	return 0;
167 }
168 
169 static void dp_peer_find_hash_detach(struct dp_soc *soc)
170 {
171 	if (soc->peer_hash.bins) {
172 		qdf_mem_free(soc->peer_hash.bins);
173 		soc->peer_hash.bins = NULL;
174 	}
175 }
176 
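/*
 * dp_peer_find_hash_index() - hash a MAC address into a peer hash bin
 *
 * XOR the three 16-bit halves of the MAC address, fold the upper bits
 * back in with a shift by idx_bits, then mask to the table size.
 */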
177 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
178 	union dp_align_mac_addr *mac_addr)
179 {
180 	unsigned index;
181 
182 	index =
183 		mac_addr->align2.bytes_ab ^
184 		mac_addr->align2.bytes_cd ^
185 		mac_addr->align2.bytes_ef;
186 	index ^= index >> soc->peer_hash.idx_bits;
187 	index &= soc->peer_hash.mask;
188 	return index;
189 }
190 
191 
192 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
193 {
194 	unsigned index;
195 
196 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
197 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
198 	/*
199 	 * It is important to add the new peer at the tail of the peer list
200 	 * with the bin index.  Together with having the hash_find function
201 	 * search from head to tail, this ensures that if two entries with
202 	 * the same MAC address are stored, the one added first will be
203 	 * found first.
204 	 */
205 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
206 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
207 }
208 
209 #ifdef FEATURE_AST
210 /*
211  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
212  * @soc: SoC handle
213  *
214  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
215  */
216 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
217 {
218 	int i, hash_elems, log2;
219 	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
220 
221 	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
222 		DP_AST_HASH_LOAD_SHIFT);
223 
224 	log2 = dp_log2_ceil(hash_elems);
225 	hash_elems = 1 << log2;
226 
227 	soc->ast_hash.mask = hash_elems - 1;
228 	soc->ast_hash.idx_bits = log2;
229 
230 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
231 		  "ast hash_elems: %d, max_ast_idx: %d",
232 		  hash_elems, max_ast_idx);
233 
234 	/* allocate an array of TAILQ peer object lists */
235 	soc->ast_hash.bins = qdf_mem_malloc(
236 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
237 				dp_ast_entry)));
238 
239 	if (!soc->ast_hash.bins)
240 		return QDF_STATUS_E_NOMEM;
241 
242 	for (i = 0; i < hash_elems; i++)
243 		TAILQ_INIT(&soc->ast_hash.bins[i]);
244 
245 	return 0;
246 }
247 
248 /*
249  * dp_peer_ast_cleanup() - cleanup the references
250  * @soc: SoC handle
251  * @ast: ast entry
252  *
253  * Return: None
254  */
255 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
256 				       struct dp_ast_entry *ast)
257 {
258 	txrx_ast_free_cb cb = ast->callback;
259 	void *cookie = ast->cookie;
260 
261 	/* Call the callbacks to free up the cookie */
262 	if (cb) {
263 		ast->callback = NULL;
264 		ast->cookie = NULL;
265 		cb(soc->ctrl_psoc,
266 		   dp_soc_to_cdp_soc(soc),
267 		   cookie,
268 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
269 	}
270 }
271 
272 /*
273  * dp_peer_ast_hash_detach() - Free AST Hash table
274  * @soc: SoC handle
275  *
276  * Return: None
277  */
278 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
279 {
280 	unsigned int index;
281 	struct dp_ast_entry *ast, *ast_next;
282 
283 	if (!soc->ast_hash.mask)
284 		return;
285 
286 	if (!soc->ast_hash.bins)
287 		return;
288 
289 	qdf_spin_lock_bh(&soc->ast_lock);
290 	for (index = 0; index <= soc->ast_hash.mask; index++) {
291 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
292 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
293 					   hash_list_elem, ast_next) {
294 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
295 					     hash_list_elem);
296 				dp_peer_ast_cleanup(soc, ast);
297 				qdf_mem_free(ast);
298 			}
299 		}
300 	}
301 	qdf_spin_unlock_bh(&soc->ast_lock);
302 
303 	qdf_mem_free(soc->ast_hash.bins);
304 	soc->ast_hash.bins = NULL;
305 }
306 
307 /*
308  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
309  * @soc: SoC handle
310  *
311  * Return: AST hash
312  */
313 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
314 	union dp_align_mac_addr *mac_addr)
315 {
316 	uint32_t index;
317 
318 	index =
319 		mac_addr->align2.bytes_ab ^
320 		mac_addr->align2.bytes_cd ^
321 		mac_addr->align2.bytes_ef;
322 	index ^= index >> soc->ast_hash.idx_bits;
323 	index &= soc->ast_hash.mask;
324 	return index;
325 }
326 
327 /*
328  * dp_peer_ast_hash_add() - Add AST entry into hash table
329  * @soc: SoC handle
330  *
331  * This function adds the AST entry into SoC AST hash table
332  * It assumes caller has taken the ast lock to protect the access to this table
333  *
334  * Return: None
335  */
336 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
337 		struct dp_ast_entry *ase)
338 {
339 	uint32_t index;
340 
341 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
342 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
343 }
344 
345 /*
346  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
347  * @soc: SoC handle
348  *
349  * This function removes the AST entry from soc AST hash table
350  * It assumes caller has taken the ast lock to protect the access to this table
351  *
352  * Return: None
353  */
354 void dp_peer_ast_hash_remove(struct dp_soc *soc,
355 			     struct dp_ast_entry *ase)
356 {
357 	unsigned index;
358 	struct dp_ast_entry *tmpase;
359 	int found = 0;
360 
361 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
362 	/* Check if the tailq is not empty before delete */
363 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
364 
365 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
366 		if (tmpase == ase) {
367 			found = 1;
368 			break;
369 		}
370 	}
371 
372 	QDF_ASSERT(found);
373 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
374 }
375 
376 /*
377  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
378  * @soc: SoC handle
379  * @peer: peer handle
380  * @ast_mac_addr: mac address
381  *
382  * It assumes caller has taken the ast lock to protect the access to ast list
383  *
384  * Return: AST entry
385  */
386 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
387 					   struct dp_peer *peer,
388 					   uint8_t *ast_mac_addr)
389 {
390 	struct dp_ast_entry *ast_entry = NULL;
391 	union dp_align_mac_addr *mac_addr =
392 		(union dp_align_mac_addr *)ast_mac_addr;
393 
394 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
395 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
396 					       &ast_entry->mac_addr)) {
397 			return ast_entry;
398 		}
399 	}
400 
401 	return NULL;
402 }
403 
404 /*
405  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
406  * @soc: SoC handle
407  *
408  * It assumes caller has taken the ast lock to protect the access to
409  * AST hash table
410  *
411  * Return: AST entry
412  */
413 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
414 						     uint8_t *ast_mac_addr,
415 						     uint8_t pdev_id)
416 {
417 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
418 	uint32_t index;
419 	struct dp_ast_entry *ase;
420 
421 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
422 		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
423 	mac_addr = &local_mac_addr_aligned;
424 
425 	index = dp_peer_ast_hash_index(soc, mac_addr);
426 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
427 		if ((pdev_id == ase->pdev_id) &&
428 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
429 			return ase;
430 		}
431 	}
432 
433 	return NULL;
434 }
435 
436 /*
437  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
438  * @soc: SoC handle
439  *
440  * It assumes caller has taken the ast lock to protect the access to
441  * AST hash table
442  *
443  * Return: AST entry
444  */
445 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
446 					       uint8_t *ast_mac_addr)
447 {
448 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
449 	unsigned index;
450 	struct dp_ast_entry *ase;
451 
452 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
453 			ast_mac_addr, QDF_MAC_ADDR_SIZE);
454 	mac_addr = &local_mac_addr_aligned;
455 
456 	index = dp_peer_ast_hash_index(soc, mac_addr);
457 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
458 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
459 			return ase;
460 		}
461 	}
462 
463 	return NULL;
464 }
465 
466 /*
467  * dp_peer_map_ast() - Map the ast entry with HW AST Index
468  * @soc: SoC handle
469  * @peer: peer to which ast node belongs
470  * @mac_addr: MAC address of ast node
471  * @hw_peer_id: HW AST Index returned by target in peer map event
472  * @vdev_id: vdev id of the VAP to which the peer belongs
473  * @ast_hash: ast hash value in HW
474  *
475  * Return: None
476  */
477 static inline void dp_peer_map_ast(struct dp_soc *soc,
478 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
479 	uint8_t vdev_id, uint16_t ast_hash)
480 {
481 	struct dp_ast_entry *ast_entry = NULL;
482 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
483 
484 	if (!peer) {
485 		return;
486 	}
487 
488 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
489 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
490 		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
491 		  mac_addr[1], mac_addr[2], mac_addr[3],
492 		  mac_addr[4], mac_addr[5]);
493 
494 	qdf_spin_lock_bh(&soc->ast_lock);
495 
496 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
497 
498 	if (ast_entry) {
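		/* Record the HW AST index reported by the target and
		 * publish the entry in ast_table so it can be looked
		 * up by index on the Rx path.
		 */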
499 		ast_entry->ast_idx = hw_peer_id;
500 		soc->ast_table[hw_peer_id] = ast_entry;
501 		ast_entry->is_active = TRUE;
502 		peer_type = ast_entry->type;
503 		ast_entry->ast_hash_value = ast_hash;
504 		ast_entry->is_mapped = TRUE;
505 	}
506 
507 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
508 		if (soc->cdp_soc.ol_ops->peer_map_event) {
509 			soc->cdp_soc.ol_ops->peer_map_event(
510 			soc->ctrl_psoc, peer->peer_ids[0],
511 			hw_peer_id, vdev_id,
512 			mac_addr, peer_type, ast_hash);
513 		}
514 	} else {
515 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
516 			  "AST entry not found");
517 	}
518 
519 	qdf_spin_unlock_bh(&soc->ast_lock);
520 	return;
521 }
522 
523 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
524 			   struct cdp_soc *dp_soc,
525 			   void *cookie,
526 			   enum cdp_ast_free_status status)
527 {
528 	struct dp_ast_free_cb_params *param =
529 		(struct dp_ast_free_cb_params *)cookie;
530 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
531 	struct dp_peer *peer = NULL;
532 
533 	if (status != CDP_TXRX_AST_DELETED) {
534 		qdf_mem_free(cookie);
535 		return;
536 	}
537 
538 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
539 				      0, param->vdev_id);
540 	if (peer) {
541 		dp_peer_add_ast(soc, peer,
542 				&param->mac_addr.raw[0],
543 				param->type,
544 				param->flags);
545 		dp_peer_unref_delete(peer);
546 	}
547 	qdf_mem_free(cookie);
548 }
549 
550 /*
551  * dp_peer_add_ast() - Allocate and add AST entry into peer list
552  * @soc: SoC handle
553  * @peer: peer to which ast node belongs
554  * @mac_addr: MAC address of ast node
555  * @type: AST entry type (enum cdp_txrx_ast_entry_type)
556  * @flags: WDS entry flags passed down to the target
557  * This API is used by the WDS source port learning function to
558  * add a new AST entry into the peer AST list.
559  *
560  * Return: 0 if the entry was added or an equivalent entry already exists,
561  *        -1 if the entry add failed
562  */
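/*
 * Illustrative call from an Rx WDS source-port-learning path (the variable
 * names here are examples only):
 *	dp_peer_add_ast(soc, ta_peer, wds_src_mac, CDP_TXRX_AST_TYPE_WDS,
 *			flags);
 */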
563 int dp_peer_add_ast(struct dp_soc *soc,
564 			struct dp_peer *peer,
565 			uint8_t *mac_addr,
566 			enum cdp_txrx_ast_entry_type type,
567 			uint32_t flags)
568 {
569 	struct dp_ast_entry *ast_entry = NULL;
570 	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
571 	struct dp_pdev *pdev = NULL;
572 	uint8_t next_node_mac[6];
573 	int  ret = -1;
574 	txrx_ast_free_cb cb = NULL;
575 	void *cookie = NULL;
576 	struct dp_peer *tmp_peer = NULL;
577 	bool is_peer_found = false;
578 
579 	vdev = peer->vdev;
580 	if (!vdev) {
581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
582 			  FL("Peers vdev is NULL"));
583 		QDF_ASSERT(0);
584 		return ret;
585 	}
586 
587 	pdev = vdev->pdev;
588 
589 	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
590 					  DP_VDEV_ALL);
591 	if (tmp_peer) {
592 		tmp_vdev = tmp_peer->vdev;
593 		if (!tmp_vdev) {
594 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
595 				  FL("Peers vdev is NULL"));
596 			QDF_ASSERT(0);
597 			dp_peer_unref_delete(tmp_peer);
598 			return ret;
599 		}
600 		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
601 			is_peer_found = true;
602 
603 		dp_peer_unref_delete(tmp_peer);
604 	}
605 
606 	qdf_spin_lock_bh(&soc->ast_lock);
607 	if (peer->delete_in_progress) {
608 		qdf_spin_unlock_bh(&soc->ast_lock);
609 		return ret;
610 	}
611 
612 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
613 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
614 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
615 		  peer->mac_addr.raw, peer, mac_addr);
616 
617 
618 	/* FW supports only 2 * max_peers AST entries */
619 	if (soc->num_ast_entries >=
620 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
621 		qdf_spin_unlock_bh(&soc->ast_lock);
622 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
623 			  FL("Max ast entries reached"));
624 		return ret;
625 	}
626 
627 	/* If the AST entry already exists, just return from here.
628 	 * AST entries with the same MAC address can exist on different
629 	 * radios; if ast_override support is enabled, search by pdev id
630 	 * in that case.
631 	 */
632 	if (soc->ast_override_support) {
633 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
634 							    pdev->pdev_id);
635 		if (ast_entry) {
636 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
637 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
638 				ast_entry->is_active = TRUE;
639 
640 			qdf_spin_unlock_bh(&soc->ast_lock);
641 			return 0;
642 		}
643 		if (is_peer_found) {
644 			/* During WDS to static roaming, the peer is added
645 			 * to the list before the static AST entry is created.
646 			 * So allow creating a STATIC AST entry even if the
647 			 * peer is already present.
648 			 */
649 			if (type != CDP_TXRX_AST_TYPE_STATIC) {
650 				qdf_spin_unlock_bh(&soc->ast_lock);
651 				return 0;
652 			}
653 		}
654 	} else {
655 		/* HMWDS_SEC entries can be added for the same MAC address,
656 		 * so do not check for an existing entry.
657 		 */
658 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
659 			goto add_ast_entry;
660 
661 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
662 
663 		if (ast_entry) {
664 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
665 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
666 				ast_entry->is_active = TRUE;
667 
668 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
669 			    !ast_entry->delete_in_progress) {
670 				qdf_spin_unlock_bh(&soc->ast_lock);
671 				return 0;
672 			}
673 
674 			/* An HMWDS add cannot simply be ignored if an AST
675 			 * entry with the same MAC address already exists.
676 			 *
677 			 * If an AST entry exists with the requested MAC
678 			 * address, send a delete command and register a
679 			 * callback which takes care of adding the HMWDS AST
680 			 * entry on delete confirmation from the target.
681 			 */
682 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
683 			    soc->is_peer_map_unmap_v2) {
684 				struct dp_ast_free_cb_params *param = NULL;
685 
686 				if (ast_entry->type ==
687 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
688 					goto add_ast_entry;
689 
690 				/* save existing callback */
691 				if (ast_entry->callback) {
692 					cb = ast_entry->callback;
693 					cookie = ast_entry->cookie;
694 				}
695 
696 				param = qdf_mem_malloc(sizeof(*param));
697 				if (!param) {
698 					QDF_TRACE(QDF_MODULE_ID_TXRX,
699 						  QDF_TRACE_LEVEL_ERROR,
700 						  "Allocation failed");
701 					qdf_spin_unlock_bh(&soc->ast_lock);
702 					return ret;
703 				}
704 
705 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
706 					     QDF_MAC_ADDR_SIZE);
707 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
708 					     &peer->mac_addr.raw[0],
709 					     QDF_MAC_ADDR_SIZE);
710 				param->type = type;
711 				param->flags = flags;
712 				param->vdev_id = vdev->vdev_id;
713 				ast_entry->callback = dp_peer_free_hmwds_cb;
714 				ast_entry->pdev_id = vdev->pdev->pdev_id;
715 				ast_entry->type = type;
716 				ast_entry->cookie = (void *)param;
717 				if (!ast_entry->delete_in_progress)
718 					dp_peer_del_ast(soc, ast_entry);
719 			}
720 
721 			/* Modify an already existing AST entry from type
722 			 * WDS to MEC on promotion. This serves as a fix when
723 			 * the backbone interfaces are interchanged, wherein
724 			 * a WDS entry becomes its own MEC. The entry should be
725 			 * replaced only when the ast_entry peer matches the
726 			 * peer received in the MEC event. This additional check
727 			 * is needed in WDS repeater cases where a multicast
728 			 * packet from a station to the root via the repeater
729 			 * should not remove the WDS entry.
730 			 */
731 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
732 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
733 			    (ast_entry->peer == peer)) {
734 				ast_entry->is_active = FALSE;
735 				dp_peer_del_ast(soc, ast_entry);
736 			}
737 			qdf_spin_unlock_bh(&soc->ast_lock);
738 
739 			/* Call the saved callback */
740 			if (cb) {
741 				cb(soc->ctrl_psoc,
742 				   dp_soc_to_cdp_soc(soc),
743 				   cookie,
744 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
745 			}
746 			return 0;
747 		}
748 	}
749 
750 add_ast_entry:
751 	ast_entry = (struct dp_ast_entry *)
752 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
753 
754 	if (!ast_entry) {
755 		qdf_spin_unlock_bh(&soc->ast_lock);
756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
757 			  FL("fail to allocate ast_entry"));
758 		QDF_ASSERT(0);
759 		return ret;
760 	}
761 
762 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
763 	ast_entry->pdev_id = vdev->pdev->pdev_id;
764 	ast_entry->is_mapped = false;
765 	ast_entry->delete_in_progress = false;
766 
767 	switch (type) {
768 	case CDP_TXRX_AST_TYPE_STATIC:
769 		peer->self_ast_entry = ast_entry;
770 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
771 		if (peer->vdev->opmode == wlan_op_mode_sta)
772 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
773 		break;
774 	case CDP_TXRX_AST_TYPE_SELF:
775 		peer->self_ast_entry = ast_entry;
776 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
777 		break;
778 	case CDP_TXRX_AST_TYPE_WDS:
779 		ast_entry->next_hop = 1;
780 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
781 		break;
782 	case CDP_TXRX_AST_TYPE_WDS_HM:
783 		ast_entry->next_hop = 1;
784 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
785 		break;
786 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
787 		ast_entry->next_hop = 1;
788 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
789 		break;
790 	case CDP_TXRX_AST_TYPE_MEC:
791 		ast_entry->next_hop = 1;
792 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
793 		break;
794 	case CDP_TXRX_AST_TYPE_DA:
795 		peer = peer->vdev->vap_bss_peer;
796 		ast_entry->next_hop = 1;
797 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
798 		break;
799 	default:
800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
801 			FL("Incorrect AST entry type"));
802 	}
803 
804 	ast_entry->is_active = TRUE;
805 	DP_STATS_INC(soc, ast.added, 1);
806 	soc->num_ast_entries++;
807 	dp_peer_ast_hash_add(soc, ast_entry);
808 
809 	ast_entry->peer = peer;
810 
811 	if (type == CDP_TXRX_AST_TYPE_MEC)
812 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
813 	else
814 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
815 
816 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
817 
818 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
819 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
820 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
821 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
822 		if (QDF_STATUS_SUCCESS ==
823 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
824 				soc->ctrl_psoc,
825 				peer->vdev->vdev_id,
826 				peer->mac_addr.raw,
827 				mac_addr,
828 				next_node_mac,
829 				flags)) {
830 			qdf_spin_unlock_bh(&soc->ast_lock);
831 			return 0;
832 		}
833 	}
834 
835 	qdf_spin_unlock_bh(&soc->ast_lock);
836 	return ret;
837 }
838 
839 /*
840  * dp_peer_del_ast() - Delete and free AST entry
841  * @soc: SoC handle
842  * @ast_entry: AST entry of the node
843  *
844  * This function removes the AST entry from peer and soc tables
845  * It assumes caller has taken the ast lock to protect the access to these
846  * tables
847  *
848  * Return: None
849  */
850 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
851 {
852 	struct dp_peer *peer;
853 
854 	if (!ast_entry)
855 		return;
856 
857 	peer =  ast_entry->peer;
858 
859 	dp_peer_ast_send_wds_del(soc, ast_entry);
860 
861 	/*
862 	 * release the reference only if it is mapped
863 	 * to ast_table
864 	 */
865 	if (ast_entry->is_mapped)
866 		soc->ast_table[ast_entry->ast_idx] = NULL;
867 
868 	/*
869 	 * If peer map v2 is enabled, we are not freeing the ast entry
870 	 * here; it is supposed to be freed in the unmap event (after
871 	 * we receive the delete confirmation from the target).
872 	 *
873 	 * If the peer_id is invalid, we did not get the peer map event
874 	 * for the peer; free the ast entry from here only in this case.
875 	 */
876 	if (soc->is_peer_map_unmap_v2) {
877 
878 		/*
879 		 * For HM_SEC and SELF types we do not receive an unmap event,
880 		 * so free the ast_entry from here itself.
881 		 */
882 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
883 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
884 			return;
885 	}
886 
887 	/* SELF and STATIC entries are removed in teardown itself */
888 	if (ast_entry->next_hop)
889 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
890 
891 	DP_STATS_INC(soc, ast.deleted, 1);
892 	dp_peer_ast_hash_remove(soc, ast_entry);
893 	dp_peer_ast_cleanup(soc, ast_entry);
894 	qdf_mem_free(ast_entry);
895 	soc->num_ast_entries--;
896 }
897 
898 /*
899  * dp_peer_update_ast() - Update AST entry to the new (roamed) peer
900  * @soc: SoC handle
901  * @peer: peer to which ast node belongs
902  * @ast_entry: AST entry of the node
903  * @flags: wds or hmwds
904  *
905  * This function updates the AST entry to point to the roamed peer in the
906  * peer and soc tables. It assumes the caller has taken the ast lock to
907  * protect the access to these tables.
908  *
909  * Return: 0 if ast entry is updated successfully
910  *         -1 failure
911  */
912 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
913 		       struct dp_ast_entry *ast_entry, uint32_t flags)
914 {
915 	int ret = -1;
916 	struct dp_peer *old_peer;
917 
918 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
919 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
920 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
921 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
922 		  peer->mac_addr.raw);
923 
924 	/* Do not send an AST update in the below cases:
925 	 *  1) AST entry delete has already been triggered
926 	 *  2) Peer delete has already been triggered
927 	 *  3) We did not get the HTT map event for the create
928 	 */
929 	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
930 	    !ast_entry->is_mapped)
931 		return ret;
932 
933 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
934 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
935 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
936 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
937 		return 0;
938 
939 	/*
940 	 * Avoids flood of WMI update messages sent to FW for same peer.
941 	 */
942 	if (qdf_unlikely(ast_entry->peer == peer) &&
943 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
944 	    (ast_entry->peer->vdev == peer->vdev) &&
945 	    (ast_entry->is_active))
946 		return 0;
947 
948 	old_peer = ast_entry->peer;
949 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
950 
951 	ast_entry->peer = peer;
952 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
953 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
954 	ast_entry->is_active = TRUE;
955 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
956 
957 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
958 				soc->ctrl_psoc,
959 				peer->vdev->vdev_id,
960 				ast_entry->mac_addr.raw,
961 				peer->mac_addr.raw,
962 				flags);
963 
964 	return ret;
965 }
966 
967 /*
968  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
969  * @soc: SoC handle
970  * @ast_entry: AST entry of the node
971  *
972  * This function gets the pdev_id from the ast entry.
973  *
974  * Return: (uint8_t) pdev_id
975  */
976 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
977 				struct dp_ast_entry *ast_entry)
978 {
979 	return ast_entry->pdev_id;
980 }
981 
982 /*
983  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
984  * @soc: SoC handle
985  * @ast_entry: AST entry of the node
986  *
987  * This function gets the next hop from the ast entry.
988  *
989  * Return: (uint8_t) next_hop
990  */
991 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
992 				struct dp_ast_entry *ast_entry)
993 {
994 	return ast_entry->next_hop;
995 }
996 
997 /*
998  * dp_peer_ast_set_type() - set type in the ast entry
999  * @soc: SoC handle
1000  * @ast_entry: AST entry of the node
1001  *
1002  * This function sets the type in the ast entry.
1003  *
1004  * Return: None
1005  */
1006 void dp_peer_ast_set_type(struct dp_soc *soc,
1007 				struct dp_ast_entry *ast_entry,
1008 				enum cdp_txrx_ast_entry_type type)
1009 {
1010 	ast_entry->type = type;
1011 }
1012 
1013 #else
1014 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
1015 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
1016 		uint32_t flags)
1017 {
1018 	return 1;
1019 }
1020 
1021 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1022 {
1023 }
1024 
1025 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1026 			struct dp_ast_entry *ast_entry, uint32_t flags)
1027 {
1028 	return 1;
1029 }
1030 
1031 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1032 					       uint8_t *ast_mac_addr)
1033 {
1034 	return NULL;
1035 }
1036 
1037 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
1038 						     uint8_t *ast_mac_addr,
1039 						     uint8_t pdev_id)
1040 {
1041 	return NULL;
1042 }
1043 
1044 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
1045 {
1046 	return 0;
1047 }
1048 
1049 static inline void dp_peer_map_ast(struct dp_soc *soc,
1050 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
1051 	uint8_t vdev_id, uint16_t ast_hash)
1052 {
1053 	return;
1054 }
1055 
1056 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1057 {
1058 }
1059 
1060 void dp_peer_ast_set_type(struct dp_soc *soc,
1061 				struct dp_ast_entry *ast_entry,
1062 				enum cdp_txrx_ast_entry_type type)
1063 {
1064 }
1065 
1066 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
1067 				struct dp_ast_entry *ast_entry)
1068 {
1069 	return 0xff;
1070 }
1071 
1072 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1073 				struct dp_ast_entry *ast_entry)
1074 {
1075 	return 0xff;
1076 }
1077 
1078 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
1079 		       struct dp_ast_entry *ast_entry, uint32_t flags)
1080 {
1081 	return 1;
1082 }
1083 
1084 #endif
1085 
1086 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1087 			      struct dp_ast_entry *ast_entry)
1088 {
1089 	struct dp_peer *peer = ast_entry->peer;
1090 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1091 
1092 	if (ast_entry->delete_in_progress)
1093 		return;
1094 
1095 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1096 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1097 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1098 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1099 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1100 
1101 	if (ast_entry->next_hop) {
1102 		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
1103 						    peer->vdev->vdev_id,
1104 						    ast_entry->mac_addr.raw,
1105 						    ast_entry->type);
1106 	}
1107 
1108 	/* Remove SELF and STATIC entries in teardown itself */
1109 	if (!ast_entry->next_hop) {
1110 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1111 		peer->self_ast_entry = NULL;
1112 		ast_entry->peer = NULL;
1113 	}
1114 
1115 	ast_entry->delete_in_progress = true;
1116 }
1117 
1118 /**
1119  * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
1120  * @soc: soc handle
1121  * @peer: peer handle
1122  * @mac_addr: mac address of the AST entry to search and delete
1123  *
1124  * find the ast entry from the peer list using the mac address and free
1125  * the entry.
1126  *
1127  * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
1128  */
1129 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
1130 					 struct dp_peer *peer,
1131 					 uint8_t *mac_addr)
1132 {
1133 	struct dp_ast_entry *ast_entry;
1134 	void *cookie = NULL;
1135 	txrx_ast_free_cb cb = NULL;
1136 
1137 	/*
1138 	 * release the reference only if it is mapped
1139 	 * to ast_table
1140 	 */
1141 
1142 	qdf_spin_lock_bh(&soc->ast_lock);
1143 
1144 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
1145 	if (!ast_entry) {
1146 		qdf_spin_unlock_bh(&soc->ast_lock);
1147 		return QDF_STATUS_E_NOENT;
1148 	} else if (ast_entry->is_mapped) {
1149 		soc->ast_table[ast_entry->ast_idx] = NULL;
1150 	}
1151 
1152 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1153 	DP_STATS_INC(soc, ast.deleted, 1);
1154 	dp_peer_ast_hash_remove(soc, ast_entry);
1155 
1156 	cb = ast_entry->callback;
1157 	cookie = ast_entry->cookie;
1158 	ast_entry->callback = NULL;
1159 	ast_entry->cookie = NULL;
1160 
1161 	if (ast_entry == peer->self_ast_entry)
1162 		peer->self_ast_entry = NULL;
1163 
1164 	soc->num_ast_entries--;
1165 	qdf_spin_unlock_bh(&soc->ast_lock);
1166 
1167 	if (cb) {
1168 		cb(soc->ctrl_psoc,
1169 		   dp_soc_to_cdp_soc(soc),
1170 		   cookie,
1171 		   CDP_TXRX_AST_DELETED);
1172 	}
1173 	qdf_mem_free(ast_entry);
1174 
1175 	return QDF_STATUS_SUCCESS;
1176 }
1177 
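/*
 * dp_peer_find_hash_find() - look up a peer by MAC address and vdev_id
 * (DP_VDEV_ALL matches any vdev)
 *
 * On success the peer's reference count is incremented; the caller must
 * release the reference with dp_peer_unref_delete().
 */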
1178 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1179 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1180 {
1181 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1182 	unsigned index;
1183 	struct dp_peer *peer;
1184 
1185 	if (mac_addr_is_aligned) {
1186 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1187 	} else {
1188 		qdf_mem_copy(
1189 			&local_mac_addr_aligned.raw[0],
1190 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
1191 		mac_addr = &local_mac_addr_aligned;
1192 	}
1193 	index = dp_peer_find_hash_index(soc, mac_addr);
1194 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1195 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1196 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1197 			((peer->vdev->vdev_id == vdev_id) ||
1198 			 (vdev_id == DP_VDEV_ALL))) {
1199 			/* found it - increment the ref count before releasing
1200 			 * the lock
1201 			 */
1202 			qdf_atomic_inc(&peer->ref_cnt);
1203 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1204 			return peer;
1205 		}
1206 	}
1207 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1208 	return NULL; /* failure */
1209 }
1210 
1211 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1212 {
1213 	unsigned index;
1214 	struct dp_peer *tmppeer = NULL;
1215 	int found = 0;
1216 
1217 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1218 	/* Check if tail is not empty before delete*/
1219 	/* Check if the tailq is not empty before delete */
1220 	/*
1221 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1222 	 * by the caller.
1223 	 * The caller needs to hold the lock from the time the peer object's
1224 	 * reference count is decremented and tested up through the time the
1225 	 * reference to the peer object is removed from the hash table, by
1226 	 * this function.
1227 	 * Holding the lock only while removing the peer object reference
1228 	 * from the hash table keeps the hash table consistent, but does not
1229 	 * protect against a new HL tx context starting to use the peer object
1230 	 * if it looks up the peer object from its MAC address just after the
1231 	 * peer ref count is decremented to zero, but just before the peer
1232 	 * object reference is removed from the hash table.
1233 	 */
1234 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1235 		if (tmppeer == peer) {
1236 			found = 1;
1237 			break;
1238 		}
1239 	}
1240 	QDF_ASSERT(found);
1241 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1242 }
1243 
1244 void dp_peer_find_hash_erase(struct dp_soc *soc)
1245 {
1246 	int i;
1247 
1248 	/*
1249 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1250 	 * it's known that the soc is no longer in use.
1251 	 */
1252 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1253 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1254 			struct dp_peer *peer, *peer_next;
1255 
1256 			/*
1257 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1258 			 * memory access violation after peer is freed
1259 			 */
1260 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1261 				hash_list_elem, peer_next) {
1262 				/*
1263 				 * Don't remove the peer from the hash table -
1264 				 * that would modify the list we are currently
1265 				 * traversing, and it's not necessary anyway.
1266 				 */
1267 				/*
1268 				 * Artificially adjust the peer's ref count to
1269 				 * 1, so it will get deleted by
1270 				 * dp_peer_unref_delete.
1271 				 */
1272 				/* set to zero */
1273 				qdf_atomic_init(&peer->ref_cnt);
1274 				/* incr to one */
1275 				qdf_atomic_inc(&peer->ref_cnt);
1276 				dp_peer_unref_delete(peer);
1277 			}
1278 		}
1279 	}
1280 }
1281 
1282 static void dp_peer_ast_table_detach(struct dp_soc *soc)
1283 {
1284 	if (soc->ast_table) {
1285 		qdf_mem_free(soc->ast_table);
1286 		soc->ast_table = NULL;
1287 	}
1288 }
1289 
1290 static void dp_peer_find_map_detach(struct dp_soc *soc)
1291 {
1292 	if (soc->peer_id_to_obj_map) {
1293 		qdf_mem_free(soc->peer_id_to_obj_map);
1294 		soc->peer_id_to_obj_map = NULL;
1295 	}
1296 }
1297 
1298 int dp_peer_find_attach(struct dp_soc *soc)
1299 {
1300 	if (dp_peer_find_map_attach(soc))
1301 		return 1;
1302 
1303 	if (dp_peer_find_hash_attach(soc)) {
1304 		dp_peer_find_map_detach(soc);
1305 		return 1;
1306 	}
1307 
1308 	if (dp_peer_ast_table_attach(soc)) {
1309 		dp_peer_find_hash_detach(soc);
1310 		dp_peer_find_map_detach(soc);
1311 		return 1;
1312 	}
1313 
1314 	if (dp_peer_ast_hash_attach(soc)) {
1315 		dp_peer_ast_table_detach(soc);
1316 		dp_peer_find_hash_detach(soc);
1317 		dp_peer_find_map_detach(soc);
1318 		return 1;
1319 	}
1320 
1321 	return 0; /* success */
1322 }
1323 
1324 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1325 	union hal_reo_status *reo_status)
1326 {
1327 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1328 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1329 
1330 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
1331 		return;
1332 
1333 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1334 		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1335 			       queue_status->header.status, rx_tid->tid);
1336 		return;
1337 	}
1338 
1339 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1340 		       "ssn: %d\n"
1341 		       "curr_idx  : %d\n"
1342 		       "pn_31_0   : %08x\n"
1343 		       "pn_63_32  : %08x\n"
1344 		       "pn_95_64  : %08x\n"
1345 		       "pn_127_96 : %08x\n"
1346 		       "last_rx_enq_tstamp : %08x\n"
1347 		       "last_rx_deq_tstamp : %08x\n"
1348 		       "rx_bitmap_31_0     : %08x\n"
1349 		       "rx_bitmap_63_32    : %08x\n"
1350 		       "rx_bitmap_95_64    : %08x\n"
1351 		       "rx_bitmap_127_96   : %08x\n"
1352 		       "rx_bitmap_159_128  : %08x\n"
1353 		       "rx_bitmap_191_160  : %08x\n"
1354 		       "rx_bitmap_223_192  : %08x\n"
1355 		       "rx_bitmap_255_224  : %08x\n",
1356 		       rx_tid->tid,
1357 		       queue_status->ssn, queue_status->curr_idx,
1358 		       queue_status->pn_31_0, queue_status->pn_63_32,
1359 		       queue_status->pn_95_64, queue_status->pn_127_96,
1360 		       queue_status->last_rx_enq_tstamp,
1361 		       queue_status->last_rx_deq_tstamp,
1362 		       queue_status->rx_bitmap_31_0,
1363 		       queue_status->rx_bitmap_63_32,
1364 		       queue_status->rx_bitmap_95_64,
1365 		       queue_status->rx_bitmap_127_96,
1366 		       queue_status->rx_bitmap_159_128,
1367 		       queue_status->rx_bitmap_191_160,
1368 		       queue_status->rx_bitmap_223_192,
1369 		       queue_status->rx_bitmap_255_224);
1370 
1371 	DP_PRINT_STATS(
1372 		       "curr_mpdu_cnt      : %d\n"
1373 		       "curr_msdu_cnt      : %d\n"
1374 		       "fwd_timeout_cnt    : %d\n"
1375 		       "fwd_bar_cnt        : %d\n"
1376 		       "dup_cnt            : %d\n"
1377 		       "frms_in_order_cnt  : %d\n"
1378 		       "bar_rcvd_cnt       : %d\n"
1379 		       "mpdu_frms_cnt      : %d\n"
1380 		       "msdu_frms_cnt      : %d\n"
1381 		       "total_byte_cnt     : %d\n"
1382 		       "late_recv_mpdu_cnt : %d\n"
1383 		       "win_jump_2k        : %d\n"
1384 		       "hole_cnt           : %d\n",
1385 		       queue_status->curr_mpdu_cnt,
1386 		       queue_status->curr_msdu_cnt,
1387 		       queue_status->fwd_timeout_cnt,
1388 		       queue_status->fwd_bar_cnt,
1389 		       queue_status->dup_cnt,
1390 		       queue_status->frms_in_order_cnt,
1391 		       queue_status->bar_rcvd_cnt,
1392 		       queue_status->mpdu_frms_cnt,
1393 		       queue_status->msdu_frms_cnt,
1394 		       queue_status->total_cnt,
1395 		       queue_status->late_recv_mpdu_cnt,
1396 		       queue_status->win_jump_2k,
1397 		       queue_status->hole_cnt);
1398 
1399 	DP_PRINT_STATS("Addba Req          : %d\n"
1400 			"Addba Resp         : %d\n"
1401 			"Addba Resp success : %d\n"
1402 			"Addba Resp failed  : %d\n"
1403 			"Delba Req received : %d\n"
1404 			"Delba Tx success   : %d\n"
1405 			"Delba Tx Fail      : %d\n"
1406 			"BA window size     : %d\n"
1407 			"Pn size            : %d\n",
1408 			rx_tid->num_of_addba_req,
1409 			rx_tid->num_of_addba_resp,
1410 			rx_tid->num_addba_rsp_success,
1411 			rx_tid->num_addba_rsp_failed,
1412 			rx_tid->num_of_delba_req,
1413 			rx_tid->delba_tx_success_cnt,
1414 			rx_tid->delba_tx_fail_cnt,
1415 			rx_tid->ba_win_size,
1416 			rx_tid->pn_size);
1417 }
1418 
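/*
 * dp_peer_find_add_id() - associate a firmware-assigned peer_id with an
 * existing peer object
 *
 * Looks up the peer by MAC address, records it in peer_id_to_obj_map and
 * stores the peer_id in the peer's peer_ids[] array. Returns the peer
 * (with the reference taken by the hash lookup) or NULL if no peer with
 * this MAC address exists.
 */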
1419 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1420 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1421 	uint8_t vdev_id)
1422 {
1423 	struct dp_peer *peer;
1424 
1425 	QDF_ASSERT(peer_id <= soc->max_peers);
1426 	/* check if there's already a peer object with this MAC address */
1427 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1428 		0 /* is aligned */, vdev_id);
1429 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1430 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1431 		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1432 		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1433 		  peer_mac_addr[4], peer_mac_addr[5]);
1434 
1435 	if (peer) {
1436 		/* peer's ref count was already incremented by
1437 		 * peer_find_hash_find
1438 		 */
1439 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1440 			  "%s: ref_cnt: %d", __func__,
1441 			   qdf_atomic_read(&peer->ref_cnt));
1442 		if (!soc->peer_id_to_obj_map[peer_id])
1443 			soc->peer_id_to_obj_map[peer_id] = peer;
1444 		else {
1445 			/* Peer map event came for peer_id which
1446 			 * is already mapped, this is not expected
1447 			 */
1448 			QDF_ASSERT(0);
1449 		}
1450 
1451 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1452 			/* TBDXXX: assert for now */
1453 			QDF_ASSERT(0);
1454 		}
1455 
1456 		return peer;
1457 	}
1458 
1459 	return NULL;
1460 }
1461 
1462 /**
1463  * dp_rx_peer_map_handler() - handle peer map event from firmware
1464  * @soc - generic soc handle
1465  * @peer_id - peer_id from firmware
1466  * @hw_peer_id - ast index for this peer
1467  * @vdev_id - vdev ID
1468  * @peer_mac_addr - mac address of the peer
1469  * @ast_hash - ast hash value
1470  * @is_wds - flag to indicate peer map event for WDS ast entry
1471  *
1472  * associate the peer_id that firmware provided with the peer entry
1473  * and update the ast table in the host with the hw_peer_id.
1474  *
1475  * Return: none
1476  */
1477 
1478 void
1479 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
1480 		       uint16_t hw_peer_id, uint8_t vdev_id,
1481 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1482 		       uint8_t is_wds)
1483 {
1484 	struct dp_peer *peer = NULL;
1485 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1486 
1487 	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1488 		soc, peer_id, hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1489 		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1490 		  peer_mac_addr[5], vdev_id);
1491 
1492 	/* For a peer map event for a WDS AST entry, get the peer
1493 	 * from the obj map.
1494 	 */
1495 	if (is_wds) {
1496 		peer = soc->peer_id_to_obj_map[peer_id];
1497 		/*
1498 		 * In certain cases, like an Auth attack on a repeater,
1499 		 * the number of ast_entries falling in the same hash
1500 		 * bucket can exceed the max_skid length supported by
1501 		 * HW in the root AP. In these cases
1502 		 * the FW will return the hw_peer_id (ast_index) as
1503 		 * 0xffff indicating HW could not add the entry in
1504 		 * its table. Host has to delete the entry from its
1505 		 * table in these cases.
1506 		 */
1507 		if (hw_peer_id == HTT_INVALID_PEER) {
1508 			DP_STATS_INC(soc, ast.map_err, 1);
1509 			if (!dp_peer_ast_free_entry_by_mac(soc,
1510 							   peer,
1511 							   peer_mac_addr))
1512 				return;
1513 
1514 			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1515 				 peer, peer->peer_ids[0],
1516 				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1517 				 is_wds);
1518 
1519 			return;
1520 		}
1521 
1522 	} else {
1523 		/*
1524 		 * It's the responsibility of the CP and FW to ensure
1525 		 * that peer is created successfully. Ideally DP should
1526 		 * not hit the below condition for directly associated
1527 		 * peers.
1528 		 */
1529 		if ((hw_peer_id < 0) ||
1530 		    (hw_peer_id >=
1531 		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1532 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1533 				  "invalid hw_peer_id: %d", hw_peer_id);
1534 			qdf_assert_always(0);
1535 		}
1536 
1537 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1538 					   hw_peer_id, vdev_id);
1539 
1540 		if (peer) {
1541 			if (wlan_op_mode_sta == peer->vdev->opmode &&
1542 			    qdf_mem_cmp(peer->mac_addr.raw,
1543 					peer->vdev->mac_addr.raw,
1544 					QDF_MAC_ADDR_SIZE) != 0) {
1545 				dp_info("STA vdev bss_peer!!!!");
1546 				peer->bss_peer = 1;
1547 				peer->vdev->vap_bss_peer = peer;
1548 			}
1549 
1550 			if (peer->vdev->opmode == wlan_op_mode_sta) {
1551 				peer->vdev->bss_ast_hash = ast_hash;
1552 				peer->vdev->bss_ast_idx = hw_peer_id;
1553 			}
1554 
1555 			/* Add the AST entry in case the self AST entry was
1556 			 * deleted due to a DP/CP sync issue.
1557 			 *
1558 			 * self_ast_entry is modified in the peer create
1559 			 * and peer unmap paths, which cannot run in
1560 			 * parallel with peer map, so no lock is needed
1561 			 * before referring to it.
1562 			 */
1563 			if (!peer->self_ast_entry) {
1564 				dp_info("Add self ast from map %pM",
1565 					peer_mac_addr);
1566 				dp_peer_add_ast(soc, peer,
1567 						peer_mac_addr,
1568 						type, 0);
1569 			}
1570 
1571 		}
1572 	}
1573 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1574 			hw_peer_id, vdev_id, ast_hash);
1575 }
1576 
1577 /**
1578  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1579  * @soc - generic soc handle
1580  * @peer_id - peer_id from firmware
1581  * @vdev_id - vdev ID
1582  * @mac_addr - mac address of the peer or wds entry
1583  * @is_wds - flag to indicate peer unmap event for WDS ast entry
1584  *
1585  * Return: none
1586  */
1587 void
1588 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
1589 			 uint8_t vdev_id, uint8_t *mac_addr,
1590 			 uint8_t is_wds)
1591 {
1592 	struct dp_peer *peer;
1593 	uint8_t i;
1594 
1595 	peer = __dp_peer_find_by_id(soc, peer_id);
1596 
1597 	/*
1598 	 * Currently peer IDs are assigned for vdevs as well as peers.
1599 	 * If the peer ID is for a vdev, then the peer pointer stored
1600 	 * in peer_id_to_obj_map will be NULL.
1601 	 */
1602 	if (!peer) {
1603 		dp_err("Received unmap event for invalid peer_id %u", peer_id);
1604 		return;
1605 	}
1606 
1607 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1608 	 */
1609 	if (soc->is_peer_map_unmap_v2 && is_wds) {
1610 		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
1611 			return;
1612 
1613 		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1614 			 peer, peer->peer_ids[0],
1615 			 peer->mac_addr.raw, mac_addr, vdev_id,
1616 			 is_wds);
1617 
1618 		return;
1619 	}
1620 
1621 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1622 		soc, peer_id, peer);
1623 
1624 	soc->peer_id_to_obj_map[peer_id] = NULL;
1625 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1626 		if (peer->peer_ids[i] == peer_id) {
1627 			peer->peer_ids[i] = HTT_INVALID_PEER;
1628 			break;
1629 		}
1630 	}
1631 
1632 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1633 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1634 				peer_id, vdev_id);
1635 	}
1636 
1637 	/*
1638 	 * Remove a reference to the peer.
1639 	 * If there are no more references, delete the peer object.
1640 	 */
1641 	dp_peer_unref_delete(peer);
1642 }
1643 
1644 void
1645 dp_peer_find_detach(struct dp_soc *soc)
1646 {
1647 	dp_peer_find_map_detach(soc);
1648 	dp_peer_find_hash_detach(soc);
1649 	dp_peer_ast_hash_detach(soc);
1650 	dp_peer_ast_table_detach(soc);
1651 }
1652 
1653 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1654 	union hal_reo_status *reo_status)
1655 {
1656 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1657 
1658 	if ((reo_status->rx_queue_status.header.status !=
1659 		HAL_REO_CMD_SUCCESS) &&
1660 		(reo_status->rx_queue_status.header.status !=
1661 		HAL_REO_CMD_DRAIN)) {
1662 		/* Should not happen normally. Just print error for now */
1663 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1664 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1665 			  __func__,
1666 			  reo_status->rx_queue_status.header.status,
1667 			  rx_tid->tid);
1668 	}
1669 }
1670 
1671 /*
1672  * dp_find_peer_by_addr - find peer instance by mac address
1673  * @dev: physical device instance
1674  * @peer_mac_addr: peer mac address
1675  *
1676  * Return: peer instance pointer
1677  */
1678 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
1679 {
1680 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1681 	struct dp_peer *peer;
1682 
1683 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1684 
1685 	if (!peer)
1686 		return NULL;
1687 
1688 	dp_verbose_debug("peer %pK mac: %pM", peer,
1689 			 peer->mac_addr.raw);
1690 
1691 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1692 	 * Decrement it here.
1693 	 */
1694 	dp_peer_unref_delete(peer);
1695 
1696 	return peer;
1697 }
1698 
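/*
 * dp_get_peer_vdev_roaming_in_progress() - query the control path
 * (ol_ops->is_roam_inprogress) to check whether the peer's vdev is
 * currently roaming
 *
 * Return: true if roaming is in progress, false otherwise (including
 * when the peer is NULL or the callback is not registered).
 */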
1699 static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1700 {
1701 	struct ol_if_ops *ol_ops = NULL;
1702 	bool is_roaming = false;
1703 	uint8_t vdev_id = -1;
1704 
1705 	if (!peer) {
1706 		dp_info("Peer is NULL. No roaming possible");
1707 		return false;
1708 	}
1709 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1710 
1711 	if (ol_ops && ol_ops->is_roam_inprogress) {
1712 		dp_get_vdevid(peer, &vdev_id);
1713 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1714 	}
1715 
1716 	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1717 		peer->mac_addr.raw, vdev_id, is_roaming);
1718 
1719 	return is_roaming;
1720 }
1721 
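/*
 * dp_rx_tid_update_wifi3() - update an existing Rx TID REO queue descriptor
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number (>= IEEE80211_SEQ_MAX marks SSN invalid)
 *
 * Sends a CMD_UPDATE_RX_REO_QUEUE command for the TID's hardware queue
 * descriptor and, if the vdev is not roaming, asks the target (via
 * peer_rx_reorder_queue_setup) to refresh the Rx reorder queue.
 *
 * Return: QDF_STATUS code
 */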
1722 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1723 					 ba_window_size, uint32_t start_seq)
1724 {
1725 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1726 	struct dp_soc *soc = peer->vdev->pdev->soc;
1727 	struct hal_reo_cmd_params params;
1728 
1729 	qdf_mem_zero(&params, sizeof(params));
1730 
1731 	params.std.need_status = 1;
1732 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1733 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1734 	params.u.upd_queue_params.update_ba_window_size = 1;
1735 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1736 
1737 	if (start_seq < IEEE80211_SEQ_MAX) {
1738 		params.u.upd_queue_params.update_ssn = 1;
1739 		params.u.upd_queue_params.ssn = start_seq;
1740 	} else {
1741 		dp_set_ssn_valid_flag(&params, 0);
1742 	}
1743 
1744 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1745 			    dp_rx_tid_update_cb, rx_tid)) {
1746 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
1747 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1748 	}
1749 
1750 	rx_tid->ba_win_size = ba_window_size;
1751 
1752 	if (dp_get_peer_vdev_roaming_in_progress(peer))
1753 		return QDF_STATUS_E_PERM;
1754 
1755 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
1756 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1757 			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
1758 			peer->vdev->vdev_id, peer->mac_addr.raw,
1759 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1760 
1761 	return QDF_STATUS_SUCCESS;
1762 }
1763 
1764 /*
1765  * dp_reo_desc_free() - Callback to free reo descriptor memory after
1766  * HW cache flush
1767  *
1768  * @soc: DP SOC handle
1769  * @cb_ctxt: Callback context
1770  * @reo_status: REO command status
1771  */
1772 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1773 	union hal_reo_status *reo_status)
1774 {
1775 	struct reo_desc_list_node *freedesc =
1776 		(struct reo_desc_list_node *)cb_ctxt;
1777 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1778 
1779 	if ((reo_status->fl_cache_status.header.status !=
1780 		HAL_REO_CMD_SUCCESS) &&
1781 		(reo_status->fl_cache_status.header.status !=
1782 		HAL_REO_CMD_DRAIN)) {
1783 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1784 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1785 			  __func__,
1786 			  reo_status->rx_queue_status.header.status,
1787 			  freedesc->rx_tid.tid);
1788 	}
1789 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1790 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1791 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1792 	qdf_mem_unmap_nbytes_single(soc->osdev,
1793 		rx_tid->hw_qdesc_paddr,
1794 		QDF_DMA_BIDIRECTIONAL,
1795 		rx_tid->hw_qdesc_alloc_size);
1796 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1797 	qdf_mem_free(freedesc);
1798 }
1799 
1800 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
1801 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1802 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1803 {
1804 	if (dma_addr < 0x50000000)
1805 		return QDF_STATUS_E_FAILURE;
1806 	else
1807 		return QDF_STATUS_SUCCESS;
1808 }
1809 #else
1810 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1811 {
1812 	return QDF_STATUS_SUCCESS;
1813 }
1814 #endif
1815 
1816 
1817 /*
1818  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1819  * @peer: Datapath peer handle
1820  * @tid: TID
1821  * @ba_window_size: BlockAck window size
1822  * @start_seq: Starting sequence number
1823  *
1824  * Return: QDF_STATUS code
1825  */
1826 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1827 				 uint32_t ba_window_size, uint32_t start_seq)
1828 {
1829 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1830 	struct dp_vdev *vdev = peer->vdev;
1831 	struct dp_soc *soc = vdev->pdev->soc;
1832 	uint32_t hw_qdesc_size;
1833 	uint32_t hw_qdesc_align;
1834 	int hal_pn_type;
1835 	void *hw_qdesc_vaddr;
1836 	uint32_t alloc_tries = 0;
1837 	QDF_STATUS err = QDF_STATUS_SUCCESS;
1838 
1839 	if (peer->delete_in_progress ||
1840 	    !qdf_atomic_read(&peer->is_default_route_set))
1841 		return QDF_STATUS_E_FAILURE;
1842 
1843 	rx_tid->ba_win_size = ba_window_size;
1844 	if (rx_tid->hw_qdesc_vaddr_unaligned)
1845 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1846 			start_seq);
1847 	rx_tid->delba_tx_status = 0;
1848 	rx_tid->ppdu_id_2k = 0;
1849 	rx_tid->num_of_addba_req = 0;
1850 	rx_tid->num_of_delba_req = 0;
1851 	rx_tid->num_of_addba_resp = 0;
1852 	rx_tid->num_addba_rsp_failed = 0;
1853 	rx_tid->num_addba_rsp_success = 0;
1854 	rx_tid->delba_tx_success_cnt = 0;
1855 	rx_tid->delba_tx_fail_cnt = 0;
1856 	rx_tid->statuscode = 0;
1857 
1858 	/* TODO: Allocating HW queue descriptors based on max BA window size
1859 	 * for all QOS TIDs so that same descriptor can be used later when
1860 	 * ADDBA request is received. This should be changed to allocate HW
1861 	 * queue descriptors based on BA window size being negotiated (0 for
1862 	 * non BA cases), and reallocate when BA window size changes and also
1863 	 * send WMI message to FW to change the REO queue descriptor in Rx
1864 	 * peer entry as part of dp_rx_tid_update.
1865 	 */
1866 	if (tid != DP_NON_QOS_TID)
1867 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1868 			HAL_RX_MAX_BA_WINDOW, tid);
1869 	else
1870 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1871 			ba_window_size, tid);
1872 
1873 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1874 	/* To avoid unnecessary extra allocation for alignment, try allocating
1875 	 * exact size and see if we already have aligned address.
1876 	 */
1877 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1878 
1879 try_desc_alloc:
1880 	rx_tid->hw_qdesc_vaddr_unaligned =
1881 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1882 
1883 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1884 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1885 			  "%s: Rx tid HW desc alloc failed: tid %d",
1886 			  __func__, tid);
1887 		return QDF_STATUS_E_NOMEM;
1888 	}
1889 
1890 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1891 		hw_qdesc_align) {
1892 		/* Address allocated above is not aligned. Allocate extra
1893 		 * memory for alignment
1894 		 */
1895 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1896 		rx_tid->hw_qdesc_vaddr_unaligned =
1897 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1898 					hw_qdesc_align - 1);
1899 
1900 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1901 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1902 				  "%s: Rx tid HW desc alloc failed: tid %d",
1903 				  __func__, tid);
1904 			return QDF_STATUS_E_NOMEM;
1905 		}
1906 
1907 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1908 			rx_tid->hw_qdesc_vaddr_unaligned,
1909 			hw_qdesc_align);
1910 
1911 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1912 			  "%s: Total Size %d Aligned Addr %pK",
1913 			  __func__, rx_tid->hw_qdesc_alloc_size,
1914 			  hw_qdesc_vaddr);
1915 
1916 	} else {
1917 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1918 	}
1919 
1920 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1921 	 * Currently this is set based on htt indication
1922 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1923 	 */
1924 	switch (peer->security[dp_sec_ucast].sec_type) {
1925 	case cdp_sec_type_tkip_nomic:
1926 	case cdp_sec_type_aes_ccmp:
1927 	case cdp_sec_type_aes_ccmp_256:
1928 	case cdp_sec_type_aes_gcmp:
1929 	case cdp_sec_type_aes_gcmp_256:
1930 		hal_pn_type = HAL_PN_WPA;
1931 		break;
1932 	case cdp_sec_type_wapi:
1933 		if (vdev->opmode == wlan_op_mode_ap)
1934 			hal_pn_type = HAL_PN_WAPI_EVEN;
1935 		else
1936 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1937 		break;
1938 	default:
1939 		hal_pn_type = HAL_PN_NONE;
1940 		break;
1941 	}
1942 
1943 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1944 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1945 
1946 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1947 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1948 		&(rx_tid->hw_qdesc_paddr));
1949 
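	/* On emulation platforms dp_reo_desc_addr_chk() rejects bus addresses
	 * below 0x50000000; retry the allocation a bounded number of times
	 * until a usable address is obtained.
	 */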
1950 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1951 			QDF_STATUS_SUCCESS) {
1952 		if (alloc_tries++ < 10) {
1953 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1954 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1955 			goto try_desc_alloc;
1956 		} else {
1957 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1958 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1959 				  __func__, tid);
1960 			err = QDF_STATUS_E_NOMEM;
1961 			goto error;
1962 		}
1963 	}
1964 
1965 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
1966 		err = QDF_STATUS_E_PERM;
1967 		goto error;
1968 	}
1969 
1970 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1971 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1972 		    soc->ctrl_psoc,
1973 		    peer->vdev->pdev->pdev_id,
1974 		    peer->vdev->vdev_id,
1975 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1976 		    1, ba_window_size)) {
1977 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1978 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
1979 				  __func__, tid);
1980 			err = QDF_STATUS_E_FAILURE;
1981 			goto error;
1982 		}
1983 	}
1984 	return QDF_STATUS_SUCCESS;
1985 error:
1986 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1987 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
1988 		    QDF_STATUS_SUCCESS)
1989 			qdf_mem_unmap_nbytes_single(
1990 				soc->osdev,
1991 				rx_tid->hw_qdesc_paddr,
1992 				QDF_DMA_BIDIRECTIONAL,
1993 				rx_tid->hw_qdesc_alloc_size);
1994 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1995 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1996 	}
1997 	return err;
1998 }
1999 
2000 #ifdef REO_DESC_DEFER_FREE
2001 /*
2002  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
2003  * the desc back to the freelist and defer the deletion
2004  *
2005  * @soc: DP SOC handle
2006  * @desc: Base descriptor to be freed
2007  * @reo_status: REO command status
2008  */
2009 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2010 				 struct reo_desc_list_node *desc,
2011 				 union hal_reo_status *reo_status)
2012 {
2013 	desc->free_ts = qdf_get_system_timestamp();
2014 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2015 	qdf_list_insert_back(&soc->reo_desc_freelist,
2016 			     (qdf_list_node_t *)desc);
2017 }
2018 
2019 #else
2020 /*
2021  * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
2022  * cache fails, free the base REO desc anyway
2023  *
2024  * @soc: DP SOC handle
2025  * @desc: Base descriptor to be freed
2026  * @reo_status: REO command status
2027  */
2028 static void dp_reo_desc_clean_up(struct dp_soc *soc,
2029 				 struct reo_desc_list_node *desc,
2030 				 union hal_reo_status *reo_status)
2031 {
2032 	if (reo_status) {
2033 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2034 		reo_status->fl_cache_status.header.status = 0;
2035 		dp_reo_desc_free(soc, (void *)desc, reo_status);
2036 	}
2037 }
2038 #endif
2039 
2040 /*
2041  * dp_resend_update_reo_cmd() - Resend the UPDATE_RX_REO_QUEUE
2042  * cmd and re-insert desc into free list if send fails.
2043  *
2044  * @soc: DP SOC handle
2045  * @desc: desc with resend update cmd flag set
2046  * @rx_tid: Desc RX tid associated with update cmd for resetting
2047  * valid field to 0 in h/w
2048  */
2049 static void dp_resend_update_reo_cmd(struct dp_soc *soc,
2050 				     struct reo_desc_list_node *desc,
2051 				     struct dp_rx_tid *rx_tid)
2052 {
2053 	struct hal_reo_cmd_params params;
2054 
2055 	qdf_mem_zero(&params, sizeof(params));
2056 	params.std.need_status = 1;
2057 	params.std.addr_lo =
2058 		rx_tid->hw_qdesc_paddr & 0xffffffff;
2059 	params.std.addr_hi =
2060 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2061 	params.u.upd_queue_params.update_vld = 1;
2062 	params.u.upd_queue_params.vld = 0;
2063 	desc->resend_update_reo_cmd = false;
2064 	/*
2065 	 * If the cmd send fails then set resend_update_reo_cmd flag
2066 	 * and insert the desc at the end of the free list to retry.
2067 	 */
2068 	if (dp_reo_send_cmd(soc,
2069 			    CMD_UPDATE_RX_REO_QUEUE,
2070 			    &params,
2071 			    dp_rx_tid_delete_cb,
2072 			    (void *)desc)
2073 	    != QDF_STATUS_SUCCESS) {
2074 		desc->resend_update_reo_cmd = true;
2075 		desc->free_ts = qdf_get_system_timestamp();
2076 		qdf_list_insert_back(&soc->reo_desc_freelist,
2077 				     (qdf_list_node_t *)desc);
2078 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
2079 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2080 	}
2081 }
2082 
2083 /*
2084  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2085  * after deleting the entries (i.e., setting valid=0)
2086  *
2087  * @soc: DP SOC handle
2088  * @cb_ctxt: Callback context
2089  * @reo_status: REO command status
2090  */
2091 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2092 			 union hal_reo_status *reo_status)
2093 {
2094 	struct reo_desc_list_node *freedesc =
2095 		(struct reo_desc_list_node *)cb_ctxt;
2096 	uint32_t list_size;
2097 	struct reo_desc_list_node *desc;
2098 	unsigned long curr_ts = qdf_get_system_timestamp();
2099 	uint32_t desc_size, tot_desc_size;
2100 	struct hal_reo_cmd_params params;
2101 
2102 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2103 		qdf_mem_zero(reo_status, sizeof(*reo_status));
2104 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2105 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2106 		return;
2107 	} else if (reo_status->rx_queue_status.header.status !=
2108 		HAL_REO_CMD_SUCCESS) {
2109 		/* Should not happen normally. Just print error for now */
2110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2111 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
2112 			  __func__,
2113 			  reo_status->rx_queue_status.header.status,
2114 			  freedesc->rx_tid.tid);
2115 	}
2116 
2117 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2118 		"%s: rx_tid: %d status: %d", __func__,
2119 		freedesc->rx_tid.tid,
2120 		reo_status->rx_queue_status.header.status);
2121 
2122 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2123 	freedesc->free_ts = curr_ts;
2124 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
2125 		(qdf_list_node_t *)freedesc, &list_size);
2126 
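	/* Reclaim descriptors from the freelist that are eligible: the list
	 * has grown beyond REO_DESC_FREELIST_SIZE, the entry has aged past
	 * REO_DESC_FREE_DEFER_MS, or it is marked for a resend of the
	 * UPDATE_RX_REO_QUEUE command.
	 */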
2127 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2128 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2129 		((list_size >= REO_DESC_FREELIST_SIZE) ||
2130 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2131 		(desc->resend_update_reo_cmd && list_size))) {
2132 		struct dp_rx_tid *rx_tid;
2133 
2134 		qdf_list_remove_front(&soc->reo_desc_freelist,
2135 				(qdf_list_node_t **)&desc);
2136 		list_size--;
2137 		rx_tid = &desc->rx_tid;
2138 
2139 		/* First process descs with resend_update_reo_cmd set */
2140 		if (desc->resend_update_reo_cmd) {
2141 			dp_resend_update_reo_cmd(soc, desc, rx_tid);
2142 			continue;
2143 		}
2144 
2145 		/* Flush and invalidate REO descriptor from HW cache: Base and
2146 		 * extension descriptors should be flushed separately */
2147 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2148 		/* Get base descriptor size by passing non-qos TID */
2149 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2150 						   DP_NON_QOS_TID);
2151 
2152 		/* Flush reo extension descriptors */
2153 		while ((tot_desc_size -= desc_size) > 0) {
2154 			qdf_mem_zero(&params, sizeof(params));
2155 			params.std.addr_lo =
2156 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
2157 				tot_desc_size) & 0xffffffff;
2158 			params.std.addr_hi =
2159 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2160 
2161 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2162 							CMD_FLUSH_CACHE,
2163 							&params,
2164 							NULL,
2165 							NULL)) {
2166 				dp_err_rl("fail to send CMD_FLUSH_CACHE:"
2167 					  "tid %d desc %pK", rx_tid->tid,
2168 					  (void *)(rx_tid->hw_qdesc_paddr));
2169 				DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2170 			}
2171 		}
2172 
2173 		/* Flush base descriptor */
2174 		qdf_mem_zero(&params, sizeof(params));
2175 		params.std.need_status = 1;
2176 		params.std.addr_lo =
2177 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2178 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2179 
2180 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2181 							  CMD_FLUSH_CACHE,
2182 							  &params,
2183 							  dp_reo_desc_free,
2184 							  (void *)desc)) {
2185 			union hal_reo_status reo_status;
2186 			/*
2187 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
2188 			 * should be unmapped. The local reo_desc, together with the
2189 			 * TID queue desc, also needs to be freed accordingly.
2190 			 *
2191 			 * Invoke the desc_free function directly here to do the clean up.
2192 			 *
2193 			 * In case of the MCL path, add the desc back to the free
2194 			 * desc list and defer the deletion.
2195 			 */
2196 			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
2197 				   __func__, rx_tid->tid);
2198 			dp_reo_desc_clean_up(soc, desc, &reo_status);
2199 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2200 		}
2201 	}
2202 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2203 }
2204 
2205 /*
2206  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2207  * @peer: Datapath peer handle
2208  * @tid: TID
2209  *
2210  * Return: 0 on success, error code on failure
2211  */
2212 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
2213 {
2214 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2215 	struct dp_soc *soc = peer->vdev->pdev->soc;
2216 	struct hal_reo_cmd_params params;
2217 	struct reo_desc_list_node *freedesc =
2218 		qdf_mem_malloc(sizeof(*freedesc));
2219 
2220 	if (!freedesc) {
2221 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2222 			  "%s: malloc failed for freedesc: tid %d",
2223 			  __func__, tid);
2224 		return -ENOMEM;
2225 	}
2226 
2227 	freedesc->rx_tid = *rx_tid;
2228 	freedesc->resend_update_reo_cmd = false;
2229 
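	/* Build an UPDATE_RX_REO_QUEUE command that clears the valid bit of
	 * the HW queue descriptor; the cache flush and memory free are
	 * deferred to dp_rx_tid_delete_cb().
	 */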
2230 	qdf_mem_zero(&params, sizeof(params));
2231 
2232 	params.std.need_status = 1;
2233 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2234 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2235 	params.u.upd_queue_params.update_vld = 1;
2236 	params.u.upd_queue_params.vld = 0;
2237 
2238 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2239 			    dp_rx_tid_delete_cb, (void *)freedesc)
2240 		!= QDF_STATUS_SUCCESS) {
2241 		/* Defer the clean up to the call back context */
2242 		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2243 		freedesc->free_ts = qdf_get_system_timestamp();
2244 		freedesc->resend_update_reo_cmd = true;
2245 		qdf_list_insert_front(&soc->reo_desc_freelist,
2246 				      (qdf_list_node_t *)freedesc);
2247 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2248 		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2249 		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2250 	}
2251 
2252 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2253 	rx_tid->hw_qdesc_alloc_size = 0;
2254 	rx_tid->hw_qdesc_paddr = 0;
2255 
2256 	return 0;
2257 }
2258 
2259 #ifdef DP_LFR
2260 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2261 {
2262 	int tid;
2263 
2264 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2265 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2266 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2267 			  "Setting up TID %d for peer %pK peer->local_id %d",
2268 			  tid, peer, peer->local_id);
2269 	}
2270 }
2271 #else
2272 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2273 #endif
2274 
2275 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2276 /*
2277  * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
2278  * @peer: Datapath peer
2279  *
2280  */
2281 static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2282 {
2283 }
2284 
2285 /*
2286  * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
2287  * @peer: Datapath peer
2288  *
2289  */
2290 static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2291 {
2292 }
2293 
2294 /*
2295  * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
2296  * dp_peer_update_80211_hdr() – Update the 802.11 header for the peer
2297  * @peer: Datapath peer
2298  *
2299  */
2300 static inline void
2301 dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2302 {
2303 }
2304 #endif
2305 
2306 /*
2307  * dp_peer_tx_init() – Initialize peer TX state
2308  * @pdev: Datapath pdev
2309  * @peer: Datapath peer
2310  *
2311  */
2312 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2313 {
2314 	dp_peer_tid_queue_init(peer);
2315 	dp_peer_update_80211_hdr(peer->vdev, peer);
2316 }
2317 
2318 /*
2319  * dp_peer_tx_cleanup() – Deinitialize peer TX state
2320  * @vdev: Datapath vdev
2321  * @peer: Datapath peer
2322  *
2323  */
2324 static inline void
2325 dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2326 {
2327 	dp_peer_tid_queue_cleanup(peer);
2328 }
2329 
2330 /*
2331  * dp_peer_rx_init() – Initialize receive TID state
2332  * @pdev: Datapath pdev
2333  * @peer: Datapath peer
2334  *
2335  */
2336 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2337 {
2338 	int tid;
2339 	struct dp_rx_tid *rx_tid;
2340 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2341 		rx_tid = &peer->rx_tid[tid];
2342 		rx_tid->array = &rx_tid->base;
2343 		rx_tid->base.head = rx_tid->base.tail = NULL;
2344 		rx_tid->tid = tid;
2345 		rx_tid->defrag_timeout_ms = 0;
2346 		rx_tid->ba_win_size = 0;
2347 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2348 
2349 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2350 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2351 	}
2352 
2353 	peer->active_ba_session_cnt = 0;
2354 	peer->hw_buffer_size = 0;
2355 	peer->kill_256_sessions = 0;
2356 
2357 	/* Setup default (non-qos) rx tid queue */
2358 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2359 
2360 	/* Setup rx tid queue for TID 0.
2361 	 * Other queues will be set up on receiving the first packet, which will
2362 	 * cause a NULL REO queue error
2363 	 */
2364 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2365 
2366 	/*
2367 	 * Set up the rest of the TIDs to handle LFR
2368 	 */
2369 	dp_peer_setup_remaining_tids(peer);
2370 
2371 	/*
2372 	 * Set security defaults: no PN check, no security. The target may
2373 	 * send a HTT SEC_IND message to overwrite these defaults.
2374 	 */
2375 	peer->security[dp_sec_ucast].sec_type =
2376 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2377 }
2378 
2379 /*
2380  * dp_peer_rx_cleanup() – Cleanup receive TID state
2381  * @vdev: Datapath vdev
2382  * @peer: Datapath peer
2383  * @reuse: Peer reference reuse
2384  *
2385  */
2386 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2387 {
2388 	int tid;
2389 	uint32_t tid_delete_mask = 0;
2390 
2391 	dp_info("Remove tids for peer: %pK", peer);
2392 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2393 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2394 
2395 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2396 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
2397 			/* Cleanup defrag related resource */
2398 			dp_rx_defrag_waitlist_remove(peer, tid);
2399 			dp_rx_reorder_flush_frag(peer, tid);
2400 		}
2401 
2402 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2403 			dp_rx_tid_delete_wifi3(peer, tid);
2404 
2405 			tid_delete_mask |= (1 << tid);
2406 		}
2407 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2408 	}
2409 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2410 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2411 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2412 			peer->vdev->pdev->pdev_id,
2413 			peer->vdev->vdev_id, peer->mac_addr.raw,
2414 			tid_delete_mask);
2415 	}
2416 #endif
2417 	if (!reuse)
2418 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
2419 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2420 }
2421 
2422 #ifdef FEATURE_PERPKT_INFO
2423 /*
2424  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA PPDU stats in peer
2425  * @peer: Datapath peer
2426  *
2427  * return: void
2428  */
2429 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2430 {
2431 	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2432 		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2433 	peer->last_delayed_ba = false;
2434 	peer->last_delayed_ba_ppduid = 0;
2435 }
2436 #else
2437 /*
2438  * dp_peer_ppdu_delayed_ba_init() - Initialize delayed BA PPDU stats in peer
2439  * @peer: Datapath peer
2440  *
2441  * return: void
2442  */
2443 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2444 {
2445 }
2446 #endif
2447 
2448 /*
2449  * dp_peer_cleanup() – Cleanup peer information
2450  * @vdev: Datapath vdev
2451  * @peer: Datapath peer
2452  * @reuse: Peer reference reuse
2453  *
2454  */
2455 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
2456 {
2457 	dp_peer_tx_cleanup(vdev, peer);
2458 
2459 	/* cleanup the Rx reorder queues for this peer */
2460 	dp_peer_rx_cleanup(vdev, peer, reuse);
2461 }
2462 
2463 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2464  *                                window size when a request with
2465  *                                64 window size is received.
2466  *                                This is done as a WAR since HW can
2467  *                                have only one setting per peer (64 or 256).
2468  *                                For HKv2, we use per tid buffersize setting
2469  *                                for 0 to per_tid_basize_max_tid. For tid
2470  *                                more than per_tid_basize_max_tid we use HKv1
2471  *                                method.
2472  * @peer: Datapath peer
2473  *
2474  * Return: void
2475  */
2476 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2477 {
2478 	uint8_t delba_rcode = 0;
2479 	int tid;
2480 	struct dp_rx_tid *rx_tid = NULL;
2481 
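	/* TIDs below per_tid_basize_max_tid keep their own buffer size; only
	 * the remaining TIDs share the per-peer setting and need to be
	 * scanned for teardown.
	 */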
2482 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2483 	for (; tid < DP_MAX_TIDS; tid++) {
2484 		rx_tid = &peer->rx_tid[tid];
2485 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2486 
2487 		if (rx_tid->ba_win_size <= 64) {
2488 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2489 			continue;
2490 		} else {
2491 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2492 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2493 				/* send delba */
2494 				if (!rx_tid->delba_tx_status) {
2495 					rx_tid->delba_tx_retry++;
2496 					rx_tid->delba_tx_status = 1;
2497 					rx_tid->delba_rcode =
2498 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2499 					delba_rcode = rx_tid->delba_rcode;
2500 
2501 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2502 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2503 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2504 							peer->vdev->pdev->soc->ctrl_psoc,
2505 							peer->vdev->vdev_id,
2506 							peer->mac_addr.raw,
2507 							tid, delba_rcode);
2508 				} else {
2509 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2510 				}
2511 			} else {
2512 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2513 			}
2514 		}
2515 	}
2516 }
2517 
2518 /*
2519 * dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
2520 *
2521 * @cdp_soc: Datapath soc handle
2522 * @peer_mac: Datapath peer mac address
2523 * @vdev_id: id of datapath vdev
2524 * @tid: TID number
2525 * @status: tx completion status
2526 * Return: 0 on success, error code on failure
2527 */
2528 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
2529 				      uint8_t *peer_mac,
2530 				      uint16_t vdev_id,
2531 				      uint8_t tid, int status)
2532 {
2533 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2534 						       peer_mac, 0, vdev_id);
2535 	struct dp_rx_tid *rx_tid = NULL;
2536 
2537 	if (!peer || peer->delete_in_progress) {
2538 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2539 			  "%s: Peer is NULL!\n", __func__);
2540 		goto fail;
2541 	}
2542 	rx_tid = &peer->rx_tid[tid];
2543 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2544 	if (status) {
2545 		rx_tid->num_addba_rsp_failed++;
2546 		dp_rx_tid_update_wifi3(peer, tid, 1,
2547 				       IEEE80211_SEQ_MAX);
2548 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2549 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2550 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
2551 
2552 		goto success;
2553 	}
2554 
2555 	rx_tid->num_addba_rsp_success++;
2556 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2557 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2558 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2559 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2560 			__func__, tid);
2561 		goto fail;
2562 	}
2563 
2564 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2565 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2566 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2567 			  "%s: default route is not set for peer: %pM",
2568 			  __func__, peer->mac_addr.raw);
2569 		goto fail;
2570 	}
2571 
2572 	/* First Session */
2573 	if (peer->active_ba_session_cnt == 0) {
2574 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2575 			peer->hw_buffer_size = 256;
2576 		else
2577 			peer->hw_buffer_size = 64;
2578 	}
2579 
2580 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2581 
2582 	peer->active_ba_session_cnt++;
2583 
2584 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2585 
2586 	/* Kill any session having 256 buffer size
2587 	 * when 64 buffer size request is received.
2588 	 * Also, latch on to 64 as new buffer size.
2589 	 */
2590 	if (peer->kill_256_sessions) {
2591 		dp_teardown_256_ba_sessions(peer);
2592 		peer->kill_256_sessions = 0;
2593 	}
2594 
2595 success:
2596 	dp_peer_unref_delete(peer);
2597 	return QDF_STATUS_SUCCESS;
2598 
2599 fail:
2600 	if (peer)
2601 		dp_peer_unref_delete(peer);
2602 
2603 	return QDF_STATUS_E_FAILURE;
2604 }
2605 
2606 /*
2607 * dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
2608 *
2609 * @cdp_soc: Datapath soc handle
2610 * @peer_mac: Datapath peer mac address
2611 * @vdev_id: id of datapath vdev
2612 * @tid: TID number
2613 * @dialogtoken: output dialogtoken
2614 * @statuscode: output status code
2615 * @buffersize: Output BA window size
2616 * @batimeout: Output BA timeout
2617 */
2618 QDF_STATUS
2619 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2620 			     uint16_t vdev_id, uint8_t tid,
2621 			     uint8_t *dialogtoken, uint16_t *statuscode,
2622 			     uint16_t *buffersize, uint16_t *batimeout)
2623 {
2624 	struct dp_rx_tid *rx_tid = NULL;
2625 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2626 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2627 						       peer_mac, 0, vdev_id);
2628 
2629 	if (!peer || peer->delete_in_progress) {
2630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2631 			  "%s: Peer is NULL!\n", __func__);
2632 		status = QDF_STATUS_E_FAILURE;
2633 		goto fail;
2634 	}
2635 	rx_tid = &peer->rx_tid[tid];
2636 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2637 	rx_tid->num_of_addba_resp++;
2638 	/* setup ADDBA response parameters */
2639 	*dialogtoken = rx_tid->dialogtoken;
2640 	*statuscode = rx_tid->statuscode;
2641 	*buffersize = rx_tid->ba_win_size;
2642 	*batimeout  = 0;
2643 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2644 
2645 fail:
2646 	if (peer)
2647 		dp_peer_unref_delete(peer);
2648 
2649 	return status;
2650 }
2651 
2652 /* dp_check_ba_buffersize() - Check buffer size in request
2653  *                            and latch onto this size based on the
2654  *                            size used in the first active session.
2655  * @peer: Datapath peer
2656  * @tid: Tid
2657  * @buffersize: Block ack window size
2658  *
2659  * Return: void
2660  */
2661 static void dp_check_ba_buffersize(struct dp_peer *peer,
2662 				   uint16_t tid,
2663 				   uint16_t buffersize)
2664 {
2665 	struct dp_rx_tid *rx_tid = NULL;
2666 
2667 	rx_tid = &peer->rx_tid[tid];
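	/* TIDs below per_tid_basize_max_tid use a per-TID buffer size; the
	 * rest latch onto the size (64 or 256) used by the first active
	 * session, and a 64 request against a 256 setting marks the existing
	 * 256 sessions for teardown.
	 */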
2668 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2669 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2670 		rx_tid->ba_win_size = buffersize;
2671 		return;
2672 	} else {
2673 		if (peer->active_ba_session_cnt == 0) {
2674 			rx_tid->ba_win_size = buffersize;
2675 		} else {
2676 			if (peer->hw_buffer_size == 64) {
2677 				if (buffersize <= 64)
2678 					rx_tid->ba_win_size = buffersize;
2679 				else
2680 					rx_tid->ba_win_size = peer->hw_buffer_size;
2681 			} else if (peer->hw_buffer_size == 256) {
2682 				if (buffersize > 64) {
2683 					rx_tid->ba_win_size = buffersize;
2684 				} else {
2685 					rx_tid->ba_win_size = buffersize;
2686 					peer->hw_buffer_size = 64;
2687 					peer->kill_256_sessions = 1;
2688 				}
2689 			}
2690 		}
2691 	}
2692 }
2693 
2694 /*
2695  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2696  *
2697  * @soc: Datapath soc handle
2698  * @peer_mac: Datapath peer mac address
2699  * @vdev_id: id of datapath vdev
2700  * @dialogtoken: dialogtoken from ADDBA frame
2701  * @tid: TID number
2702  * @batimeout: BA timeout
2703  * @buffersize: BA window size
2704  * @startseqnum: Start seq. number received in BA sequence control
2705  *
2706  * Return: 0 on success, error code on failure
2707  */
2708 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
2709 				  uint8_t *peer_mac,
2710 				  uint16_t vdev_id,
2711 				  uint8_t dialogtoken,
2712 				  uint16_t tid, uint16_t batimeout,
2713 				  uint16_t buffersize,
2714 				  uint16_t startseqnum)
2715 {
2716 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2717 	struct dp_rx_tid *rx_tid = NULL;
2718 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2719 						       peer_mac, 0, vdev_id);
2720 
2721 	if (!peer || peer->delete_in_progress) {
2722 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2723 			  "%s: Peer is NULL!\n", __func__);
2724 		status = QDF_STATUS_E_FAILURE;
2725 		goto fail;
2726 	}
2727 	rx_tid = &peer->rx_tid[tid];
2728 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2729 	rx_tid->num_of_addba_req++;
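	/* If a BA session is already active on this TID, tear down the
	 * existing HW queue state before re-negotiating with the new
	 * parameters.
	 */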
2730 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2731 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
2732 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2733 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2734 		peer->active_ba_session_cnt--;
2735 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2736 			  "%s: Rx Tid- %d hw qdesc is already setup",
2737 			__func__, tid);
2738 	}
2739 
2740 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2741 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2742 		status = QDF_STATUS_E_FAILURE;
2743 		goto fail;
2744 	}
2745 	dp_check_ba_buffersize(peer, tid, buffersize);
2746 
2747 	if (dp_rx_tid_setup_wifi3(peer, tid,
2748 	    rx_tid->ba_win_size, startseqnum)) {
2749 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2750 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2751 		status = QDF_STATUS_E_FAILURE;
2752 		goto fail;
2753 	}
2754 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2755 
2756 	rx_tid->dialogtoken = dialogtoken;
2757 	rx_tid->startseqnum = startseqnum;
2758 
2759 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2760 		rx_tid->statuscode = rx_tid->userstatuscode;
2761 	else
2762 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2763 
2764 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2765 
2766 fail:
2767 	if (peer)
2768 		dp_peer_unref_delete(peer);
2769 
2770 	return status;
2771 }
2772 
2773 /*
2774 * dp_set_addba_response() – Set a user defined ADDBA response status code
2775 *
2776 * @soc: Datapath soc handle
2777 * @peer_mac: Datapath peer mac address
2778 * @vdev_id: id of datapath vdev
2779 * @tid: TID number
2780 * @statuscode: response status code to be set
2781 */
2782 QDF_STATUS
2783 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2784 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
2785 {
2786 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2787 						       peer_mac, 0, vdev_id);
2788 	struct dp_rx_tid *rx_tid;
2789 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2790 
2791 	if (!peer || peer->delete_in_progress) {
2792 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2793 			  "%s: Peer is NULL!\n", __func__);
2794 		status = QDF_STATUS_E_FAILURE;
2795 		goto fail;
2796 	}
2797 
2798 	rx_tid = &peer->rx_tid[tid];
2799 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2800 	rx_tid->userstatuscode = statuscode;
2801 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2802 fail:
2803 	if (peer)
2804 		dp_peer_unref_delete(peer);
2805 
2806 	return status;
2807 }
2808 
2809 /*
2810 * dp_delba_process_wifi3() – Process DELBA from peer
2811 * @cdp_soc: Datapath soc handle
2812 * @peer_mac: Datapath peer mac address
2813 * @vdev_id: id of datapath vdev
2814 * @tid: TID number
2815 * @reasoncode: Reason code received in DELBA frame
2816 *
2817 * Return: 0 on success, error code on failure
2818 */
2819 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2820 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
2821 {
2822 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2823 	struct dp_rx_tid *rx_tid;
2824 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2825 						      peer_mac, 0, vdev_id);
2826 
2827 	if (!peer || peer->delete_in_progress) {
2828 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2829 			  "%s: Peer is NULL!\n", __func__);
2830 		status = QDF_STATUS_E_FAILURE;
2831 		goto fail;
2832 	}
2833 	rx_tid = &peer->rx_tid[tid];
2834 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2835 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2836 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2837 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2838 		status = QDF_STATUS_E_FAILURE;
2839 		goto fail;
2840 	}
2841 	/* TODO: See if we can delete the existing REO queue descriptor and
2842 	 * replace it with a new one without the queue extension descriptor to save
2843 	 * memory
2844 	 */
2845 	rx_tid->delba_rcode = reasoncode;
2846 	rx_tid->num_of_delba_req++;
2847 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2848 
2849 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2850 	peer->active_ba_session_cnt--;
2851 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2852 fail:
2853 	if (peer)
2854 		dp_peer_unref_delete(peer);
2855 
2856 	return status;
2857 }
2858 
2859 /*
2860  * dp_delba_tx_completion_wifi3() – Handle DELBA tx completion
2861  *
2862  * @cdp_soc: Datapath soc handle
2863  * @peer_mac: Datapath peer mac address
2864  * @vdev_id: id of datapath vdev
2865  * @tid: TID number
2866  * @status: tx completion status
2867  * Return: 0 on success, error code on failure
2868  */
2869 
2870 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
2871 				 uint16_t vdev_id,
2872 				 uint8_t tid, int status)
2873 {
2874 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
2875 	struct dp_rx_tid *rx_tid = NULL;
2876 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
2877 						      peer_mac, 0, vdev_id);
2878 
2879 	if (!peer || peer->delete_in_progress) {
2880 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2881 			  "%s: Peer is NULL!", __func__);
2882 		ret = QDF_STATUS_E_FAILURE;
2883 		goto end;
2884 	}
2885 	rx_tid = &peer->rx_tid[tid];
2886 	qdf_spin_lock_bh(&rx_tid->tid_lock);
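	/* On DELBA tx failure, resend the DELBA up to DP_MAX_DELBA_RETRY
	 * times before giving up and clearing the retry state.
	 */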
2887 	if (status) {
2888 		rx_tid->delba_tx_fail_cnt++;
2889 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2890 			rx_tid->delba_tx_retry = 0;
2891 			rx_tid->delba_tx_status = 0;
2892 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2893 		} else {
2894 			rx_tid->delba_tx_retry++;
2895 			rx_tid->delba_tx_status = 1;
2896 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2897 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2898 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2899 					peer->vdev->pdev->soc->ctrl_psoc,
2900 					peer->vdev->vdev_id,
2901 					peer->mac_addr.raw, tid,
2902 					rx_tid->delba_rcode);
2903 		}
2904 		goto end;
2905 	} else {
2906 		rx_tid->delba_tx_success_cnt++;
2907 		rx_tid->delba_tx_retry = 0;
2908 		rx_tid->delba_tx_status = 0;
2909 	}
2910 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2911 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2912 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2913 		peer->active_ba_session_cnt--;
2914 	}
2915 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2916 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
2917 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2918 	}
2919 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2920 
2921 end:
2922 	if (peer)
2923 		dp_peer_unref_delete(peer);
2924 
2925 	return ret;
2926 }
2927 
2928 /**
2929  * dp_set_pn_check_wifi3() - Enable PN check in REO for security
2930  * @soc: Datapath soc handle
2931  * @vdev_id: id of datapath vdev
2932  * @peer_mac: Datapath peer mac address
2933  * @sec_type: security type
2934  * @rx_pn: Receive pn starting number
2937  *
2938  */
2939 
2940 QDF_STATUS
2941 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
2942 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
2943 		      uint32_t *rx_pn)
2944 {
2945 	struct dp_pdev *pdev;
2946 	int i;
2947 	uint8_t pn_size;
2948 	struct hal_reo_cmd_params params;
2949 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2950 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
2951 				peer_mac, 0, vdev_id);
2952 	struct dp_vdev *vdev =
2953 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
2954 						   vdev_id);
2955 
2956 	if (!vdev || !peer || peer->delete_in_progress) {
2957 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2958 			  "%s: Peer is NULL!\n", __func__);
2959 		status = QDF_STATUS_E_FAILURE;
2960 		goto fail;
2961 	}
2962 
2963 	pdev = vdev->pdev;
2964 	qdf_mem_zero(&params, sizeof(params));
2965 
2966 	params.std.need_status = 1;
2967 	params.u.upd_queue_params.update_pn_valid = 1;
2968 	params.u.upd_queue_params.update_pn_size = 1;
2969 	params.u.upd_queue_params.update_pn = 1;
2970 	params.u.upd_queue_params.update_pn_check_needed = 1;
2971 	params.u.upd_queue_params.update_svld = 1;
2972 	params.u.upd_queue_params.svld = 0;
2973 
2974 	peer->security[dp_sec_ucast].sec_type = sec_type;
2975 
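	/* Select the PN size the REO should enforce for the negotiated
	 * cipher: 48 bits for TKIP/CCMP/GCMP, 128 bits for WAPI (even or
	 * uneven PN depending on AP/STA role), and no PN check otherwise.
	 */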
2976 	switch (sec_type) {
2977 	case cdp_sec_type_tkip_nomic:
2978 	case cdp_sec_type_aes_ccmp:
2979 	case cdp_sec_type_aes_ccmp_256:
2980 	case cdp_sec_type_aes_gcmp:
2981 	case cdp_sec_type_aes_gcmp_256:
2982 		params.u.upd_queue_params.pn_check_needed = 1;
2983 		params.u.upd_queue_params.pn_size = 48;
2984 		pn_size = 48;
2985 		break;
2986 	case cdp_sec_type_wapi:
2987 		params.u.upd_queue_params.pn_check_needed = 1;
2988 		params.u.upd_queue_params.pn_size = 128;
2989 		pn_size = 128;
2990 		if (vdev->opmode == wlan_op_mode_ap) {
2991 			params.u.upd_queue_params.pn_even = 1;
2992 			params.u.upd_queue_params.update_pn_even = 1;
2993 		} else {
2994 			params.u.upd_queue_params.pn_uneven = 1;
2995 			params.u.upd_queue_params.update_pn_uneven = 1;
2996 		}
2997 		break;
2998 	default:
2999 		params.u.upd_queue_params.pn_check_needed = 0;
3000 		pn_size = 0;
3001 		break;
3002 	}
3003 
3004 
3005 	for (i = 0; i < DP_MAX_TIDS; i++) {
3006 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3007 		qdf_spin_lock_bh(&rx_tid->tid_lock);
3008 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3009 			params.std.addr_lo =
3010 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3011 			params.std.addr_hi =
3012 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3013 
3014 			if (pn_size) {
3015 				QDF_TRACE(QDF_MODULE_ID_DP,
3016 					  QDF_TRACE_LEVEL_INFO_HIGH,
3017 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
3018 					  __func__, i, rx_pn[3], rx_pn[2],
3019 					  rx_pn[1], rx_pn[0]);
3020 				params.u.upd_queue_params.update_pn_valid = 1;
3021 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
3022 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
3023 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
3024 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
3025 			}
3026 			rx_tid->pn_size = pn_size;
3027 			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
3028 					    CMD_UPDATE_RX_REO_QUEUE,
3029 					    &params, dp_rx_tid_update_cb,
3030 					    rx_tid)) {
3031 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
3032 					   "tid %d desc %pK", rx_tid->tid,
3033 					   (void *)(rx_tid->hw_qdesc_paddr));
3034 				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
3035 					     rx.err.reo_cmd_send_fail, 1);
3036 			}
3037 		} else {
3038 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3039 				  "PN Check not setup for TID :%d ", i);
3040 		}
3041 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
3042 	}
3043 fail:
3044 	if (peer)
3045 		dp_peer_unref_delete(peer);
3046 
3047 	return status;
3048 }
3049 
3050 
3051 void
3052 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3053 		      enum cdp_sec_type sec_type, int is_unicast,
3054 		      u_int32_t *michael_key,
3055 		      u_int32_t *rx_pn)
3056 {
3057 	struct dp_peer *peer;
3058 	int sec_index;
3059 
3060 	peer = dp_peer_find_by_id(soc, peer_id);
3061 	if (!peer) {
3062 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3063 			  "Couldn't find peer from ID %d - skipping security inits",
3064 			  peer_id);
3065 		return;
3066 	}
3067 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3068 		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
3069 		  peer,
3070 		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3071 		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3072 		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
3073 		  is_unicast ? "ucast" : "mcast",
3074 		  sec_type);
3075 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3076 	peer->security[sec_index].sec_type = sec_type;
3077 #ifdef notyet /* TODO: See if this is required for defrag support */
3078 	/* michael key only valid for TKIP, but for simplicity,
3079 	 * copy it anyway
3080 	 */
3081 	qdf_mem_copy(
3082 		&peer->security[sec_index].michael_key[0],
3083 		michael_key,
3084 		sizeof(peer->security[sec_index].michael_key));
3085 #ifdef BIG_ENDIAN_HOST
3086 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
3087 				 sizeof(peer->security[sec_index].michael_key));
3088 #endif /* BIG_ENDIAN_HOST */
3089 #endif
3090 
3091 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3092 	if (sec_type != cdp_sec_type_wapi) {
3093 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3094 	} else {
3095 		for (i = 0; i < DP_MAX_TIDS; i++) {
3096 			/*
3097 			 * Setting PN valid bit for WAPI sec_type,
3098 			 * since WAPI PN has to be started with predefined value
3099 			 */
3100 			peer->tids_last_pn_valid[i] = 1;
3101 			qdf_mem_copy(
3102 				(u_int8_t *) &peer->tids_last_pn[i],
3103 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3104 			peer->tids_last_pn[i].pn128[1] =
3105 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3106 			peer->tids_last_pn[i].pn128[0] =
3107 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3108 		}
3109 	}
3110 #endif
3111 	/* TODO: Update HW TID queue with PN check parameters (pn type for
3112 	 * all security types and last pn for WAPI) once REO command API
3113 	 * is available
3114 	 */
3115 
3116 	dp_peer_unref_del_find_by_id(peer);
3117 }
3118 
3119 #ifdef DP_PEER_EXTENDED_API
3120 /**
3121  * dp_register_peer() - Register peer into physical device
3122  * @pdev - data path device instance
3123  * @sta_desc - peer description
3124  *
3125  * Register peer into physical device
3126  *
3127  * Return: QDF_STATUS_SUCCESS registration success
3128  *         QDF_STATUS_E_FAULT peer not found
3129  */
3130 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
3131 		struct ol_txrx_desc_type *sta_desc)
3132 {
3133 	struct dp_peer *peer;
3134 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3135 
3136 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
3137 				    sta_desc->peer_addr.bytes);
3138 
3139 	if (!peer)
3140 		return QDF_STATUS_E_FAULT;
3141 
3142 	qdf_spin_lock_bh(&peer->peer_info_lock);
3143 	peer->state = OL_TXRX_PEER_STATE_CONN;
3144 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3145 
3146 	dp_rx_flush_rx_cached(peer, false);
3147 
3148 	return QDF_STATUS_SUCCESS;
3149 }
3150 
3151 /**
3152  * dp_clear_peer() - remove peer from physical device
3153  * @pdev - data path device instance
3154  * @peer_addr - peer mac address
3155  *
3156  * remove peer from physical device
3157  *
3158  * Return: QDF_STATUS_SUCCESS on success
3159  *         QDF_STATUS_E_FAULT peer not found
3160  */
3161 QDF_STATUS
3162 dp_clear_peer(struct cdp_pdev *pdev_handle, struct qdf_mac_addr peer_addr)
3163 {
3164 	struct dp_peer *peer;
3165 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3166 
3167 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3168 	if (!peer)
3169 		return QDF_STATUS_E_FAULT;
3170 
3171 	qdf_spin_lock_bh(&peer->peer_info_lock);
3172 	peer->state = OL_TXRX_PEER_STATE_DISC;
3173 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3174 
3175 	dp_rx_flush_rx_cached(peer, true);
3176 
3177 	return QDF_STATUS_SUCCESS;
3178 }
3179 
3180 /**
3181  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
3182  * @pdev - data path device instance
3183  * @vdev - virtual interface instance
3184  * @peer_addr - peer mac address
3185  *
3186  * Find peer by peer mac address within vdev
3187  *
3188  * Return: peer instance void pointer
3189  *         NULL cannot find target peer
3190  */
3191 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
3192 		struct cdp_vdev *vdev_handle,
3193 		uint8_t *peer_addr)
3194 {
3195 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3196 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3197 	struct dp_peer *peer;
3198 
3199 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);
3200 
3201 	if (!peer)
3202 		return NULL;
3203 
3204 	if (peer->vdev != vdev) {
3205 		dp_peer_unref_delete(peer);
3206 		return NULL;
3207 	}
3208 
3209 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3210 	 * Decrement it here.
3211 	 */
3212 	dp_peer_unref_delete(peer);
3213 
3214 	return peer;
3215 }
3216 
3217 /**
3218  * dp_peer_state_update() - update peer local state
3219  * @pdev - data path device instance
3220  * @peer_addr - peer mac address
3221  * @state - new peer local state
3222  *
3223  * update peer local state
3224  *
3225  * Return: QDF_STATUS_SUCCESS on success
3226  */
3227 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
3228 		enum ol_txrx_peer_state state)
3229 {
3230 	struct dp_peer *peer;
3231 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3232 
3233 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
3234 	if (!peer) {
3235 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3236 			  "Failed to find peer for: [%pM]", peer_mac);
3237 		return QDF_STATUS_E_FAILURE;
3238 	}
3239 	peer->state = state;
3240 
3241 	dp_info("peer %pK state %d", peer, peer->state);
3242 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
3243 	 * Decrement it here.
3244 	 */
3245 	dp_peer_unref_delete(peer);
3246 
3247 	return QDF_STATUS_SUCCESS;
3248 }
3249 
3250 /**
3251  * dp_get_vdevid() - Get virtual interface id which peer registered
3252  * @peer - peer instance
3253  * @vdev_id - virtual interface id which peer registered
3254  *
3255  * Get virtual interface id which peer registered
3256  *
3257  * Return: QDF_STATUS_SUCCESS on success
3258  */
3259 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
3260 {
3261 	struct dp_peer *peer = peer_handle;
3262 
3263 	dp_info("peer %pK vdev %pK vdev id %d",
3264 		peer, peer->vdev, peer->vdev->vdev_id);
3265 	*vdev_id = peer->vdev->vdev_id;
3266 	return QDF_STATUS_SUCCESS;
3267 }
3268 
3269 struct cdp_vdev *
3270 dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
3271 			 struct qdf_mac_addr peer_addr)
3272 {
3273 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3274 	struct dp_peer *peer = NULL;
3275 
3276 	if (!pdev) {
3277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3278 			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_STR,
3279 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3280 		return NULL;
3281 	}
3282 
3283 	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
3284 	if (!peer) {
3285 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3286 			  "Peer not found for peer_addr:" QDF_MAC_ADDR_STR,
3287 			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
3288 		return NULL;
3289 	}
3290 
3291 	return (struct cdp_vdev *)peer->vdev;
3292 }
3293 
3294 /**
3295  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
3296  * @peer - peer instance
3297  *
3298  * Get virtual interface instance which peer belongs
3299  *
3300  * Return: virtual interface instance pointer
3301  *         NULL in case cannot find
3302  */
3303 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
3304 {
3305 	struct dp_peer *peer = peer_handle;
3306 
3307 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
3308 	return (struct cdp_vdev *)peer->vdev;
3309 }
3310 
3311 /**
3312  * dp_peer_get_peer_mac_addr() - Get peer mac address
3313  * @peer - peer instance
3314  *
3315  * Get peer mac address
3316  *
3317  * Return: peer mac address pointer
3318  *         NULL in case cannot find
3319  */
3320 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3321 {
3322 	struct dp_peer *peer = peer_handle;
3323 	uint8_t *mac;
3324 
3325 	mac = peer->mac_addr.raw;
3326 	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
3327 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3328 	return peer->mac_addr.raw;
3329 }
3330 
3331 /**
3332  * dp_get_peer_state() - Get local peer state
3333  * @peer - peer instance
3334  *
3335  * Get local peer state
3336  *
3337  * Return: peer status
3338  */
3339 int dp_get_peer_state(void *peer_handle)
3340 {
3341 	struct dp_peer *peer = peer_handle;
3342 
3343 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
3344 	return peer->state;
3345 }
3346 
3347 /**
3348  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3349  * @pdev - data path device instance
3350  *
3351  * local peer id pool alloc for physical device
3352  *
3353  * Return: none
3354  */
3355 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3356 {
3357 	int i;
3358 
3359 	/* point the freelist to the first ID */
3360 	pdev->local_peer_ids.freelist = 0;
3361 
3362 	/* link each ID to the next one */
3363 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3364 		pdev->local_peer_ids.pool[i] = i + 1;
3365 		pdev->local_peer_ids.map[i] = NULL;
3366 	}
3367 
3368 	/* link the last ID to itself, to mark the end of the list */
3369 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3370 	pdev->local_peer_ids.pool[i] = i;
3371 
3372 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
3373 	DP_TRACE(INFO, "Peer pool init");
3374 }
3375 
3376 /**
3377  * dp_local_peer_id_alloc() - allocate local peer id
3378  * @pdev - data path device instance
3379  * @peer - new peer instance
3380  *
3381  * allocate local peer id
3382  *
3383  * Return: none
3384  */
3385 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3386 {
3387 	int i;
3388 
3389 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3390 	i = pdev->local_peer_ids.freelist;
3391 	if (pdev->local_peer_ids.pool[i] == i) {
3392 		/* the list is empty, except for the list-end marker */
3393 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3394 	} else {
3395 		/* take the head ID and advance the freelist */
3396 		peer->local_id = i;
3397 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3398 		pdev->local_peer_ids.map[i] = peer;
3399 	}
3400 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3401 	dp_info("peer %pK, local id %d", peer, peer->local_id);
3402 }
3403 
3404 /**
3405  * dp_local_peer_id_free() - remove local peer id
3406  * @pdev - data path device instance
3407  * @peer - peer instance should be removed
3408  *
3409  * remove local peer id
3410  *
3411  * Return: none
3412  */
3413 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3414 {
3415 	int i = peer->local_id;
3416 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3417 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3418 		return;
3419 	}
3420 
3421 	/* put this ID on the head of the freelist */
3422 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3423 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3424 	pdev->local_peer_ids.freelist = i;
3425 	pdev->local_peer_ids.map[i] = NULL;
3426 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3427 }
3428 #endif
3429 
3430 /**
3431  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3432  * @soc_handle: DP SOC handle
3433  * @peer_id: peer_id of the peer
3434  *
3435  * Return: vdev_id of the vap
3436  */
3437 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3438 		uint16_t peer_id, uint8_t *peer_mac)
3439 {
3440 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3441 	struct dp_peer *peer;
3442 	uint8_t vdev_id;
3443 
3444 	peer = dp_peer_find_by_id(soc, peer_id);
3445 
3446 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3447 		  "soc %pK peer_id %d", soc, peer_id);
3448 
3449 	if (!peer) {
3450 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3451 			  "peer not found ");
3452 		return CDP_INVALID_VDEV_ID;
3453 	}
3454 
3455 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3456 	vdev_id = peer->vdev->vdev_id;
3457 
3458 	dp_peer_unref_del_find_by_id(peer);
3459 
3460 	return vdev_id;
3461 }
3462 
3463 /**
3464  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3465  * @peer: DP peer handle
3466  * @dp_stats_cmd_cb: REO command callback function
3467  * @cb_ctxt: Callback context
3468  *
3469  * Return: none
3470  */
3471 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3472 			void *cb_ctxt)
3473 {
3474 	struct dp_soc *soc = peer->vdev->pdev->soc;
3475 	struct hal_reo_cmd_params params;
3476 	int i;
3477 
3478 	if (!dp_stats_cmd_cb)
3479 		return;
3480 
3481 	qdf_mem_zero(&params, sizeof(params));
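	/* For every TID that has a HW queue descriptor, issue a
	 * GET_QUEUE_STATS command followed by a no-invalidate cache flush so
	 * the descriptor memory reflects the latest stats.
	 */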
3482 	for (i = 0; i < DP_MAX_TIDS; i++) {
3483 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3484 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
3485 			params.std.need_status = 1;
3486 			params.std.addr_lo =
3487 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3488 			params.std.addr_hi =
3489 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3490 
3491 			if (cb_ctxt) {
3492 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3493 					&params, dp_stats_cmd_cb, cb_ctxt);
3494 			} else {
3495 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3496 					&params, dp_stats_cmd_cb, rx_tid);
3497 			}
3498 
3499 			/* Flush REO descriptor from HW cache to update stats
3500 			 * in descriptor memory. This is to help debugging */
3501 			qdf_mem_zero(&params, sizeof(params));
3502 			params.std.need_status = 0;
3503 			params.std.addr_lo =
3504 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3505 			params.std.addr_hi =
3506 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3507 			params.u.fl_cache_params.flush_no_inval = 1;
3508 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3509 				NULL);
3510 		}
3511 	}
3512 }
3513 
3514 void dp_set_michael_key(struct cdp_peer *peer_handle,
3515 			bool is_unicast, uint32_t *key)
3516 {
3517 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3518 	uint8_t sec_index = is_unicast ? 1 : 0;
3519 
3520 	if (!peer) {
3521 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3522 			  "peer not found ");
3523 		return;
3524 	}
3525 
3526 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3527 		     key, IEEE80211_WEP_MICLEN);
3528 }
3529 
3530 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3531 {
3532 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3533 
3534 	if (peer) {
3535 		/*
3536 		 * Decrement the peer ref which is taken as part of
3537 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3538 		 */
3539 		dp_peer_unref_del_find_by_id(peer);
3540 
3541 		return true;
3542 	}
3543 
3544 	return false;
3545 }
3546