xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {}
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&;
60 		 * because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
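
/*
 * Note on usage: like memcmp(), dp_peer_find_mac_addr_cmp() returns 0 when
 * the two aligned MAC addresses match and non-zero otherwise, so callers
 * test for equality as in this illustrative sketch:
 *
 *	if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
 *		// the two addresses are identical
 *	}
 */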
69 
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
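
/*
 * For reference, dp_log2_ceil() returns the ceiling of log2(value) and is
 * used below to round hash table sizes up to a power of two (value is
 * assumed to be non-zero):
 *
 *	dp_log2_ceil(32) -> 5	(32 == 1 << 5, already a power of two)
 *	dp_log2_ceil(33) -> 6	(rounded up, so 1 << 6 = 64 bins are used)
 *	dp_log2_ceil(1)  -> 0
 */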
110 
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
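
/*
 * Rough sketch of the hash computation above: the three aligned 16-bit
 * halves of the MAC address are XOR-folded together, the upper index bits
 * are then folded into the lower ones so that high-byte entropy still
 * spreads peers across bins, and the result is masked to the bin count:
 *
 *	index  = bytes_ab ^ bytes_cd ^ bytes_ef;
 *	index ^= index >> idx_bits;	// mix high bits into the low bits
 *	index &= mask;			// mask = (number of bins) - 1
 */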
175 
176 
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * for this bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
229 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
230 				       struct dp_ast_entry *ast)
231 {
232 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
233 
234 	if (ast->cp_ctx && cdp_soc->ol_ops->peer_del_wds_cp_ctx)
235 		cdp_soc->ol_ops->peer_del_wds_cp_ctx(ast->cp_ctx);
236 }
237 #else
238 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
239 				       struct dp_ast_entry *ast)
240 {
241 }
242 #endif
243 /*
244  * dp_peer_ast_hash_detach() - Free AST Hash table
245  * @soc: SoC handle
246  *
247  * Return: None
248  */
249 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
250 {
251 	unsigned int index;
252 	struct dp_ast_entry *ast, *ast_next;
253 
254 	if (!soc->ast_hash.mask)
255 		return;
256 
257 	for (index = 0; index <= soc->ast_hash.mask; index++) {
258 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
259 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
260 					   hash_list_elem, ast_next) {
261 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
262 					     hash_list_elem);
263 				dp_peer_ast_cleanup(soc, ast);
264 				qdf_mem_free(ast);
265 			}
266 		}
267 	}
268 
269 	qdf_mem_free(soc->ast_hash.bins);
270 }
271 
272 /*
273  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
274  * @soc: SoC handle
275  *
276  * Return: AST hash
277  */
278 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
279 	union dp_align_mac_addr *mac_addr)
280 {
281 	uint32_t index;
282 
283 	index =
284 		mac_addr->align2.bytes_ab ^
285 		mac_addr->align2.bytes_cd ^
286 		mac_addr->align2.bytes_ef;
287 	index ^= index >> soc->ast_hash.idx_bits;
288 	index &= soc->ast_hash.mask;
289 	return index;
290 }
291 
292 /*
293  * dp_peer_ast_hash_add() - Add AST entry into hash table
294  * @soc: SoC handle
295  *
296  * This function adds the AST entry into SoC AST hash table
297  * It assumes caller has taken the ast lock to protect the access to this table
298  *
299  * Return: None
300  */
301 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
302 		struct dp_ast_entry *ase)
303 {
304 	uint32_t index;
305 
306 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
307 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
308 }
309 
310 /*
311  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
312  * @soc: SoC handle
313  *
314  * This function removes the AST entry from soc AST hash table
315  * It assumes caller has taken the ast lock to protect the access to this table
316  *
317  * Return: None
318  */
319 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
320 		struct dp_ast_entry *ase)
321 {
322 	unsigned index;
323 	struct dp_ast_entry *tmpase;
324 	int found = 0;
325 
326 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
327 	/* Check that the bin list is not empty before deleting */
328 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
329 
330 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
331 		if (tmpase == ase) {
332 			found = 1;
333 			break;
334 		}
335 	}
336 
337 	QDF_ASSERT(found);
338 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
339 }
340 
341 /*
342  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
343  * @soc: SoC handle
344  * @peer: peer handle
345  * @ast_mac_addr: mac address
346  *
347  * It assumes caller has taken the ast lock to protect the access to ast list
348  *
349  * Return: AST entry
350  */
351 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
352 					   struct dp_peer *peer,
353 					   uint8_t *ast_mac_addr)
354 {
355 	struct dp_ast_entry *ast_entry = NULL;
356 	union dp_align_mac_addr *mac_addr =
357 		(union dp_align_mac_addr *)ast_mac_addr;
358 
359 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
360 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
361 					       &ast_entry->mac_addr)) {
362 			return ast_entry;
363 		}
364 	}
365 
366 	return NULL;
367 }
368 
369 /*
370  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
371  * @soc: SoC handle
372  *
373  * It assumes caller has taken the ast lock to protect the access to
374  * AST hash table
375  *
376  * Return: AST entry
377  */
378 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
379 						     uint8_t *ast_mac_addr,
380 						     uint8_t pdev_id)
381 {
382 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
383 	uint32_t index;
384 	struct dp_ast_entry *ase;
385 
386 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
387 		     ast_mac_addr, DP_MAC_ADDR_LEN);
388 	mac_addr = &local_mac_addr_aligned;
389 
390 	index = dp_peer_ast_hash_index(soc, mac_addr);
391 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
392 		if ((pdev_id == ase->pdev_id) &&
393 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
394 			return ase;
395 		}
396 	}
397 
398 	return NULL;
399 }
400 
401 /*
402  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
403  * @soc: SoC handle
404  *
405  * It assumes caller has taken the ast lock to protect the access to
406  * AST hash table
407  *
408  * Return: AST entry
409  */
410 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
411 					       uint8_t *ast_mac_addr)
412 {
413 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
414 	unsigned index;
415 	struct dp_ast_entry *ase;
416 
417 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
418 			ast_mac_addr, DP_MAC_ADDR_LEN);
419 	mac_addr = &local_mac_addr_aligned;
420 
421 	index = dp_peer_ast_hash_index(soc, mac_addr);
422 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
423 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
424 			return ase;
425 		}
426 	}
427 
428 	return NULL;
429 }
430 
431 /*
432  * dp_peer_map_ast() - Map the ast entry with HW AST Index
433  * @soc: SoC handle
434  * @peer: peer to which ast node belongs
435  * @mac_addr: MAC address of ast node
436  * @hw_peer_id: HW AST Index returned by target in peer map event
437  * @vdev_id: vdev id for VAP to which the peer belongs to
438  * @ast_hash: ast hash value in HW
439  *
440  * Return: None
441  */
442 static inline void dp_peer_map_ast(struct dp_soc *soc,
443 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
444 	uint8_t vdev_id, uint16_t ast_hash)
445 {
446 	struct dp_ast_entry *ast_entry = NULL;
447 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
448 
449 	if (!peer) {
450 		return;
451 	}
452 
453 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
454 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
455 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
456 		mac_addr[1], mac_addr[2], mac_addr[3],
457 		mac_addr[4], mac_addr[5]);
458 
459 	qdf_spin_lock_bh(&soc->ast_lock);
460 
461 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
462 
463 	if (ast_entry) {
464 		ast_entry->ast_idx = hw_peer_id;
465 		soc->ast_table[hw_peer_id] = ast_entry;
466 		ast_entry->is_active = TRUE;
467 		peer_type = ast_entry->type;
468 		ast_entry->ast_hash_value = ast_hash;
469 	}
470 
471 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
472 		if (soc->cdp_soc.ol_ops->peer_map_event) {
473 			soc->cdp_soc.ol_ops->peer_map_event(
474 			soc->ctrl_psoc, peer->peer_ids[0],
475 			hw_peer_id, vdev_id,
476 			mac_addr, peer_type, ast_hash);
477 		}
478 	} else {
479 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
480 			"AST entry not found");
481 	}
482 
483 	qdf_spin_unlock_bh(&soc->ast_lock);
484 	return;
485 }
486 
487 /*
488  * dp_peer_add_ast() - Allocate and add AST entry into peer list
489  * @soc: SoC handle
490  * @peer: peer to which ast node belongs
491  * @mac_addr: MAC address of ast node
492  * @type: AST entry type
493  * @flags: wds or hmwds flags for the entry
494  * This API is used by WDS source port learning function to
495  * add a new AST entry into peer AST list
496  *
497  * Return: 0 if new entry is allocated,
498  *        -1 if entry add failed
499  */
500 int dp_peer_add_ast(struct dp_soc *soc,
501 			struct dp_peer *peer,
502 			uint8_t *mac_addr,
503 			enum cdp_txrx_ast_entry_type type,
504 			uint32_t flags)
505 {
506 	struct dp_ast_entry *ast_entry;
507 	struct dp_vdev *vdev = peer->vdev;
508 	struct dp_pdev *pdev = NULL;
509 	uint8_t next_node_mac[6];
510 	int  ret = -1;
511 
512 	if (!vdev) {
513 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
514 			FL("Peers vdev is NULL"));
515 		QDF_ASSERT(0);
516 		return ret;
517 	}
518 
519 	pdev = vdev->pdev;
520 
521 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
522 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
523 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
524 		mac_addr[3], mac_addr[4], mac_addr[5]);
525 
526 	qdf_spin_lock_bh(&soc->ast_lock);
527 
528 	/* For HMWDS and HMWDS_SEC, entries can be added for the same MAC
529 	 * address, so do not check for an existing entry.
530 	 * SON takes care of deleting any existing AST entry with other types
531 	 * before adding HMWDS entries.
532 	 */
533 	if ((type == CDP_TXRX_AST_TYPE_WDS_HM) ||
534 	    (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
535 		goto add_ast_entry;
536 
537 	/* If the AST entry already exists, just return from here.
538 	 * An AST entry with the same MAC address can exist on different
539 	 * radios; if ast_override support is enabled, search by pdev in
540 	 * this case.
541 	 */
542 	if (soc->ast_override_support) {
543 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
544 							    pdev->pdev_id);
545 		if (ast_entry) {
546 			qdf_spin_unlock_bh(&soc->ast_lock);
547 			return 0;
548 		}
549 	} else {
550 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
551 
552 		if (ast_entry) {
553 			if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) {
554 				ast_entry->is_active = TRUE;
555 				qdf_spin_unlock_bh(&soc->ast_lock);
556 				return 0;
557 			}
558 
559 			/*
560 			 * WAR for HK 1.x AST issue
561 			 * If an AST entry with the same MAC address already
562 			 * exists and is mapped to a different radio, and
563 			 * if the current radio is the primary radio, delete
564 			 * the existing AST entry and return.
565 			 *
566 			 * New AST entry will be created again on next
567 			 * SA_invalid frame
568 			 */
569 			if ((ast_entry->pdev_id != vdev->pdev->pdev_id) &&
570 			    vdev->pdev->is_primary) {
571 				QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
572 					  "Deleting ast_pdev=%d pdev=%d addr=%pM\n",
573 					  ast_entry->pdev_id,
574 					  vdev->pdev->pdev_id, mac_addr);
575 				dp_peer_del_ast(soc, ast_entry);
576 			}
577 
578 			qdf_spin_unlock_bh(&soc->ast_lock);
579 			return 0;
580 		}
581 	}
582 
583 add_ast_entry:
584 	ast_entry = (struct dp_ast_entry *)
585 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
586 
587 	if (!ast_entry) {
588 		qdf_spin_unlock_bh(&soc->ast_lock);
589 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
590 			FL("fail to allocate ast_entry"));
591 		QDF_ASSERT(0);
592 		return ret;
593 	}
594 
595 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
596 	ast_entry->peer = peer;
597 	ast_entry->pdev_id = vdev->pdev->pdev_id;
598 	ast_entry->vdev_id = vdev->vdev_id;
599 	ast_entry->ast_idx = DP_INVALID_AST_IDX;
600 
601 	switch (type) {
602 	case CDP_TXRX_AST_TYPE_STATIC:
603 		peer->self_ast_entry = ast_entry;
604 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
605 		if (peer->vdev->opmode == wlan_op_mode_sta)
606 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
607 		break;
608 	case CDP_TXRX_AST_TYPE_SELF:
609 		peer->self_ast_entry = ast_entry;
610 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
611 		break;
612 	case CDP_TXRX_AST_TYPE_WDS:
613 		ast_entry->next_hop = 1;
614 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
615 		break;
616 	case CDP_TXRX_AST_TYPE_WDS_HM:
617 		ast_entry->next_hop = 1;
618 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
619 		break;
620 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
621 		ast_entry->next_hop = 1;
622 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
623 		break;
624 	case CDP_TXRX_AST_TYPE_MEC:
625 		ast_entry->next_hop = 1;
626 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
627 		break;
628 	case CDP_TXRX_AST_TYPE_DA:
629 		ast_entry->next_hop = 1;
630 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
631 		break;
632 	default:
633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
634 			FL("Incorrect AST entry type"));
635 	}
636 
637 	ast_entry->is_active = TRUE;
638 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
639 	DP_STATS_INC(soc, ast.added, 1);
640 	dp_peer_ast_hash_add(soc, ast_entry);
641 	qdf_spin_unlock_bh(&soc->ast_lock);
642 
643 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC ||
644 	    ast_entry->type == CDP_TXRX_AST_TYPE_DA)
645 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
646 	else
647 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
648 
649 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
650 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
651 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
652 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
653 		if (QDF_STATUS_SUCCESS ==
654 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
655 				peer->vdev->osif_vdev,
656 				mac_addr,
657 				next_node_mac,
658 				flags))
659 			return 0;
660 	}
661 
662 	return ret;
663 }
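
/*
 * Illustrative caller sketch (ta_peer and sa_mac are hypothetical names,
 * not taken from this file): the WDS source port learning path typically
 * calls dp_peer_add_ast() with the source address of a frame received from
 * an unknown station, e.g.:
 *
 *	dp_peer_add_ast(soc, ta_peer, sa_mac, CDP_TXRX_AST_TYPE_WDS, flags);
 *
 * Note that dp_peer_add_ast() takes soc->ast_lock internally, so the caller
 * must not already hold it.
 */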
664 
665 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
666 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
667 {
668 	struct dp_peer *peer = ast_entry->peer;
669 
670 	if (ast_entry->next_hop) {
671 		dp_peer_ast_send_wds_del(soc, ast_entry);
672 	} else {
673 		soc->ast_table[ast_entry->ast_idx] = NULL;
674 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
675 
676 		if (ast_entry == peer->self_ast_entry)
677 			peer->self_ast_entry = NULL;
678 
679 		DP_STATS_INC(soc, ast.deleted, 1);
680 		dp_peer_ast_hash_remove(soc, ast_entry);
681 		qdf_mem_free(ast_entry);
682 	}
683 }
684 #else
685 /*
686  * dp_peer_del_ast() - Delete and free AST entry
687  * @soc: SoC handle
688  * @ast_entry: AST entry of the node
689  *
690  * This function removes the AST entry from peer and soc tables
691  * It assumes caller has taken the ast lock to protect the access to these
692  * tables
693  *
694  * Return: None
695  */
696 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
697 {
698 	struct dp_peer *peer = ast_entry->peer;
699 
700 	if (ast_entry->next_hop &&
701 	    ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)
702 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
703 						ast_entry->mac_addr.raw);
704 
705 	soc->ast_table[ast_entry->ast_idx] = NULL;
706 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
707 
708 	if (ast_entry == peer->self_ast_entry)
709 		peer->self_ast_entry = NULL;
710 
711 	DP_STATS_INC(soc, ast.deleted, 1);
712 	dp_peer_ast_hash_remove(soc, ast_entry);
713 	qdf_mem_free(ast_entry);
714 }
715 #endif
716 
717 /*
718  * dp_peer_update_ast() - Update AST entry for the roamed peer
719  * @soc: SoC handle
720  * @peer: peer to which ast node belongs
721  * @ast_entry: AST entry of the node
722  * @flags: wds or hmwds
723  *
724  * This function updates the AST entry to point to the new (roamed) peer.
725  * It assumes the caller has taken the ast lock to protect access to these
726  * tables.
727  *
728  * Return: 0 if ast entry is updated successfully
729  *         -1 failure
730  */
731 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
732 		       struct dp_ast_entry *ast_entry, uint32_t flags)
733 {
734 	int ret = -1;
735 	struct dp_peer *old_peer;
736 
737 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
738 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
739 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
740 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
741 		return 0;
742 
743 	old_peer = ast_entry->peer;
744 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
745 
746 	ast_entry->peer = peer;
747 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
748 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
749 	ast_entry->vdev_id = peer->vdev->vdev_id;
750 	ast_entry->is_active = TRUE;
751 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
752 
753 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
754 				peer->vdev->osif_vdev,
755 				ast_entry->mac_addr.raw,
756 				peer->mac_addr.raw,
757 				flags);
758 
759 	return ret;
760 }
761 
762 /*
763  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
764  * @soc: SoC handle
765  * @ast_entry: AST entry of the node
766  *
767  * This function gets the pdev_id from the ast entry.
768  *
769  * Return: (uint8_t) pdev_id
770  */
771 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
772 				struct dp_ast_entry *ast_entry)
773 {
774 	return ast_entry->pdev_id;
775 }
776 
777 /*
778  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
779  * @soc: SoC handle
780  * @ast_entry: AST entry of the node
781  *
782  * This function gets the next hop from the ast entry.
783  *
784  * Return: (uint8_t) next_hop
785  */
786 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
787 				struct dp_ast_entry *ast_entry)
788 {
789 	return ast_entry->next_hop;
790 }
791 
792 /*
793  * dp_peer_ast_set_type() - set type in the ast entry
794  * @soc: SoC handle
795  * @ast_entry: AST entry of the node
796  *
797  * This function sets the type in the ast entry.
798  *
799  * Return: None
800  */
801 void dp_peer_ast_set_type(struct dp_soc *soc,
802 				struct dp_ast_entry *ast_entry,
803 				enum cdp_txrx_ast_entry_type type)
804 {
805 	ast_entry->type = type;
806 }
807 
808 #else
809 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
810 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
811 		uint32_t flags)
812 {
813 	return 1;
814 }
815 
816 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
817 {
818 }
819 
820 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
821 			struct dp_ast_entry *ast_entry, uint32_t flags)
822 {
823 	return 1;
824 }
825 
826 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
827 					       uint8_t *ast_mac_addr)
828 {
829 	return NULL;
830 }
831 
832 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
833 						     uint8_t *ast_mac_addr,
834 						     uint8_t pdev_id)
835 {
836 	return NULL;
837 }
838 
839 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
840 {
841 	return 0;
842 }
843 
844 static inline void dp_peer_map_ast(struct dp_soc *soc,
845 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
846 	uint8_t vdev_id, uint16_t ast_hash)
847 {
848 	return;
849 }
850 
851 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
852 {
853 }
854 
855 void dp_peer_ast_set_type(struct dp_soc *soc,
856 				struct dp_ast_entry *ast_entry,
857 				enum cdp_txrx_ast_entry_type type)
858 {
859 }
860 
861 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
862 				struct dp_ast_entry *ast_entry)
863 {
864 	return 0xff;
865 }
866 
867 
868 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
869 				struct dp_ast_entry *ast_entry)
870 {
871 	return 0xff;
872 }
873 #endif
874 
875 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
876 void dp_peer_ast_set_cp_ctx(struct dp_soc *soc,
877 			    struct dp_ast_entry *ast_entry,
878 			    void *cp_ctx)
879 {
880 	ast_entry->cp_ctx = cp_ctx;
881 }
882 
883 void *dp_peer_ast_get_cp_ctx(struct dp_soc *soc,
884 			     struct dp_ast_entry *ast_entry)
885 {
886 	void *cp_ctx = NULL;
887 
888 	cp_ctx = ast_entry->cp_ctx;
889 	ast_entry->cp_ctx = NULL;
890 
891 	return cp_ctx;
892 }
893 
894 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
895 			      struct dp_ast_entry *ast_entry)
896 {
897 	struct dp_peer *peer = ast_entry->peer;
898 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
899 
900 	if (!ast_entry->wmi_sent) {
901 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
902 						    ast_entry->mac_addr.raw);
903 		ast_entry->wmi_sent = true;
904 	}
905 }
906 
907 bool dp_peer_ast_get_wmi_sent(struct dp_soc *soc,
908 			      struct dp_ast_entry *ast_entry)
909 {
910 	return ast_entry->wmi_sent;
911 }
912 
913 void dp_peer_ast_free_entry(struct dp_soc *soc,
914 			    struct dp_ast_entry *ast_entry)
915 {
916 	struct dp_peer *peer = ast_entry->peer;
917 
918 	soc->ast_table[ast_entry->ast_idx] = NULL;
919 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
920 	DP_STATS_INC(soc, ast.deleted, 1);
921 	dp_peer_ast_hash_remove(soc, ast_entry);
922 	qdf_mem_free(ast_entry);
923 }
924 #endif
925 
926 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
927 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
928 {
929 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
930 	unsigned index;
931 	struct dp_peer *peer;
932 
933 	if (mac_addr_is_aligned) {
934 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
935 	} else {
936 		qdf_mem_copy(
937 			&local_mac_addr_aligned.raw[0],
938 			peer_mac_addr, DP_MAC_ADDR_LEN);
939 		mac_addr = &local_mac_addr_aligned;
940 	}
941 	index = dp_peer_find_hash_index(soc, mac_addr);
942 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
943 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
944 #if ATH_SUPPORT_WRAP
945 		/* A ProxySTA may have multiple BSS peers with the same MAC address;
946 		 * the modified find takes care of locating the correct BSS peer.
947 		 */
948 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
949 			((peer->vdev->vdev_id == vdev_id) ||
950 			 (vdev_id == DP_VDEV_ALL))) {
951 #else
952 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
953 #endif
954 			/* found it - increment the ref count before releasing
955 			 * the lock
956 			 */
957 			qdf_atomic_inc(&peer->ref_cnt);
958 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
959 			return peer;
960 		}
961 	}
962 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
963 	return NULL; /* failure */
964 }
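
/*
 * Reference counting contract: dp_peer_find_hash_find() returns with the
 * peer's ref_cnt already incremented, so every successful lookup must be
 * balanced by dp_peer_unref_delete() once the caller is done with the
 * peer, as in this sketch:
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, DP_VDEV_ALL);
 *	if (peer) {
 *		// ... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 */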
965 
966 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
967 {
968 	unsigned index;
969 	struct dp_peer *tmppeer = NULL;
970 	int found = 0;
971 
972 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
973 	/* Check that the bin list is not empty before deleting */
974 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
975 	/*
976 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
977 	 * by the caller.
978 	 * The caller needs to hold the lock from the time the peer object's
979 	 * reference count is decremented and tested up through the time the
980 	 * reference to the peer object is removed from the hash table, by
981 	 * this function.
982 	 * Holding the lock only while removing the peer object reference
983 	 * from the hash table keeps the hash table consistent, but does not
984 	 * protect against a new HL tx context starting to use the peer object
985 	 * if it looks up the peer object from its MAC address just after the
986 	 * peer ref count is decremented to zero, but just before the peer
987 	 * object reference is removed from the hash table.
988 	 */
989 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
990 		if (tmppeer == peer) {
991 			found = 1;
992 			break;
993 		}
994 	}
995 	QDF_ASSERT(found);
996 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
997 }
998 
999 void dp_peer_find_hash_erase(struct dp_soc *soc)
1000 {
1001 	int i;
1002 
1003 	/*
1004 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1005 	 * it's known that the soc is no longer in use.
1006 	 */
1007 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1008 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1009 			struct dp_peer *peer, *peer_next;
1010 
1011 			/*
1012 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1013 			 * memory access violation after peer is freed
1014 			 */
1015 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1016 				hash_list_elem, peer_next) {
1017 				/*
1018 				 * Don't remove the peer from the hash table -
1019 				 * that would modify the list we are currently
1020 				 * traversing, and it's not necessary anyway.
1021 				 */
1022 				/*
1023 				 * Artificially adjust the peer's ref count to
1024 				 * 1, so it will get deleted by
1025 				 * dp_peer_unref_delete.
1026 				 */
1027 				/* set to zero */
1028 				qdf_atomic_init(&peer->ref_cnt);
1029 				/* incr to one */
1030 				qdf_atomic_inc(&peer->ref_cnt);
1031 				dp_peer_unref_delete(peer);
1032 			}
1033 		}
1034 	}
1035 }
1036 
1037 static void dp_peer_find_map_detach(struct dp_soc *soc)
1038 {
1039 	qdf_mem_free(soc->peer_id_to_obj_map);
1040 }
1041 
1042 int dp_peer_find_attach(struct dp_soc *soc)
1043 {
1044 	if (dp_peer_find_map_attach(soc))
1045 		return 1;
1046 
1047 	if (dp_peer_find_hash_attach(soc)) {
1048 		dp_peer_find_map_detach(soc);
1049 		return 1;
1050 	}
1051 
1052 	if (dp_peer_ast_hash_attach(soc)) {
1053 		dp_peer_find_hash_detach(soc);
1054 		dp_peer_find_map_detach(soc);
1055 		return 1;
1056 	}
1057 	return 0; /* success */
1058 }
1059 
1060 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1061 	union hal_reo_status *reo_status)
1062 {
1063 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1064 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1065 
1066 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1067 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
1068 			queue_status->header.status, rx_tid->tid);
1069 		return;
1070 	}
1071 
1072 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
1073 		"ssn: %d\n"
1074 		"curr_idx  : %d\n"
1075 		"pn_31_0   : %08x\n"
1076 		"pn_63_32  : %08x\n"
1077 		"pn_95_64  : %08x\n"
1078 		"pn_127_96 : %08x\n"
1079 		"last_rx_enq_tstamp : %08x\n"
1080 		"last_rx_deq_tstamp : %08x\n"
1081 		"rx_bitmap_31_0     : %08x\n"
1082 		"rx_bitmap_63_32    : %08x\n"
1083 		"rx_bitmap_95_64    : %08x\n"
1084 		"rx_bitmap_127_96   : %08x\n"
1085 		"rx_bitmap_159_128  : %08x\n"
1086 		"rx_bitmap_191_160  : %08x\n"
1087 		"rx_bitmap_223_192  : %08x\n"
1088 		"rx_bitmap_255_224  : %08x\n",
1089 		rx_tid->tid,
1090 		queue_status->ssn, queue_status->curr_idx,
1091 		queue_status->pn_31_0, queue_status->pn_63_32,
1092 		queue_status->pn_95_64, queue_status->pn_127_96,
1093 		queue_status->last_rx_enq_tstamp,
1094 		queue_status->last_rx_deq_tstamp,
1095 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
1096 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
1097 		queue_status->rx_bitmap_159_128,
1098 		queue_status->rx_bitmap_191_160,
1099 		queue_status->rx_bitmap_223_192,
1100 		queue_status->rx_bitmap_255_224);
1101 
1102 	DP_TRACE_STATS(FATAL,
1103 		"curr_mpdu_cnt      : %d\n"
1104 		"curr_msdu_cnt      : %d\n"
1105 		"fwd_timeout_cnt    : %d\n"
1106 		"fwd_bar_cnt        : %d\n"
1107 		"dup_cnt            : %d\n"
1108 		"frms_in_order_cnt  : %d\n"
1109 		"bar_rcvd_cnt       : %d\n"
1110 		"mpdu_frms_cnt      : %d\n"
1111 		"msdu_frms_cnt      : %d\n"
1112 		"total_byte_cnt     : %d\n"
1113 		"late_recv_mpdu_cnt : %d\n"
1114 		"win_jump_2k 	    : %d\n"
1115 		"hole_cnt 	    : %d\n",
1116 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
1117 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
1118 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
1119 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
1120 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
1121 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
1122 		queue_status->hole_cnt);
1123 
1124 	DP_PRINT_STATS("Addba Req          : %d\n"
1125 			"Addba Resp         : %d\n"
1126 			"Addba Resp success : %d\n"
1127 			"Addba Resp failed  : %d\n"
1128 			"Delba Req received : %d\n"
1129 			"Delba Tx success   : %d\n"
1130 			"Delba Tx Fail      : %d\n"
1131 			"BA window size     : %d\n"
1132 			"Pn size            : %d\n",
1133 			rx_tid->num_of_addba_req,
1134 			rx_tid->num_of_addba_resp,
1135 			rx_tid->num_addba_rsp_success,
1136 			rx_tid->num_addba_rsp_failed,
1137 			rx_tid->num_of_delba_req,
1138 			rx_tid->delba_tx_success_cnt,
1139 			rx_tid->delba_tx_fail_cnt,
1140 			rx_tid->ba_win_size,
1141 			rx_tid->pn_size);
1142 }
1143 
1144 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1145 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1146 	uint8_t vdev_id)
1147 {
1148 	struct dp_peer *peer;
1149 
1150 	QDF_ASSERT(peer_id <= soc->max_peers);
1151 	/* check if there's already a peer object with this MAC address */
1152 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1153 		0 /* mac addr is not aligned */, vdev_id);
1154 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1155 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1156 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1157 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1158 		peer_mac_addr[4], peer_mac_addr[5]);
1159 
1160 	if (peer) {
1161 		/* peer's ref count was already incremented by
1162 		 * peer_find_hash_find
1163 		 */
1164 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1165 			  "%s: ref_cnt: %d", __func__,
1166 			   qdf_atomic_read(&peer->ref_cnt));
1167 		if (!soc->peer_id_to_obj_map[peer_id])
1168 			soc->peer_id_to_obj_map[peer_id] = peer;
1169 		else {
1170 			/* Peer map event came for peer_id which
1171 			 * is already mapped, this is not expected
1172 			 */
1173 			QDF_ASSERT(0);
1174 		}
1175 
1176 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1177 			/* TBDXXX: assert for now */
1178 			QDF_ASSERT(0);
1179 		}
1180 
1181 		return peer;
1182 	}
1183 
1184 	return NULL;
1185 }
1186 
1187 /**
1188  * dp_rx_peer_map_handler() - handle peer map event from firmware
1189  * @soc_handle - genereic soc handle
1190  * @soc_handle - generic soc handle
1191  * @peer_id - peer_id from firmware
1192  * @vdev_id - vdev ID
1193  * @peer_mac_addr - mac address of the peer
1194  * @ast_hash - ast hash value
1195  * @is_wds - flag to indicate peer map event for WDS ast entry
1196  *
1197  * associate the peer_id that firmware provided with peer entry
1198  * Associate the peer_id that firmware provided with the peer entry
1199  *
1200  * Return: none
1201  */
1202 
1203 void
1204 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1205 		       uint16_t hw_peer_id, uint8_t vdev_id,
1206 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1207 		       uint8_t is_wds)
1208 {
1209 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1210 	struct dp_peer *peer = NULL;
1211 
1212 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1213 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
1214 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
1215 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1216 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1217 		peer_mac_addr[5], vdev_id);
1218 
1219 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
1220 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1221 			"invalid hw_peer_id: %d", hw_peer_id);
1222 		qdf_assert_always(0);
1223 	}
1224 
1225 	/* For a peer map event for a WDS ast entry, get the peer from
1226 	 * the obj map
1227 	 */
1228 	if (is_wds) {
1229 		peer = soc->peer_id_to_obj_map[peer_id];
1230 	} else {
1231 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1232 					   hw_peer_id, vdev_id);
1233 
1234 		if (peer) {
1235 			/*
1236 			 * For every peer map message, check and set bss_peer if applicable
1237 			 */
1238 			if (!(qdf_mem_cmp(peer->mac_addr.raw,
1239 					  peer->vdev->mac_addr.raw,
1240 					  DP_MAC_ADDR_LEN))) {
1241 				QDF_TRACE(QDF_MODULE_ID_DP,
1242 					  QDF_TRACE_LEVEL_INFO_HIGH,
1243 					  "vdev bss_peer!!!!");
1244 				peer->bss_peer = 1;
1245 				peer->vdev->vap_bss_peer = peer;
1246 			}
1247 
1248 			if (peer->vdev->opmode == wlan_op_mode_sta)
1249 				peer->vdev->bss_ast_hash = ast_hash;
1250 		}
1251 	}
1252 
1253 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1254 			hw_peer_id, vdev_id, ast_hash);
1255 }
1256 
1257 /**
1258  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1259  * @soc_handle - generic soc handle
1260  * @peer_id - peer_id from firmware
1261  * @vdev_id - vdev ID
1262  * @peer_mac_addr - mac address of the peer
1263  * @is_wds - flag to indicate peer unmap event for WDS ast entry
1264  *
1265  * Return: none
1266  */
1267 void
1268 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1269 			 uint8_t vdev_id, uint8_t *peer_mac_addr,
1270 			 uint8_t is_wds)
1271 {
1272 	struct dp_peer *peer;
1273 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1274 	uint8_t i;
1275 
1276 	if (is_wds)
1277 		return;
1278 
1279 	peer = __dp_peer_find_by_id(soc, peer_id);
1280 
1281 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1282 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1283 		soc, peer_id, peer);
1284 
1285 	/*
1286 	 * Currently peer IDs are assigned for vdevs as well as peers.
1287 	 * If the peer ID is for a vdev, then the peer pointer stored
1288 	 * in peer_id_to_obj_map will be NULL.
1289 	 */
1290 	if (!peer) {
1291 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1292 			"%s: Received unmap event for invalid peer_id"
1293 			" %u", __func__, peer_id);
1294 		return;
1295 	}
1296 
1297 	soc->peer_id_to_obj_map[peer_id] = NULL;
1298 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1299 		if (peer->peer_ids[i] == peer_id) {
1300 			peer->peer_ids[i] = HTT_INVALID_PEER;
1301 			break;
1302 		}
1303 	}
1304 
1305 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1306 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1307 				peer_id);
1308 	}
1309 
1310 	/*
1311 	 * Remove a reference to the peer.
1312 	 * If there are no more references, delete the peer object.
1313 	 */
1314 	dp_peer_unref_delete(peer);
1315 }
1316 
1317 void
1318 dp_peer_find_detach(struct dp_soc *soc)
1319 {
1320 	dp_peer_find_map_detach(soc);
1321 	dp_peer_find_hash_detach(soc);
1322 	dp_peer_ast_hash_detach(soc);
1323 }
1324 
1325 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1326 	union hal_reo_status *reo_status)
1327 {
1328 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1329 
1330 	if ((reo_status->rx_queue_status.header.status !=
1331 		HAL_REO_CMD_SUCCESS) &&
1332 		(reo_status->rx_queue_status.header.status !=
1333 		HAL_REO_CMD_DRAIN)) {
1334 		/* Should not happen normally. Just print error for now */
1335 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1336 			"%s: Rx tid HW desc update failed(%d): tid %d",
1337 			__func__,
1338 			reo_status->rx_queue_status.header.status,
1339 			rx_tid->tid);
1340 	}
1341 }
1342 
1343 /*
1344  * dp_find_peer_by_addr - find peer instance by mac address
1345  * @dev: physical device instance
1346  * @peer_mac_addr: peer mac address
1347  * @local_id: local id for the peer
1348  *
1349  * Return: peer instance pointer
1350  */
1351 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1352 		uint8_t *local_id)
1353 {
1354 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1355 	struct dp_peer *peer;
1356 
1357 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1358 
1359 	if (!peer)
1360 		return NULL;
1361 
1362 	/* Multiple peer IDs? How do we know which peer ID to use? */
1363 	*local_id = peer->local_id;
1364 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1365 
1366 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1367 	 * Decrement it here.
1368 	 */
1369 	dp_peer_unref_delete(peer);
1370 
1371 	return peer;
1372 }
1373 
1374 /*
1375  * dp_rx_tid_update_wifi3() – Update receive TID state
1376  * @peer: Datapath peer handle
1377  * @tid: TID
1378  * @ba_window_size: BlockAck window size
1379  * @start_seq: Starting sequence number
1380  *
1381  * Return: 0 on success, error code on failure
1382  */
1383 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1384 				  ba_window_size, uint32_t start_seq)
1385 {
1386 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1387 	struct dp_soc *soc = peer->vdev->pdev->soc;
1388 	struct hal_reo_cmd_params params;
1389 
1390 	qdf_mem_zero(&params, sizeof(params));
1391 
1392 	params.std.need_status = 1;
1393 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1394 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1395 	params.u.upd_queue_params.update_ba_window_size = 1;
1396 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1397 
1398 	if (start_seq < IEEE80211_SEQ_MAX) {
1399 		params.u.upd_queue_params.update_ssn = 1;
1400 		params.u.upd_queue_params.ssn = start_seq;
1401 	}
1402 
1403 	dp_set_ssn_valid_flag(&params, 0);
1404 
1405 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1406 
1407 	rx_tid->ba_win_size = ba_window_size;
1408 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1409 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1410 			peer->vdev->pdev->ctrl_pdev,
1411 			peer->vdev->vdev_id, peer->mac_addr.raw,
1412 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1413 
1414 	}
1415 	return 0;
1416 }
1417 
1418 /*
1419  * dp_reo_desc_free() - Callback free reo descriptor memory after
1420  * HW cache flush
1421  *
1422  * @soc: DP SOC handle
1423  * @cb_ctxt: Callback context
1424  * @reo_status: REO command status
1425  */
1426 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1427 	union hal_reo_status *reo_status)
1428 {
1429 	struct reo_desc_list_node *freedesc =
1430 		(struct reo_desc_list_node *)cb_ctxt;
1431 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1432 
1433 	if ((reo_status->fl_cache_status.header.status !=
1434 		HAL_REO_CMD_SUCCESS) &&
1435 		(reo_status->fl_cache_status.header.status !=
1436 		HAL_REO_CMD_DRAIN)) {
1437 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1438 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1439 			__func__,
1440 			reo_status->rx_queue_status.header.status,
1441 			freedesc->rx_tid.tid);
1442 	}
1443 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1444 		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1445 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1446 	qdf_mem_unmap_nbytes_single(soc->osdev,
1447 		rx_tid->hw_qdesc_paddr,
1448 		QDF_DMA_BIDIRECTIONAL,
1449 		rx_tid->hw_qdesc_alloc_size);
1450 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1451 	qdf_mem_free(freedesc);
1452 }
1453 
1454 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1455 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1456 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1457 {
1458 	if (dma_addr < 0x50000000)
1459 		return QDF_STATUS_E_FAILURE;
1460 	else
1461 		return QDF_STATUS_SUCCESS;
1462 }
1463 #else
1464 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1465 {
1466 	return QDF_STATUS_SUCCESS;
1467 }
1468 #endif
1469 
1470 
1471 /*
1472  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1473  * @peer: Datapath peer handle
1474  * @tid: TID
1475  * @ba_window_size: BlockAck window size
1476  * @start_seq: Starting sequence number
1477  *
1478  * Return: 0 on success, error code on failure
1479  */
1480 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1481 	uint32_t ba_window_size, uint32_t start_seq)
1482 {
1483 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1484 	struct dp_vdev *vdev = peer->vdev;
1485 	struct dp_soc *soc = vdev->pdev->soc;
1486 	uint32_t hw_qdesc_size;
1487 	uint32_t hw_qdesc_align;
1488 	int hal_pn_type;
1489 	void *hw_qdesc_vaddr;
1490 	uint32_t alloc_tries = 0;
1491 
1492 	if (peer->delete_in_progress ||
1493 	    !qdf_atomic_read(&peer->is_default_route_set))
1494 		return QDF_STATUS_E_FAILURE;
1495 
1496 	rx_tid->ba_win_size = ba_window_size;
1497 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1498 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1499 			start_seq);
1500 	rx_tid->delba_tx_status = 0;
1501 	rx_tid->ppdu_id_2k = 0;
1502 	rx_tid->num_of_addba_req = 0;
1503 	rx_tid->num_of_delba_req = 0;
1504 	rx_tid->num_of_addba_resp = 0;
1505 	rx_tid->num_addba_rsp_failed = 0;
1506 	rx_tid->num_addba_rsp_success = 0;
1507 	rx_tid->delba_tx_success_cnt = 0;
1508 	rx_tid->delba_tx_fail_cnt = 0;
1509 	rx_tid->statuscode = 0;
1510 #ifdef notyet
1511 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1512 #else
1513 	/* TODO: Allocating HW queue descriptors based on max BA window size
1514 	 * for all QOS TIDs so that same descriptor can be used later when
1515 	 * ADDBA request is received. This should be changed to allocate HW
1516 	 * queue descriptors based on BA window size being negotiated (0 for
1517 	 * non BA cases), and reallocate when BA window size changes and also
1518 	 * send WMI message to FW to change the REO queue descriptor in Rx
1519 	 * peer entry as part of dp_rx_tid_update.
1520 	 */
1521 	if (tid != DP_NON_QOS_TID)
1522 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1523 			HAL_RX_MAX_BA_WINDOW);
1524 	else
1525 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1526 			ba_window_size);
1527 #endif
1528 
1529 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1530 	/* To avoid unnecessary extra allocation for alignment, try allocating
1531 	 * the exact size and see if we already have an aligned address.
1532 	 */
1533 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1534 
1535 try_desc_alloc:
1536 	rx_tid->hw_qdesc_vaddr_unaligned =
1537 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1538 
1539 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1540 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1541 			"%s: Rx tid HW desc alloc failed: tid %d",
1542 			__func__, tid);
1543 		return QDF_STATUS_E_NOMEM;
1544 	}
1545 
1546 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1547 		hw_qdesc_align) {
1548 		/* Address allocated above is not aligned. Allocate extra
1549 		 * memory for alignment
1550 		 */
1551 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1552 		rx_tid->hw_qdesc_vaddr_unaligned =
1553 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1554 					hw_qdesc_align - 1);
1555 
1556 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1557 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1558 				"%s: Rx tid HW desc alloc failed: tid %d",
1559 				__func__, tid);
1560 			return QDF_STATUS_E_NOMEM;
1561 		}
1562 
1563 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1564 			rx_tid->hw_qdesc_vaddr_unaligned,
1565 			hw_qdesc_align);
1566 
1567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1568 			"%s: Total Size %d Aligned Addr %pK",
1569 			__func__, rx_tid->hw_qdesc_alloc_size,
1570 			hw_qdesc_vaddr);
1571 
1572 	} else {
1573 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1574 	}
1575 
1576 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1577 	 * Currently this is set based on htt indication
1578 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1579 	 */
1580 	switch (peer->security[dp_sec_ucast].sec_type) {
1581 	case cdp_sec_type_tkip_nomic:
1582 	case cdp_sec_type_aes_ccmp:
1583 	case cdp_sec_type_aes_ccmp_256:
1584 	case cdp_sec_type_aes_gcmp:
1585 	case cdp_sec_type_aes_gcmp_256:
1586 		hal_pn_type = HAL_PN_WPA;
1587 		break;
1588 	case cdp_sec_type_wapi:
1589 		if (vdev->opmode == wlan_op_mode_ap)
1590 			hal_pn_type = HAL_PN_WAPI_EVEN;
1591 		else
1592 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1593 		break;
1594 	default:
1595 		hal_pn_type = HAL_PN_NONE;
1596 		break;
1597 	}
1598 
1599 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1600 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1601 
1602 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1603 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1604 		&(rx_tid->hw_qdesc_paddr));
1605 
1606 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1607 			QDF_STATUS_SUCCESS) {
1608 		if (alloc_tries++ < 10)
1609 			goto try_desc_alloc;
1610 		else {
1611 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1612 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1613 			__func__, tid);
1614 			return QDF_STATUS_E_NOMEM;
1615 		}
1616 	}
1617 
1618 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1619 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1620 			vdev->pdev->ctrl_pdev,
1621 			peer->vdev->vdev_id, peer->mac_addr.raw,
1622 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1623 
1624 	}
1625 	return 0;
1626 }
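
/*
 * A short note on the alignment retry above: when the first allocation is
 * not aligned to hw_qdesc_align, (hw_qdesc_align - 1) extra bytes are
 * allocated and qdf_align() rounds the start address up. For example, with
 * a hypothetical 128-byte alignment requirement:
 *
 *	unaligned start = 0x...1040
 *	aligned start   = qdf_align(0x...1040, 128) = 0x...1080
 *
 * so the aligned descriptor always fits within the oversized allocation.
 */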
1627 
1628 /*
1629  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1630  * after deleting the entries (ie., setting valid=0)
1631  *
1632  * @soc: DP SOC handle
1633  * @cb_ctxt: Callback context
1634  * @reo_status: REO command status
1635  */
1636 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1637 	union hal_reo_status *reo_status)
1638 {
1639 	struct reo_desc_list_node *freedesc =
1640 		(struct reo_desc_list_node *)cb_ctxt;
1641 	uint32_t list_size;
1642 	struct reo_desc_list_node *desc;
1643 	unsigned long curr_ts = qdf_get_system_timestamp();
1644 	uint32_t desc_size, tot_desc_size;
1645 	struct hal_reo_cmd_params params;
1646 
1647 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1648 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1649 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1650 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1651 		return;
1652 	} else if (reo_status->rx_queue_status.header.status !=
1653 		HAL_REO_CMD_SUCCESS) {
1654 		/* Should not happen normally. Just print error for now */
1655 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1656 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1657 			__func__,
1658 			reo_status->rx_queue_status.header.status,
1659 			freedesc->rx_tid.tid);
1660 	}
1661 
1662 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1663 		"%s: rx_tid: %d status: %d", __func__,
1664 		freedesc->rx_tid.tid,
1665 		reo_status->rx_queue_status.header.status);
1666 
1667 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1668 	freedesc->free_ts = curr_ts;
1669 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1670 		(qdf_list_node_t *)freedesc, &list_size);
1671 
1672 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1673 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1674 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1675 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1676 		struct dp_rx_tid *rx_tid;
1677 
1678 		qdf_list_remove_front(&soc->reo_desc_freelist,
1679 				(qdf_list_node_t **)&desc);
1680 		list_size--;
1681 		rx_tid = &desc->rx_tid;
1682 
1683 		/* Flush and invalidate REO descriptor from HW cache: Base and
1684 		 * extension descriptors should be flushed separately */
1685 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1686 			rx_tid->ba_win_size);
1687 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1688 
1689 		/* Flush reo extension descriptors */
1690 		while ((tot_desc_size -= desc_size) > 0) {
1691 			qdf_mem_zero(&params, sizeof(params));
1692 			params.std.addr_lo =
1693 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1694 				tot_desc_size) & 0xffffffff;
1695 			params.std.addr_hi =
1696 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1697 
1698 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1699 							CMD_FLUSH_CACHE,
1700 							&params,
1701 							NULL,
1702 							NULL)) {
1703 				QDF_TRACE(QDF_MODULE_ID_DP,
1704 					QDF_TRACE_LEVEL_ERROR,
1705 					"%s: fail to send CMD_FLUSH_CACHE:"
1706 					"tid %d desc %pK", __func__,
1707 					rx_tid->tid,
1708 					(void *)(rx_tid->hw_qdesc_paddr));
1709 			}
1710 		}
1711 
1712 		/* Flush base descriptor */
1713 		qdf_mem_zero(&params, sizeof(params));
1714 		params.std.need_status = 1;
1715 		params.std.addr_lo =
1716 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1717 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1718 
1719 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1720 							  CMD_FLUSH_CACHE,
1721 							  &params,
1722 							  dp_reo_desc_free,
1723 							  (void *)desc)) {
1724 			union hal_reo_status reo_status;
1725 			/*
1726 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
1727 			 * should be unmapped. The local reo_desc, together with the
1728 			 * TID queue desc, also needs to be freed accordingly.
1729 			 *
1730 			 * Here invoke desc_free function directly to do clean up.
1731 			 */
1732 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1733 				"%s: fail to send REO cmd to flush cache: tid %d",
1734 				__func__, rx_tid->tid);
1735 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1736 			reo_status.fl_cache_status.header.status = 0;
1737 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1738 		}
1739 	}
1740 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1741 }
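
/*
 * Sketch of the flush ordering above (sizes are illustrative): if the base
 * descriptor plus extensions total 3 * desc_size, the loop issues
 * CMD_FLUSH_CACHE for offsets 2 * desc_size and desc_size first, and the
 * final flush at offset 0 (the base descriptor) carries the
 * dp_reo_desc_free callback, so host memory is released only after HW has
 * flushed the entire queue descriptor.
 */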
1742 
1743 /*
1744  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1745  * @peer: Datapath peer handle
1746  * @tid: TID
1747  *
1748  * Return: 0 on success, error code on failure
1749  */
1750 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1751 {
1752 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1753 	struct dp_soc *soc = peer->vdev->pdev->soc;
1754 	struct hal_reo_cmd_params params;
1755 	struct reo_desc_list_node *freedesc =
1756 		qdf_mem_malloc(sizeof(*freedesc));
1757 
1758 	if (!freedesc) {
1759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1760 			"%s: malloc failed for freedesc: tid %d",
1761 			__func__, tid);
1762 		return -ENOMEM;
1763 	}
1764 
1765 	freedesc->rx_tid = *rx_tid;
1766 
1767 	qdf_mem_zero(&params, sizeof(params));
1768 
1769 	params.std.need_status = 1;
1770 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1771 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1772 	params.u.upd_queue_params.update_vld = 1;
1773 	params.u.upd_queue_params.vld = 0;
1774 
1775 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1776 		dp_rx_tid_delete_cb, (void *)freedesc);
1777 
1778 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1779 	rx_tid->hw_qdesc_alloc_size = 0;
1780 	rx_tid->hw_qdesc_paddr = 0;
1781 
1782 	return 0;
1783 }
1784 
1785 #ifdef DP_LFR
1786 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1787 {
1788 	int tid;
1789 
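	/*
	 * TID 0 and the non-QOS TID are set up separately in
	 * dp_peer_rx_init(); set up the remaining TIDs here so that REO
	 * queues already exist when traffic resumes after an LFR roam.
	 */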
1790 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1791 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1792 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1793 			"Setting up TID %d for peer %pK peer->local_id %d",
1794 			tid, peer, peer->local_id);
1795 	}
1796 }
1797 #else
1798 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1799 #endif
1800 /*
1801  * dp_peer_rx_init() - Initialize receive TID state
1802  * @pdev: Datapath pdev
1803  * @peer: Datapath peer
1804  *
1805  */
1806 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1807 {
1808 	int tid;
1809 	struct dp_rx_tid *rx_tid;
1810 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1811 		rx_tid = &peer->rx_tid[tid];
1812 		rx_tid->array = &rx_tid->base;
1813 		rx_tid->base.head = rx_tid->base.tail = NULL;
1814 		rx_tid->tid = tid;
1815 		rx_tid->defrag_timeout_ms = 0;
1816 		rx_tid->ba_win_size = 0;
1817 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1818 
1819 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1820 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1821 
1822 #ifdef notyet /* TODO: See if this is required for exception handling */
1823 		/* invalid sequence number */
1824 		peer->tids_last_seq[tid] = 0xffff;
1825 #endif
1826 	}
1827 
1828 	peer->active_ba_session_cnt = 0;
1829 	peer->hw_buffer_size = 0;
1830 	peer->kill_256_sessions = 0;
1831 
1832 	/* Setup default (non-qos) rx tid queue */
1833 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1834 
1835 	/* Setup rx tid queue for TID 0.
1836 	 * Queues for other TIDs will be set up on receiving the first packet,
1837 	 * which will cause a NULL REO queue error.
1838 	 */
1839 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1840 
1841 	/*
1842 	 * Set up the rest of the TIDs to handle LFR
1843 	 */
1844 	dp_peer_setup_remaining_tids(peer);
1845 
1846 	/*
1847 	 * Set security defaults: no PN check, no security. The target may
1848 	 * send a HTT SEC_IND message to overwrite these defaults.
1849 	 */
1850 	peer->security[dp_sec_ucast].sec_type =
1851 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1852 }
1853 
1854 /*
1855  * dp_peer_rx_cleanup() - Cleanup receive TID state
1856  * @vdev: Datapath vdev
1857  * @peer: Datapath peer
1858  *
1859  */
1860 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1861 {
1862 	int tid;
1863 	uint32_t tid_delete_mask = 0;
1864 
1865 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
1866 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1867 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1868 
1869 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1870 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1871 			dp_rx_tid_delete_wifi3(peer, tid);
1872 
1873 			/* Cleanup defrag related resource */
1874 			dp_rx_defrag_waitlist_remove(peer, tid);
1875 			dp_rx_reorder_flush_frag(peer, tid);
1876 
1877 			tid_delete_mask |= (1 << tid);
1878 		}
1879 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1880 	}
1881 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1882 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1883 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
1884 			peer->vdev->vdev_id, peer->mac_addr.raw,
1885 			tid_delete_mask);
1886 	}
1887 #endif
1888 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
1889 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
1890 }
1891 
1892 /*
1893  * dp_peer_cleanup() - Cleanup peer information
1894  * @vdev: Datapath vdev
1895  * @peer: Datapath peer
1896  *
1897  */
1898 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1899 {
1900 	peer->last_assoc_rcvd = 0;
1901 	peer->last_disassoc_rcvd = 0;
1902 	peer->last_deauth_rcvd = 0;
1903 
1904 	/* cleanup the Rx reorder queues for this peer */
1905 	dp_peer_rx_cleanup(vdev, peer);
1906 }
1907 
1908 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
1909  *                                window size when a request with
1910  *                                64 window size is received.
1911  *                                This is done as a WAR since HW can
1912  *                                have only one setting per peer (64 or 256).
1913  * @peer: Datapath peer
1914  *
1915  * Return: void
1916  */
1917 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1918 {
1919 	uint8_t delba_rcode = 0;
1920 	int tid;
1921 	struct dp_rx_tid *rx_tid = NULL;
1922 
1923 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1924 		rx_tid = &peer->rx_tid[tid];
1925 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1926 
1927 		if (rx_tid->ba_win_size <= 64) {
1928 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1929 			continue;
1930 		} else {
1931 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1932 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1933 				/* send delba */
1934 				if (!rx_tid->delba_tx_status) {
1935 					rx_tid->delba_tx_retry++;
1936 					rx_tid->delba_tx_status = 1;
1937 					rx_tid->delba_rcode =
1938 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1939 					delba_rcode = rx_tid->delba_rcode;
1940 
1941 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1942 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1943 							peer->vdev->pdev->ctrl_pdev,
1944 							peer->ctrl_peer,
1945 							peer->mac_addr.raw,
1946 							tid, peer->vdev->ctrl_vdev,
1947 							delba_rcode);
1948 				} else {
1949 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1950 				}
1951 			} else {
1952 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1953 			}
1954 		}
1955 	}
1956 }
1957 
1958 /*
1959  * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state
1960  *
1961  * @peer: Datapath peer handle
1962  * @tid: TID number
1963  * @status: tx completion status
1964  * Return: 0 on success, error code on failure
1965  */
1966 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
1967 				      uint8_t tid, int status)
1968 {
1969 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1970 	struct dp_rx_tid *rx_tid = NULL;
1971 
1972 	if (!peer || peer->delete_in_progress) {
1973 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1974 			  "%s: Peer is NULL!\n", __func__);
1975 		return QDF_STATUS_E_FAILURE;
1976 	}
1977 	rx_tid = &peer->rx_tid[tid];
1978 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1979 	if (status) {
1980 		rx_tid->num_addba_rsp_failed++;
1981 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1982 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1983 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1984 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1985 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
1986 			 __func__, tid);
1987 		return QDF_STATUS_SUCCESS;
1988 	}
1989 
1990 	rx_tid->num_addba_rsp_success++;
1991 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1992 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1993 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1994 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1995 			__func__, tid);
1996 		return QDF_STATUS_E_FAILURE;
1997 	}
1998 
1999 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2000 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2001 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2002 			  "%s: default route is not set for peer: %pM",
2003 			  __func__, peer->mac_addr.raw);
2004 		return QDF_STATUS_E_FAILURE;
2005 	}
2006 
2007 	/* First Session */
2008 	if (peer->active_ba_session_cnt == 0) {
2009 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2010 			peer->hw_buffer_size = 256;
2011 		else
2012 			peer->hw_buffer_size = 64;
2013 	}
2014 
2015 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2016 
2017 	peer->active_ba_session_cnt++;
2018 
2019 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2020 
2021 	/* Kill any session having 256 buffer size
2022 	 * when 64 buffer size request is received.
2023 	 * Also, latch on to 64 as new buffer size.
2024 	 */
2025 	if (peer->kill_256_sessions) {
2026 		dp_teardown_256_ba_sessions(peer);
2027 		peer->kill_256_sessions = 0;
2028 	}
2029 	return QDF_STATUS_SUCCESS;
2030 }
2031 
2032 /*
2033  * dp_addba_responsesetup_wifi3() - Return ADDBA response parameters
2034  *
2035  * @peer: Datapath peer handle
2036  * @tid: TID number
2037  * @dialogtoken: output dialogtoken
2038  * @statuscode: output status code
2039  * @buffersize: Output BA window size
2040  * @batimeout: Output BA timeout
2041  */
2042 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2043 	uint8_t *dialogtoken, uint16_t *statuscode,
2044 	uint16_t *buffersize, uint16_t *batimeout)
2045 {
2046 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2047 	struct dp_rx_tid *rx_tid = NULL;
2048 
2049 	if (!peer || peer->delete_in_progress) {
2050 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2051 			  "%s: Peer is NULL!\n", __func__);
2052 		return;
2053 	}
2054 	rx_tid = &peer->rx_tid[tid];
2055 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2056 	rx_tid->num_of_addba_resp++;
2057 	/* setup ADDBA response parameters */
2058 	*dialogtoken = rx_tid->dialogtoken;
2059 	*statuscode = rx_tid->statuscode;
2060 	*buffersize = rx_tid->ba_win_size;
2061 	*batimeout  = 0;
2062 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2063 }
2064 
2065 /* dp_check_ba_buffersize() - Check buffer size in request
2066  *                            and latch onto this size based on
2067  *                            size used in first active session.
2068  * @peer: Datapath peer
2069  * @tid: Tid
2070  * @buffersize: Block ack window size
2071  *
2072  * Return: void
2073  */
2074 static void dp_check_ba_buffersize(struct dp_peer *peer,
2075 				   uint16_t tid,
2076 				   uint16_t buffersize)
2077 {
2078 	struct dp_rx_tid *rx_tid = NULL;
2079 
2080 	rx_tid = &peer->rx_tid[tid];
2081 
2082 	if (peer->active_ba_session_cnt == 0) {
2083 		rx_tid->ba_win_size = buffersize;
2084 	} else {
2085 		if (peer->hw_buffer_size == 64) {
2086 			if (buffersize <= 64)
2087 				rx_tid->ba_win_size = buffersize;
2088 			else
2089 				rx_tid->ba_win_size = peer->hw_buffer_size;
2090 		} else if (peer->hw_buffer_size == 256) {
2091 			if (buffersize > 64) {
2092 				rx_tid->ba_win_size = buffersize;
2093 			} else {
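				/*
				 * HW is latched to a 256 window, but a
				 * request for 64 (or less) arrived. HW
				 * supports only one window size per peer,
				 * so drop back to 64 and mark the existing
				 * 256 sessions for teardown (handled via
				 * dp_teardown_256_ba_sessions()).
				 */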
2094 				rx_tid->ba_win_size = buffersize;
2095 				peer->hw_buffer_size = 64;
2096 				peer->kill_256_sessions = 1;
2097 			}
2098 		}
2099 	}
2100 }
2101 
2102 /*
2103  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2104  *
2105  * @peer: Datapath peer handle
2106  * @dialogtoken: dialogtoken from ADDBA frame
2107  * @tid: TID number
2108  * @batimeout: BA timeout
2109  * @buffersize: BA window size
2110  * @startseqnum: Start seq. number received in BA sequence control
2111  *
2112  * Return: 0 on success, error code on failure
2113  */
2114 int dp_addba_requestprocess_wifi3(void *peer_handle,
2115 				  uint8_t dialogtoken,
2116 				  uint16_t tid, uint16_t batimeout,
2117 				  uint16_t buffersize,
2118 				  uint16_t startseqnum)
2119 {
2120 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2121 	struct dp_rx_tid *rx_tid = NULL;
2122 
2123 	if (!peer || peer->delete_in_progress) {
2124 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2125 			  "%s: Peer is NULL!\n", __func__);
2126 		return QDF_STATUS_E_FAILURE;
2127 	}
2128 	rx_tid = &peer->rx_tid[tid];
2129 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2130 	rx_tid->num_of_addba_req++;
2131 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2132 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
2133 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
2134 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2135 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2136 		peer->active_ba_session_cnt--;
2137 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2138 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2139 			  "%s: Rx Tid- %d hw qdesc is already setup",
2140 			__func__, tid);
2141 		return QDF_STATUS_E_FAILURE;
2142 	}
2143 
2144 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2145 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2146 		return QDF_STATUS_E_FAILURE;
2147 	}
2148 
2149 	dp_check_ba_buffersize(peer, tid, buffersize);
2150 
2151 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, startseqnum)) {
2152 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2153 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2154 		return QDF_STATUS_E_FAILURE;
2155 	}
2156 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2157 
2158 	rx_tid->ba_win_size = buffersize;
2159 	rx_tid->dialogtoken = dialogtoken;
2160 	rx_tid->startseqnum = startseqnum;
2161 
2162 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2163 		rx_tid->statuscode = rx_tid->userstatuscode;
2164 	else
2165 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2166 
2167 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2168 
2169 	return QDF_STATUS_SUCCESS;
2170 }
2171 
2172 /*
2173  * dp_set_addba_response() - Set a user defined ADDBA response status code
2174  *
2175  * @peer: Datapath peer handle
2176  * @tid: TID number
2177  * @statuscode: response status code to be set
2178  */
2179 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2180 	uint16_t statuscode)
2181 {
2182 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2183 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2184 
2185 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2186 	rx_tid->userstatuscode = statuscode;
2187 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2188 }
2189 
2190 /*
2191  * dp_delba_process_wifi3() - Process DELBA from peer
2192  * @peer: Datapath peer handle
2193  * @tid: TID number
2194  * @reasoncode: Reason code received in DELBA frame
2195  *
2196  * Return: 0 on success, error code on failure
2197  */
2198 int dp_delba_process_wifi3(void *peer_handle,
2199 	int tid, uint16_t reasoncode)
2200 {
2201 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2202 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2203 
2204 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2205 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2206 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2207 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2208 		return QDF_STATUS_E_FAILURE;
2209 	}
2210 	/* TODO: See if we can delete the existing REO queue descriptor and
2211 	 * replace it with a new one without the queue extension descriptor to
2212 	 * save memory
2213 	 */
2214 	rx_tid->delba_rcode = reasoncode;
2215 	rx_tid->num_of_delba_req++;
2216 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2217 
2218 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2219 	peer->active_ba_session_cnt--;
2220 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2221 	return 0;
2222 }
2223 
2224 /*
2225  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2226  *
2227  * @peer: Datapath peer handle
2228  * @tid: TID number
2229  * @status: tx completion status
2230  * Return: 0 on success, error code on failure
2231  */
2232 
2233 int dp_delba_tx_completion_wifi3(void *peer_handle,
2234 				 uint8_t tid, int status)
2235 {
2236 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2237 	struct dp_rx_tid *rx_tid = NULL;
2238 
2239 	if (!peer || peer->delete_in_progress) {
2240 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2241 			  "%s: Peer is NULL!", __func__);
2242 		return QDF_STATUS_E_FAILURE;
2243 	}
2244 	rx_tid = &peer->rx_tid[tid];
2245 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2246 	if (status) {
2247 		rx_tid->delba_tx_fail_cnt++;
2248 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2249 			rx_tid->delba_tx_retry = 0;
2250 			rx_tid->delba_tx_status = 0;
2251 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2252 		} else {
2253 			rx_tid->delba_tx_retry++;
2254 			rx_tid->delba_tx_status = 1;
2255 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2256 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2257 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2258 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2259 				rx_tid->delba_rcode);
2260 		}
2261 		return QDF_STATUS_SUCCESS;
2262 	} else {
2263 		rx_tid->delba_tx_success_cnt++;
2264 		rx_tid->delba_tx_retry = 0;
2265 		rx_tid->delba_tx_status = 0;
2266 	}
2267 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2268 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2269 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2270 		peer->active_ba_session_cnt--;
2271 	}
2272 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2273 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2274 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2275 	}
2276 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2277 
2278 	return QDF_STATUS_SUCCESS;
2279 }
2280 
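/**
 * dp_rx_discard() - discard msdus received from a partly-deleted peer
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @tid: TID number
 * @msdu_list: list of msdus to be dropped
 *
 * Walks the msdu list, logs each frame and frees the nbufs.
 *
 * Return: none
 */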
2281 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2282 	qdf_nbuf_t msdu_list)
2283 {
2284 	while (msdu_list) {
2285 		qdf_nbuf_t msdu = msdu_list;
2286 
2287 		msdu_list = qdf_nbuf_next(msdu_list);
2288 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2289 			"discard rx %pK from partly-deleted peer %pK "
2290 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
2291 			msdu, peer,
2292 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2293 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2294 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2295 		qdf_nbuf_free(msdu);
2296 	}
2297 }
2298 
2299 
2300 /**
2301  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2302  * @vdev: Datapath vdev
2303  * @peer: Datapath peer handle
2304  * @sec_type: security type
2305  * @rx_pn: Receive pn starting number
2306  *
2307  * Return: none
2308  */
2309 
2310 void
2311 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
2312 {
2313 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2314 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2315 	struct dp_pdev *pdev;
2316 	struct dp_soc *soc;
2317 	int i;
2318 	uint8_t pn_size;
2319 	struct hal_reo_cmd_params params;
2320 
2321 	/* preconditions */
2322 	qdf_assert(vdev);
2323 
2324 	pdev = vdev->pdev;
2325 	soc = pdev->soc;
2326 
2327 
2328 	qdf_mem_zero(&params, sizeof(params));
2329 
2330 	params.std.need_status = 1;
2331 	params.u.upd_queue_params.update_pn_valid = 1;
2332 	params.u.upd_queue_params.update_pn_size = 1;
2333 	params.u.upd_queue_params.update_pn = 1;
2334 	params.u.upd_queue_params.update_pn_check_needed = 1;
2335 	params.u.upd_queue_params.update_svld = 1;
2336 	params.u.upd_queue_params.svld = 0;
2337 
2338 	peer->security[dp_sec_ucast].sec_type = sec_type;
2339 
2340 	switch (sec_type) {
2341 	case cdp_sec_type_tkip_nomic:
2342 	case cdp_sec_type_aes_ccmp:
2343 	case cdp_sec_type_aes_ccmp_256:
2344 	case cdp_sec_type_aes_gcmp:
2345 	case cdp_sec_type_aes_gcmp_256:
2346 		params.u.upd_queue_params.pn_check_needed = 1;
2347 		params.u.upd_queue_params.pn_size = 48;
2348 		pn_size = 48;
2349 		break;
2350 	case cdp_sec_type_wapi:
2351 		params.u.upd_queue_params.pn_check_needed = 1;
2352 		params.u.upd_queue_params.pn_size = 128;
2353 		pn_size = 128;
2354 		if (vdev->opmode == wlan_op_mode_ap) {
2355 			params.u.upd_queue_params.pn_even = 1;
2356 			params.u.upd_queue_params.update_pn_even = 1;
2357 		} else {
2358 			params.u.upd_queue_params.pn_uneven = 1;
2359 			params.u.upd_queue_params.update_pn_uneven = 1;
2360 		}
2361 		break;
2362 	default:
2363 		params.u.upd_queue_params.pn_check_needed = 0;
2364 		pn_size = 0;
2365 		break;
2366 	}
2367 
2368 
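	/*
	 * Apply the PN check parameters to each TID that already has a HW
	 * REO queue descriptor by sending an UPDATE_RX_REO_QUEUE command
	 * per TID; TIDs without a queue descriptor are skipped.
	 */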
2369 	for (i = 0; i < DP_MAX_TIDS; i++) {
2370 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2371 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2372 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2373 			params.std.addr_lo =
2374 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2375 			params.std.addr_hi =
2376 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2377 
2378 			if (sec_type != cdp_sec_type_wapi) {
2379 				params.u.upd_queue_params.update_pn_valid = 0;
2380 			} else {
2381 				/*
2382 				 * Setting PN valid bit for WAPI sec_type,
2383 				 * since WAPI PN has to be started with
2384 				 * predefined value
2385 				 */
2386 				params.u.upd_queue_params.update_pn_valid = 1;
2387 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2388 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2389 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2390 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2391 			}
2392 			rx_tid->pn_size = pn_size;
2393 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2394 				dp_rx_tid_update_cb, rx_tid);
2395 		} else {
2396 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2397 				"PN Check not setup for TID :%d ", i);
2398 		}
2399 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2400 	}
2401 }
2402 
2403 
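/**
 * dp_rx_sec_ind_handler() - handle security indication from the target
 * @soc_handle: Datapath soc handle
 * @peer_id: peer id of the peer
 * @sec_type: security type reported by the target
 * @is_unicast: set for the unicast key, clear for the multicast key
 * @michael_key: TKIP michael key (currently unused, see notyet blocks)
 * @rx_pn: starting Rx PN (currently unused, see notyet blocks)
 *
 * Records the security type in the peer's unicast or multicast security
 * context, overriding the defaults set in dp_peer_rx_init().
 *
 * Return: none
 */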
2404 void
2405 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2406 	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2407 	u_int32_t *rx_pn)
2408 {
2409 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2410 	struct dp_peer *peer;
2411 	int sec_index;
2412 
2413 	peer = dp_peer_find_by_id(soc, peer_id);
2414 	if (!peer) {
2415 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2416 			"Couldn't find peer from ID %d - skipping security inits",
2417 			peer_id);
2418 		return;
2419 	}
2420 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2421 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
2422 		"%s key of type %d",
2423 		peer,
2424 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2425 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2426 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2427 		is_unicast ? "ucast" : "mcast",
2428 		sec_type);
2429 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2430 	peer->security[sec_index].sec_type = sec_type;
2431 #ifdef notyet /* TODO: See if this is required for defrag support */
2432 	/* michael key only valid for TKIP, but for simplicity,
2433 	 * copy it anyway
2434 	 */
2435 	qdf_mem_copy(
2436 		&peer->security[sec_index].michael_key[0],
2437 		michael_key,
2438 		sizeof(peer->security[sec_index].michael_key));
2439 #ifdef BIG_ENDIAN_HOST
2440 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2441 				 sizeof(peer->security[sec_index].michael_key));
2442 #endif /* BIG_ENDIAN_HOST */
2443 #endif
2444 
2445 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2446 	if (sec_type != cdp_sec_type_wapi) {
2447 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
2448 	} else {
2449 		for (i = 0; i < DP_MAX_TIDS; i++) {
2450 			/*
2451 			 * Setting PN valid bit for WAPI sec_type,
2452 			 * since WAPI PN has to be started with predefined value
2453 			 */
2454 			peer->tids_last_pn_valid[i] = 1;
2455 			qdf_mem_copy(
2456 				(u_int8_t *) &peer->tids_last_pn[i],
2457 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2458 			peer->tids_last_pn[i].pn128[1] =
2459 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2460 			peer->tids_last_pn[i].pn128[0] =
2461 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2462 		}
2463 	}
2464 #endif
2465 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2466 	 * all security types and last pn for WAPI) once REO command API
2467 	 * is available
2468 	 */
2469 
2470 	dp_peer_unref_del_find_by_id(peer);
2471 }
2472 
2473 #ifndef CONFIG_WIN
2474 /**
2475  * dp_register_peer() - Register peer into physical device
2476  * @pdev - data path device instance
2477  * @sta_desc - peer description
2478  *
2479  * Register peer into physical device
2480  *
2481  * Return: QDF_STATUS_SUCCESS registration success
2482  *         QDF_STATUS_E_FAULT peer not found
2483  */
2484 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2485 		struct ol_txrx_desc_type *sta_desc)
2486 {
2487 	struct dp_peer *peer;
2488 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2489 
2490 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2491 			sta_desc->sta_id);
2492 	if (!peer)
2493 		return QDF_STATUS_E_FAULT;
2494 
2495 	qdf_spin_lock_bh(&peer->peer_info_lock);
2496 	peer->state = OL_TXRX_PEER_STATE_CONN;
2497 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2498 
2499 	return QDF_STATUS_SUCCESS;
2500 }
2501 
2502 /**
2503  * dp_clear_peer() - remove peer from physical device
2504  * @pdev - data path device instance
2505  * @sta_id - local peer id
2506  *
2507  * remove peer from physical device
2508  *
2509  * Return: QDF_STATUS_SUCCESS peer was removed successfully
2510  *         QDF_STATUS_E_FAULT peer not found
2511  */
2512 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2513 {
2514 	struct dp_peer *peer;
2515 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2516 
2517 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2518 	if (!peer)
2519 		return QDF_STATUS_E_FAULT;
2520 
2521 	qdf_spin_lock_bh(&peer->peer_info_lock);
2522 	peer->state = OL_TXRX_PEER_STATE_DISC;
2523 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2524 
2525 	return QDF_STATUS_SUCCESS;
2526 }
2527 
2528 /**
2529  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2530  * @pdev - data path device instance
2531  * @vdev - virtual interface instance
2532  * @peer_addr - peer mac address
2533  * @local_id - local peer id of the peer with the target mac address
2534  *
2535  * Find peer by peer mac address within vdev
2536  *
2537  * Return: peer instance void pointer
2538  *         NULL cannot find target peer
2539  */
2540 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2541 		struct cdp_vdev *vdev_handle,
2542 		uint8_t *peer_addr, uint8_t *local_id)
2543 {
2544 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2545 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2546 	struct dp_peer *peer;
2547 
2548 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2549 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2550 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2551 
2552 	if (!peer)
2553 		return NULL;
2554 
2555 	if (peer->vdev != vdev) {
2556 		dp_peer_unref_delete(peer);
2557 		return NULL;
2558 	}
2559 
2560 	*local_id = peer->local_id;
2561 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2562 
2563 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2564 	 * Decrement it here.
2565 	 */
2566 	dp_peer_unref_delete(peer);
2567 
2568 	return peer;
2569 }
2570 
2571 /**
2572  * dp_local_peer_id() - Find local peer id within peer instance
2573  * @peer - peer instance
2574  *
2575  * Find local peer id within peer instance
2576  *
2577  * Return: local peer id
2578  */
2579 uint16_t dp_local_peer_id(void *peer)
2580 {
2581 	return ((struct dp_peer *)peer)->local_id;
2582 }
2583 
2584 /**
2585  * dp_peer_find_by_local_id() - Find peer by local peer id
2586  * @pdev - data path device instance
2587  * @local_peer_id - local peer id want to find
2588  *
2589  * Find peer by local peer id within physical device
2590  *
2591  * Return: peer instance void pointer
2592  *         NULL cannot find target peer
2593  */
2594 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2595 {
2596 	struct dp_peer *peer;
2597 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2598 
2599 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2600 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2601 				   "Incorrect local id %u", local_id);
2602 		return NULL;
2603 	}
2604 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2605 	peer = pdev->local_peer_ids.map[local_id];
2606 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2607 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2608 	return peer;
2609 }
2610 
2611 /**
2612  * dp_peer_state_update() - update peer local state
2613  * @pdev - data path device instance
2614  * @peer_addr - peer mac address
2615  * @state - new peer local state
2616  *
2617  * update peer local state
2618  *
2619  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
2620  */
2621 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2622 		enum ol_txrx_peer_state state)
2623 {
2624 	struct dp_peer *peer;
2625 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2626 
2627 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2628 	if (NULL == peer) {
2629 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2630 		"Failed to find peer for: [%pM]", peer_mac);
2631 		return QDF_STATUS_E_FAILURE;
2632 	}
2633 	peer->state = state;
2634 
2635 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2636 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2637 	 * Decrement it here.
2638 	 */
2639 	dp_peer_unref_delete(peer);
2640 
2641 	return QDF_STATUS_SUCCESS;
2642 }
2643 
2644 /**
2645  * dp_get_vdevid() - Get virtual interface id which peer registered
2646  * @peer - peer instance
2647  * @vdev_id - virtual interface id which peer registered
2648  *
2649  * Get virtual interface id which peer registered
2650  *
2651  * Return: QDF_STATUS_SUCCESS on success
2652  */
2653 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2654 {
2655 	struct dp_peer *peer = peer_handle;
2656 
2657 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2658 			peer, peer->vdev, peer->vdev->vdev_id);
2659 	*vdev_id = peer->vdev->vdev_id;
2660 	return QDF_STATUS_SUCCESS;
2661 }
2662 
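/**
 * dp_get_vdev_by_sta_id() - Get vdev from local peer (sta) id
 * @pdev - data path device instance
 * @sta_id - local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL if sta_id is invalid or no matching peer is found
 */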
2663 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2664 				       uint8_t sta_id)
2665 {
2666 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2667 	struct dp_peer *peer = NULL;
2668 
2669 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2670 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2671 			  "Invalid sta id passed");
2672 		return NULL;
2673 	}
2674 
2675 	if (!pdev) {
2676 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2677 			  "PDEV not found for sta_id [%d]", sta_id);
2678 		return NULL;
2679 	}
2680 
2681 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2682 	if (!peer) {
2683 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2684 			  "PEER [%d] not found", sta_id);
2685 		return NULL;
2686 	}
2687 
2688 	return (struct cdp_vdev *)peer->vdev;
2689 }
2690 
2691 /**
2692  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2693  * @peer - peer instance
2694  *
2695  * Get virtual interface instance which peer belongs
2696  *
2697  * Return: virtual interface instance pointer
2698  *         NULL in case cannot find
2699  */
2700 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2701 {
2702 	struct dp_peer *peer = peer_handle;
2703 
2704 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2705 	return (struct cdp_vdev *)peer->vdev;
2706 }
2707 
2708 /**
2709  * dp_peer_get_peer_mac_addr() - Get peer mac address
2710  * @peer - peer instance
2711  *
2712  * Get peer mac address
2713  *
2714  * Return: peer mac address pointer
2715  *         NULL in case cannot find
2716  */
2717 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2718 {
2719 	struct dp_peer *peer = peer_handle;
2720 	uint8_t *mac;
2721 
2722 	mac = peer->mac_addr.raw;
2723 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2724 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2725 	return peer->mac_addr.raw;
2726 }
2727 
2728 /**
2729  * dp_get_peer_state() - Get local peer state
2730  * @peer - peer instance
2731  *
2732  * Get local peer state
2733  *
2734  * Return: peer status
2735  */
2736 int dp_get_peer_state(void *peer_handle)
2737 {
2738 	struct dp_peer *peer = peer_handle;
2739 
2740 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2741 	return peer->state;
2742 }
2743 
2744 /**
2745  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2746  * @pdev - data path device instance
2747  *
2748  * local peer id pool alloc for physical device
2749  *
2750  * Return: none
2751  */
2752 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2753 {
2754 	int i;
2755 
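	/*
	 * The pool[] array encodes a singly linked freelist of local peer
	 * IDs: pool[i] holds the index of the next free ID, freelist holds
	 * the head, and the last entry points to itself as the end marker.
	 * Illustrative initial state with 4 IDs: freelist = 0 and
	 * pool = {1, 2, 3, 4, 4}.
	 */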
2756 	/* point the freelist to the first ID */
2757 	pdev->local_peer_ids.freelist = 0;
2758 
2759 	/* link each ID to the next one */
2760 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2761 		pdev->local_peer_ids.pool[i] = i + 1;
2762 		pdev->local_peer_ids.map[i] = NULL;
2763 	}
2764 
2765 	/* link the last ID to itself, to mark the end of the list */
2766 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2767 	pdev->local_peer_ids.pool[i] = i;
2768 
2769 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2770 	DP_TRACE(INFO, "Peer pool init");
2771 }
2772 
2773 /**
2774  * dp_local_peer_id_alloc() - allocate local peer id
2775  * @pdev - data path device instance
2776  * @peer - new peer instance
2777  *
2778  * allocate local peer id
2779  *
2780  * Return: none
2781  */
2782 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2783 {
2784 	int i;
2785 
2786 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2787 	i = pdev->local_peer_ids.freelist;
2788 	if (pdev->local_peer_ids.pool[i] == i) {
2789 		/* the list is empty, except for the list-end marker */
2790 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2791 	} else {
2792 		/* take the head ID and advance the freelist */
2793 		peer->local_id = i;
2794 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2795 		pdev->local_peer_ids.map[i] = peer;
2796 	}
2797 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2798 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2799 }
2800 
2801 /**
2802  * dp_local_peer_id_free() - remove local peer id
2803  * @pdev - data path device instance
2804  * @peer - peer instance should be removed
2805  *
2806  * remove local peer id
2807  *
2808  * Return: none
2809  */
2810 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2811 {
2812 	int i = peer->local_id;
2813 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2814 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2815 		return;
2816 	}
2817 
2818 	/* put this ID on the head of the freelist */
2819 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2820 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2821 	pdev->local_peer_ids.freelist = i;
2822 	pdev->local_peer_ids.map[i] = NULL;
2823 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2824 }
2825 #endif
2826 
2827 /**
2828  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
2829  * @soc_handle: DP SOC handle
2830  * @peer_id: peer_id of the peer
2831  * @peer_mac: buffer to hold the peer mac address
2832  * Return: vdev_id of the vap
2833  */
2834 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2835 		uint16_t peer_id, uint8_t *peer_mac)
2836 {
2837 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2838 	struct dp_peer *peer;
2839 	uint8_t vdev_id;
2840 
2841 	peer = dp_peer_find_by_id(soc, peer_id);
2842 
2843 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2844 			"soc %pK peer_id %d", soc, peer_id);
2845 
2846 	if (!peer) {
2847 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2848 				"peer not found ");
2849 		return CDP_INVALID_VDEV_ID;
2850 	}
2851 
2852 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
2853 	vdev_id = peer->vdev->vdev_id;
2854 
2855 	dp_peer_unref_del_find_by_id(peer);
2856 
2857 	return vdev_id;
2858 }
2859 
2860 /**
2861  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
2862  * @peer: DP peer handle
2863  * @dp_stats_cmd_cb: REO command callback function
2864  * @cb_ctxt: Callback context
2865  *
2866  * Return: none
2867  */
2868 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2869 			void *cb_ctxt)
2870 {
2871 	struct dp_soc *soc = peer->vdev->pdev->soc;
2872 	struct hal_reo_cmd_params params;
2873 	int i;
2874 
2875 	if (!dp_stats_cmd_cb)
2876 		return;
2877 
2878 	qdf_mem_zero(&params, sizeof(params));
2879 	for (i = 0; i < DP_MAX_TIDS; i++) {
2880 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2881 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2882 			params.std.need_status = 1;
2883 			params.std.addr_lo =
2884 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2885 			params.std.addr_hi =
2886 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2887 
2888 			if (cb_ctxt) {
2889 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2890 					&params, dp_stats_cmd_cb, cb_ctxt);
2891 			} else {
2892 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2893 					&params, dp_stats_cmd_cb, rx_tid);
2894 			}
2895 
2896 			/* Flush REO descriptor from HW cache to update stats
2897 			 * in descriptor memory. This is to help debugging */
2898 			qdf_mem_zero(&params, sizeof(params));
2899 			params.std.need_status = 0;
2900 			params.std.addr_lo =
2901 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2902 			params.std.addr_hi =
2903 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2904 			params.u.fl_cache_params.flush_no_inval = 1;
2905 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2906 				NULL);
2907 		}
2908 	}
2909 }
2910 
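/**
 * dp_set_michael_key() - set michael key for the peer
 * @peer: Datapath peer handle
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: michael key to be copied
 *
 * Stores the TKIP michael key in the peer's unicast or multicast
 * security context.
 *
 * Return: none
 */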
2911 void dp_set_michael_key(struct cdp_peer *peer_handle,
2912 			bool is_unicast, uint32_t *key)
2913 {
2914 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2915 	uint8_t sec_index = is_unicast ? 1 : 0;
2916 
2917 	if (!peer) {
2918 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2919 			  "peer not found ");
2920 		return;
2921 	}
2922 
2923 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
2924 		     key, IEEE80211_WEP_MICLEN);
2925 }
2926 
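/**
 * dp_peer_find_by_id_valid() - check if a peer exists for the given id
 * @soc: Datapath soc handle
 * @peer_id: peer id of the peer
 *
 * Return: true if a valid peer is mapped to the peer_id, false otherwise
 */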
2927 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
2928 {
2929 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
2930 
2931 	if (peer) {
2932 		/*
2933 		 * Decrement the peer ref which is taken as part of
2934 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
2935 		 */
2936 		dp_peer_unref_del_find_by_id(peer);
2937 
2938 		return true;
2939 	}
2940 
2941 	return false;
2942 }
2943