xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {}
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&.
60 		 * Because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
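
/*
 * Illustrative note (added for clarity, not in the original source): like
 * qdf_mem_cmp(), this helper returns 0 when the two addresses match, so
 * callers test for equality with:
 *	if (dp_peer_find_mac_addr_cmp(&a, &b) == 0) { ...addresses match... }
 */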
69 
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
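
/*
 * Worked example (illustrative, not part of the driver logic):
 * dp_log2_ceil(5): tmp walks 5 -> 2 -> 1 -> 0, giving log2 = 2; since
 * (1 << 2) != 5, the result is bumped to 3.
 * dp_log2_ceil(8): tmp walks 8 -> 4 -> 2 -> 1 -> 0, giving log2 = 3; since
 * (1 << 3) == 8, the result stays 3.
 */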
110 
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
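
/*
 * Note (added for clarity): a single dp_peer may be associated with up to
 * MAX_NUM_PEER_ID_PER_PEER firmware peer IDs; this helper records a newly
 * mapped ID in the first free slot and fails once all slots are in use.
 */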
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
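
/*
 * Sizing example (illustrative, the value 33 is made up): with
 * max_peers = 33, DP_PEER_HASH_LOAD_MULT = 2 and DP_PEER_HASH_LOAD_SHIFT = 0,
 * hash_elems starts at 66, dp_log2_ceil(66) = 7, and the table is rounded up
 * to 1 << 7 = 128 bins with peer_hash.mask = 127.
 */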
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
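
/*
 * Note (added for clarity): the 48-bit MAC address is treated as three
 * 16-bit words that are XORed together, folded once by idx_bits and then
 * masked, so a given address always maps to the same hash bin on both the
 * add and find paths.
 */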
175 
176 
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
229 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
230 				       struct dp_ast_entry *ast)
231 {
232 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
233 
234 	if (ast->cp_ctx && cdp_soc->ol_ops->peer_del_wds_cp_ctx)
235 		cdp_soc->ol_ops->peer_del_wds_cp_ctx(ast->cp_ctx);
236 }
237 #else
238 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
239 				       struct dp_ast_entry *ast)
240 {
241 }
242 #endif
243 /*
244  * dp_peer_ast_hash_detach() - Free AST Hash table
245  * @soc: SoC handle
246  *
247  * Return: None
248  */
249 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
250 {
251 	unsigned int index;
252 	struct dp_ast_entry *ast, *ast_next;
253 
254 	if (!soc->ast_hash.mask)
255 		return;
256 
257 	for (index = 0; index <= soc->ast_hash.mask; index++) {
258 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
259 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
260 					   hash_list_elem, ast_next) {
261 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
262 					     hash_list_elem);
263 				dp_peer_ast_cleanup(soc, ast);
264 				qdf_mem_free(ast);
265 			}
266 		}
267 	}
268 
269 	qdf_mem_free(soc->ast_hash.bins);
270 }
271 
272 /*
273  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
274  * @soc: SoC handle
275  * @mac_addr: MAC address of the peer
276  * Return: AST hash
277  */
278 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
279 	union dp_align_mac_addr *mac_addr)
280 {
281 	uint32_t index;
282 
283 	index =
284 		mac_addr->align2.bytes_ab ^
285 		mac_addr->align2.bytes_cd ^
286 		mac_addr->align2.bytes_ef;
287 	index ^= index >> soc->ast_hash.idx_bits;
288 	index &= soc->ast_hash.mask;
289 	return index;
290 }
291 
292 /*
293  * dp_peer_ast_hash_add() - Add AST entry into hash table
294  * @soc: SoC handle
295  * @ase: AST entry to add
296  * This function adds the AST entry into SoC AST hash table
297  * It assumes caller has taken the ast lock to protect the access to this table
298  *
299  * Return: None
300  */
301 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
302 		struct dp_ast_entry *ase)
303 {
304 	uint32_t index;
305 
306 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
307 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
308 }
309 
310 /*
311  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
312  * @soc: SoC handle
313  *
314  * @ase: AST entry to remove
315  * It assumes caller has taken the ast lock to protect the access to this table
316  *
317  * Return: None
318  */
319 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
320 		struct dp_ast_entry *ase)
321 {
322 	unsigned index;
323 	struct dp_ast_entry *tmpase;
324 	int found = 0;
325 
326 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
327 	/* Check if the bin list is not empty before delete */
328 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
329 
330 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
331 		if (tmpase == ase) {
332 			found = 1;
333 			break;
334 		}
335 	}
336 
337 	QDF_ASSERT(found);
338 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
339 }
340 
341 /*
342  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
343  *				       and pdev id
344  * @soc: SoC handle
345  * @ast_mac_addr: mac address
346  * @pdev_id: pdev_id
347  *
348  * It assumes caller has taken the ast lock to protect the access to
349  * AST hash table
350  *
351  * Return: AST entry
352  */
353 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
354 						     uint8_t *ast_mac_addr,
355 						     uint8_t pdev_id)
356 {
357 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
358 	uint32_t index;
359 	struct dp_ast_entry *ase;
360 
361 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
362 		     ast_mac_addr, DP_MAC_ADDR_LEN);
363 	mac_addr = &local_mac_addr_aligned;
364 
365 	index = dp_peer_ast_hash_index(soc, mac_addr);
366 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
367 		if ((pdev_id == ase->pdev_id) &&
368 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
369 			return ase;
370 		}
371 	}
372 
373 	return NULL;
374 }
375 
376 /*
377  * dp_peer_ast_hash_find() - Find AST entry by MAC address
378  * @soc: SoC handle
379  * @ast_mac_addr: MAC address to look up
380  * It assumes caller has taken the ast lock to protect the access to
381  * AST hash table
382  *
383  * Return: AST entry
384  */
385 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
386 						uint8_t *ast_mac_addr)
387 {
388 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
389 	unsigned index;
390 	struct dp_ast_entry *ase;
391 
392 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
393 			ast_mac_addr, DP_MAC_ADDR_LEN);
394 	mac_addr = &local_mac_addr_aligned;
395 
396 	index = dp_peer_ast_hash_index(soc, mac_addr);
397 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
398 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
399 			return ase;
400 		}
401 	}
402 
403 	return NULL;
404 }
405 
406 /*
407  * dp_peer_map_ast() - Map the ast entry with HW AST Index
408  * @soc: SoC handle
409  * @peer: peer to which ast node belongs
410  * @mac_addr: MAC address of ast node
411  * @hw_peer_id: HW AST Index returned by target in peer map event
412  * @vdev_id: vdev id for VAP to which the peer belongs to
413  * @ast_hash: ast hash value in HW
414  *
415  * Return: None
416  */
417 static inline void dp_peer_map_ast(struct dp_soc *soc,
418 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
419 	uint8_t vdev_id, uint16_t ast_hash)
420 {
421 	struct dp_ast_entry *ast_entry;
422 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
423 	bool ast_entry_found = FALSE;
424 
425 	if (!peer) {
426 		return;
427 	}
428 
429 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
430 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
431 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
432 		mac_addr[1], mac_addr[2], mac_addr[3],
433 		mac_addr[4], mac_addr[5]);
434 
435 	qdf_spin_lock_bh(&soc->ast_lock);
436 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
437 		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
438 				DP_MAC_ADDR_LEN))) {
439 			ast_entry->ast_idx = hw_peer_id;
440 			soc->ast_table[hw_peer_id] = ast_entry;
441 			ast_entry->is_active = TRUE;
442 			peer_type = ast_entry->type;
443 			ast_entry_found = TRUE;
444 			ast_entry->ast_hash_value = ast_hash;
445 		}
446 	}
447 
448 	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
449 		if (soc->cdp_soc.ol_ops->peer_map_event) {
450 			soc->cdp_soc.ol_ops->peer_map_event(
451 			soc->ctrl_psoc, peer->peer_ids[0],
452 			hw_peer_id, vdev_id,
453 			mac_addr, peer_type, ast_hash);
454 		}
455 	} else {
456 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
457 			"AST entry not found");
458 	}
459 
460 	qdf_spin_unlock_bh(&soc->ast_lock);
461 	return;
462 }
463 
464 /*
465  * dp_peer_add_ast() - Allocate and add AST entry into peer list
466  * @soc: SoC handle
467  * @peer: peer to which ast node belongs
468  * @mac_addr: MAC address of ast node
469  * @is_self: Is this base AST entry with peer mac address
470  *
471  * This API is used by WDS source port learning function to
472  * add a new AST entry into peer AST list
473  *
474  * Return: 0 if new entry is allocated,
475  *        -1 if entry add failed
476  */
477 int dp_peer_add_ast(struct dp_soc *soc,
478 			struct dp_peer *peer,
479 			uint8_t *mac_addr,
480 			enum cdp_txrx_ast_entry_type type,
481 			uint32_t flags)
482 {
483 	struct dp_ast_entry *ast_entry;
484 	struct dp_vdev *vdev = peer->vdev;
485 	struct dp_pdev *pdev = NULL;
486 	uint8_t next_node_mac[6];
487 	int  ret = -1;
488 
489 	if (!vdev) {
490 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
491 			FL("Peer's vdev is NULL"));
492 		QDF_ASSERT(0);
493 		return ret;
494 	}
495 
496 	pdev = vdev->pdev;
497 
498 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
499 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
500 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
501 		mac_addr[3], mac_addr[4], mac_addr[5]);
502 
503 	qdf_spin_lock_bh(&soc->ast_lock);
504 
505 	/* If the AST entry already exists, just return from here.
506 	 * An AST entry with the same MAC address can exist on different
507 	 * radios; if ast_override support is enabled, search by pdev id in
508 	 * this case.
509 	 */
510 	if (soc->ast_override_support) {
511 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
512 							    pdev->pdev_id);
513 		if (ast_entry) {
514 			qdf_spin_unlock_bh(&soc->ast_lock);
515 			return 0;
516 		}
517 	} else {
518 		ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
519 
520 		if (ast_entry) {
521 			if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) {
522 				ast_entry->is_active = TRUE;
523 				qdf_spin_unlock_bh(&soc->ast_lock);
524 				return 0;
525 			}
526 
527 			/*
528 			 * WAR for the HK 1.x AST issue:
529 			 * If an AST entry with the same MAC address already
530 			 * exists and is mapped to a different radio, and
531 			 * the current radio is the primary radio, delete
532 			 * the existing AST entry and return.
533 			 *
534 			 * A new AST entry will be created again on the next
535 			 * SA_invalid frame.
536 			 */
537 			if ((ast_entry->pdev_id != vdev->pdev->pdev_id) &&
538 			    vdev->pdev->is_primary) {
539 				QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
540 					  "Deleting ast_pdev=%d pdev=%d addr=%pM\n",
541 					  ast_entry->pdev_id,
542 					  vdev->pdev->pdev_id, mac_addr);
543 				dp_peer_del_ast(soc, ast_entry);
544 			}
545 
546 			qdf_spin_unlock_bh(&soc->ast_lock);
547 			return 0;
548 		}
549 	}
550 
551 	ast_entry = (struct dp_ast_entry *)
552 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
553 
554 	if (!ast_entry) {
555 		qdf_spin_unlock_bh(&soc->ast_lock);
556 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
557 			FL("fail to allocate ast_entry"));
558 		QDF_ASSERT(0);
559 		return ret;
560 	}
561 
562 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
563 	ast_entry->peer = peer;
564 	ast_entry->pdev_id = vdev->pdev->pdev_id;
565 	ast_entry->vdev_id = vdev->vdev_id;
566 
567 	switch (type) {
568 	case CDP_TXRX_AST_TYPE_STATIC:
569 		peer->self_ast_entry = ast_entry;
570 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
571 		if (peer->vdev->opmode == wlan_op_mode_sta)
572 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
573 		break;
574 	case CDP_TXRX_AST_TYPE_SELF:
575 		peer->self_ast_entry = ast_entry;
576 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
577 		break;
578 	case CDP_TXRX_AST_TYPE_WDS:
579 		ast_entry->next_hop = 1;
580 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
581 		break;
582 	case CDP_TXRX_AST_TYPE_WDS_HM:
583 		ast_entry->next_hop = 1;
584 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
585 		break;
586 	case CDP_TXRX_AST_TYPE_MEC:
587 		ast_entry->next_hop = 1;
588 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
589 		break;
590 	default:
591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
592 			FL("Incorrect AST entry type"));
593 	}
594 
595 	ast_entry->is_active = TRUE;
596 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
597 	DP_STATS_INC(soc, ast.added, 1);
598 	dp_peer_ast_hash_add(soc, ast_entry);
599 	qdf_spin_unlock_bh(&soc->ast_lock);
600 
601 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
602 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
603 	else
604 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
605 
606 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
607 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
608 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
609 		if (QDF_STATUS_SUCCESS ==
610 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
611 				peer->vdev->osif_vdev,
612 				mac_addr,
613 				next_node_mac,
614 				flags))
615 			return 0;
616 	}
617 
618 	return ret;
619 }
620 
621 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
622 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
623 {
624 	struct dp_peer *peer = ast_entry->peer;
625 
626 	if (ast_entry->next_hop) {
627 		dp_peer_ast_send_wds_del(soc, ast_entry);
628 	} else {
629 		soc->ast_table[ast_entry->ast_idx] = NULL;
630 		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
631 
632 		if (ast_entry == peer->self_ast_entry)
633 			peer->self_ast_entry = NULL;
634 
635 		DP_STATS_INC(soc, ast.deleted, 1);
636 		dp_peer_ast_hash_remove(soc, ast_entry);
637 		qdf_mem_free(ast_entry);
638 	}
639 }
640 #else
641 /*
642  * dp_peer_del_ast() - Delete and free AST entry
643  * @soc: SoC handle
644  * @ast_entry: AST entry of the node
645  *
646  * This function removes the AST entry from peer and soc tables
647  * It assumes caller has taken the ast lock to protect the access to these
648  * tables
649  *
650  * Return: None
651  */
652 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
653 {
654 	struct dp_peer *peer = ast_entry->peer;
655 
656 	if (ast_entry->next_hop)
657 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
658 						ast_entry->mac_addr.raw);
659 
660 	soc->ast_table[ast_entry->ast_idx] = NULL;
661 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
662 
663 	if (ast_entry == peer->self_ast_entry)
664 		peer->self_ast_entry = NULL;
665 
666 	DP_STATS_INC(soc, ast.deleted, 1);
667 	dp_peer_ast_hash_remove(soc, ast_entry);
668 	qdf_mem_free(ast_entry);
669 }
670 #endif
671 
672 /*
673  * dp_peer_update_ast() - Update the peer associated with an AST entry
674  * @soc: SoC handle
675  * @peer: peer to which ast node belongs
676  * @ast_entry: AST entry of the node
677  * @flags: wds or hmwds
678  *
679  * This function updates the AST entry to the roamed peer and soc tables.
680  * It assumes caller has taken the ast lock to protect the access to these
681  * tables
682  *
683  * Return: 0 if ast entry is updated successfully
684  *         -1 failure
685  */
686 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
687 		       struct dp_ast_entry *ast_entry, uint32_t flags)
688 {
689 	int ret = -1;
690 	struct dp_peer *old_peer;
691 
692 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
693 		(ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
694 		(ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS))
695 		return 0;
696 
697 	old_peer = ast_entry->peer;
698 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
699 
700 	ast_entry->peer = peer;
701 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
702 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
703 	ast_entry->vdev_id = peer->vdev->vdev_id;
704 	ast_entry->is_active = TRUE;
705 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
706 
707 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
708 				peer->vdev->osif_vdev,
709 				ast_entry->mac_addr.raw,
710 				peer->mac_addr.raw,
711 				flags);
712 
713 	return ret;
714 }
715 
716 /*
717  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
718  * @soc: SoC handle
719  * @ast_entry: AST entry of the node
720  *
721  * This function gets the pdev_id from the ast entry.
722  *
723  * Return: (uint8_t) pdev_id
724  */
725 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
726 				struct dp_ast_entry *ast_entry)
727 {
728 	return ast_entry->pdev_id;
729 }
730 
731 /*
732  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
733  * @soc: SoC handle
734  * @ast_entry: AST entry of the node
735  *
736  * This function gets the next hop from the ast entry.
737  *
738  * Return: (uint8_t) next_hop
739  */
740 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
741 				struct dp_ast_entry *ast_entry)
742 {
743 	return ast_entry->next_hop;
744 }
745 
746 /*
747  * dp_peer_ast_set_type() - set type in the ast entry
748  * @soc: SoC handle
749  * @ast_entry: AST entry of the node
750  *
751  * This function sets the type in the ast entry.
752  *
753  * Return: None
754  */
755 void dp_peer_ast_set_type(struct dp_soc *soc,
756 				struct dp_ast_entry *ast_entry,
757 				enum cdp_txrx_ast_entry_type type)
758 {
759 	ast_entry->type = type;
760 }
761 
762 #else
763 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
764 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
765 		uint32_t flags)
766 {
767 	return 1;
768 }
769 
770 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
771 {
772 }
773 
774 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
775 			struct dp_ast_entry *ast_entry, uint32_t flags)
776 {
777 	return 1;
778 }
779 
780 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
781 						uint8_t *ast_mac_addr)
782 {
783 	return NULL;
784 }
785 
786 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
787 {
788 	return 0;
789 }
790 
791 static inline void dp_peer_map_ast(struct dp_soc *soc,
792 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
793 	uint8_t vdev_id, uint16_t ast_hash)
794 {
795 	return;
796 }
797 
798 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
799 {
800 }
801 
802 void dp_peer_ast_set_type(struct dp_soc *soc,
803 				struct dp_ast_entry *ast_entry,
804 				enum cdp_txrx_ast_entry_type type)
805 {
806 }
807 
808 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
809 				struct dp_ast_entry *ast_entry)
810 {
811 	return 0xff;
812 }
813 
814 
815 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
816 				struct dp_ast_entry *ast_entry)
817 {
818 	return 0xff;
819 }
820 #endif
821 
822 #if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
823 void dp_peer_ast_set_cp_ctx(struct dp_soc *soc,
824 			    struct dp_ast_entry *ast_entry,
825 			    void *cp_ctx)
826 {
827 	ast_entry->cp_ctx = cp_ctx;
828 }
829 
830 void *dp_peer_ast_get_cp_ctx(struct dp_soc *soc,
831 			     struct dp_ast_entry *ast_entry)
832 {
833 	void *cp_ctx = NULL;
834 
835 	cp_ctx = ast_entry->cp_ctx;
836 	ast_entry->cp_ctx = NULL;
837 
838 	return cp_ctx;
839 }
840 
841 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
842 			      struct dp_ast_entry *ast_entry)
843 {
844 	struct dp_peer *peer = ast_entry->peer;
845 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
846 
847 	if (!ast_entry->wmi_sent) {
848 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
849 						    ast_entry->mac_addr.raw);
850 		ast_entry->wmi_sent = true;
851 	}
852 }
853 
854 bool dp_peer_ast_get_wmi_sent(struct dp_soc *soc,
855 			      struct dp_ast_entry *ast_entry)
856 {
857 	return ast_entry->wmi_sent;
858 }
859 
860 void dp_peer_ast_free_entry(struct dp_soc *soc,
861 			    struct dp_ast_entry *ast_entry)
862 {
863 	struct dp_peer *peer = ast_entry->peer;
864 
865 	soc->ast_table[ast_entry->ast_idx] = NULL;
866 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
867 	DP_STATS_INC(soc, ast.deleted, 1);
868 	dp_peer_ast_hash_remove(soc, ast_entry);
869 	qdf_mem_free(ast_entry);
870 }
871 #endif
872 
873 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
874 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
875 {
876 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
877 	unsigned index;
878 	struct dp_peer *peer;
879 
880 	if (mac_addr_is_aligned) {
881 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
882 	} else {
883 		qdf_mem_copy(
884 			&local_mac_addr_aligned.raw[0],
885 			peer_mac_addr, DP_MAC_ADDR_LEN);
886 		mac_addr = &local_mac_addr_aligned;
887 	}
888 	index = dp_peer_find_hash_index(soc, mac_addr);
889 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
890 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
891 #if ATH_SUPPORT_WRAP
892 		/* ProxySTA may have multiple BSS peers with the same MAC address;
893 		 * the modified find takes care of locating the correct BSS peer.
894 		 */
895 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
896 			((peer->vdev->vdev_id == vdev_id) ||
897 			 (vdev_id == DP_VDEV_ALL))) {
898 #else
899 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
900 #endif
901 			/* found it - increment the ref count before releasing
902 			 * the lock
903 			 */
904 			qdf_atomic_inc(&peer->ref_cnt);
905 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
906 			return peer;
907 		}
908 	}
909 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
910 	return NULL; /* failure */
911 }
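
/*
 * Usage sketch (illustrative, not part of this file): the reference taken
 * above must be dropped by the caller once it is done with the peer, e.g.
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 */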
912 
913 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
914 {
915 	unsigned index;
916 	struct dp_peer *tmppeer = NULL;
917 	int found = 0;
918 
919 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
920 	/* Check if the bin list is not empty before delete */
921 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
922 	/*
923 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
924 	 * by the caller.
925 	 * The caller needs to hold the lock from the time the peer object's
926 	 * reference count is decremented and tested up through the time the
927 	 * reference to the peer object is removed from the hash table, by
928 	 * this function.
929 	 * Holding the lock only while removing the peer object reference
930 	 * from the hash table keeps the hash table consistent, but does not
931 	 * protect against a new HL tx context starting to use the peer object
932 	 * if it looks up the peer object from its MAC address just after the
933 	 * peer ref count is decremented to zero, but just before the peer
934 	 * object reference is removed from the hash table.
935 	 */
936 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
937 		if (tmppeer == peer) {
938 			found = 1;
939 			break;
940 		}
941 	}
942 	QDF_ASSERT(found);
943 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
944 }
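
/*
 * Illustrative caller pattern (sketch only; the real logic lives in
 * dp_peer_unref_delete): peer_ref_mutex must be held across the ref-count
 * test and the hash removal, e.g.
 *
 *	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 *	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 *		dp_peer_find_hash_remove(soc, peer);
 *		...
 *	}
 *	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 */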
945 
946 void dp_peer_find_hash_erase(struct dp_soc *soc)
947 {
948 	int i;
949 
950 	/*
951 	 * Not really necessary to take peer_ref_mutex lock - by this point,
952 	 * it's known that the soc is no longer in use.
953 	 */
954 	for (i = 0; i <= soc->peer_hash.mask; i++) {
955 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
956 			struct dp_peer *peer, *peer_next;
957 
958 			/*
959 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
960 			 * memory access violation after peer is freed
961 			 */
962 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
963 				hash_list_elem, peer_next) {
964 				/*
965 				 * Don't remove the peer from the hash table -
966 				 * that would modify the list we are currently
967 				 * traversing, and it's not necessary anyway.
968 				 */
969 				/*
970 				 * Artificially adjust the peer's ref count to
971 				 * 1, so it will get deleted by
972 				 * dp_peer_unref_delete.
973 				 */
974 				/* set to zero */
975 				qdf_atomic_init(&peer->ref_cnt);
976 				/* incr to one */
977 				qdf_atomic_inc(&peer->ref_cnt);
978 				dp_peer_unref_delete(peer);
979 			}
980 		}
981 	}
982 }
983 
984 static void dp_peer_find_map_detach(struct dp_soc *soc)
985 {
986 	qdf_mem_free(soc->peer_id_to_obj_map);
987 }
988 
989 int dp_peer_find_attach(struct dp_soc *soc)
990 {
991 	if (dp_peer_find_map_attach(soc))
992 		return 1;
993 
994 	if (dp_peer_find_hash_attach(soc)) {
995 		dp_peer_find_map_detach(soc);
996 		return 1;
997 	}
998 
999 	if (dp_peer_ast_hash_attach(soc)) {
1000 		dp_peer_find_hash_detach(soc);
1001 		dp_peer_find_map_detach(soc);
1002 		return 1;
1003 	}
1004 	return 0; /* success */
1005 }
1006 
1007 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1008 	union hal_reo_status *reo_status)
1009 {
1010 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1011 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1012 
1013 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1014 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
1015 			queue_status->header.status, rx_tid->tid);
1016 		return;
1017 	}
1018 
1019 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
1020 		"ssn: %d\n"
1021 		"curr_idx  : %d\n"
1022 		"pn_31_0   : %08x\n"
1023 		"pn_63_32  : %08x\n"
1024 		"pn_95_64  : %08x\n"
1025 		"pn_127_96 : %08x\n"
1026 		"last_rx_enq_tstamp : %08x\n"
1027 		"last_rx_deq_tstamp : %08x\n"
1028 		"rx_bitmap_31_0     : %08x\n"
1029 		"rx_bitmap_63_32    : %08x\n"
1030 		"rx_bitmap_95_64    : %08x\n"
1031 		"rx_bitmap_127_96   : %08x\n"
1032 		"rx_bitmap_159_128  : %08x\n"
1033 		"rx_bitmap_191_160  : %08x\n"
1034 		"rx_bitmap_223_192  : %08x\n"
1035 		"rx_bitmap_255_224  : %08x\n",
1036 		rx_tid->tid,
1037 		queue_status->ssn, queue_status->curr_idx,
1038 		queue_status->pn_31_0, queue_status->pn_63_32,
1039 		queue_status->pn_95_64, queue_status->pn_127_96,
1040 		queue_status->last_rx_enq_tstamp,
1041 		queue_status->last_rx_deq_tstamp,
1042 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
1043 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
1044 		queue_status->rx_bitmap_159_128,
1045 		queue_status->rx_bitmap_191_160,
1046 		queue_status->rx_bitmap_223_192,
1047 		queue_status->rx_bitmap_255_224);
1048 
1049 	DP_TRACE_STATS(FATAL,
1050 		"curr_mpdu_cnt      : %d\n"
1051 		"curr_msdu_cnt      : %d\n"
1052 		"fwd_timeout_cnt    : %d\n"
1053 		"fwd_bar_cnt        : %d\n"
1054 		"dup_cnt            : %d\n"
1055 		"frms_in_order_cnt  : %d\n"
1056 		"bar_rcvd_cnt       : %d\n"
1057 		"mpdu_frms_cnt      : %d\n"
1058 		"msdu_frms_cnt      : %d\n"
1059 		"total_byte_cnt     : %d\n"
1060 		"late_recv_mpdu_cnt : %d\n"
1061 		"win_jump_2k 	    : %d\n"
1062 		"hole_cnt 	    : %d\n",
1063 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
1064 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
1065 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
1066 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
1067 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
1068 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
1069 		queue_status->hole_cnt);
1070 
1071 	DP_PRINT_STATS("Addba Req          : %d\n"
1072 			"Addba Resp         : %d\n"
1073 			"Addba Resp success : %d\n"
1074 			"Addba Resp failed  : %d\n"
1075 			"Delba Req received : %d\n"
1076 			"Delba Tx success   : %d\n"
1077 			"Delba Tx Fail      : %d\n"
1078 			"BA window size     : %d\n"
1079 			"Pn size            : %d\n",
1080 			rx_tid->num_of_addba_req,
1081 			rx_tid->num_of_addba_resp,
1082 			rx_tid->num_addba_rsp_success,
1083 			rx_tid->num_addba_rsp_failed,
1084 			rx_tid->num_of_delba_req,
1085 			rx_tid->delba_tx_success_cnt,
1086 			rx_tid->delba_tx_fail_cnt,
1087 			rx_tid->ba_win_size,
1088 			rx_tid->pn_size);
1089 }
1090 
1091 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1092 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1093 	uint8_t vdev_id)
1094 {
1095 	struct dp_peer *peer;
1096 
1097 	QDF_ASSERT(peer_id <= soc->max_peers);
1098 	/* check if there's already a peer object with this MAC address */
1099 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1100 		0 /* is aligned */, vdev_id);
1101 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1102 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1103 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1104 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1105 		peer_mac_addr[4], peer_mac_addr[5]);
1106 
1107 	if (peer) {
1108 		/* peer's ref count was already incremented by
1109 		 * peer_find_hash_find
1110 		 */
1111 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1112 			  "%s: ref_cnt: %d", __func__,
1113 			   qdf_atomic_read(&peer->ref_cnt));
1114 		if (!soc->peer_id_to_obj_map[peer_id])
1115 			soc->peer_id_to_obj_map[peer_id] = peer;
1116 		else {
1117 			/* Peer map event came for peer_id which
1118 			 * is already mapped, this is not expected
1119 			 */
1120 			QDF_ASSERT(0);
1121 		}
1122 
1123 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1124 			/* TBDXXX: assert for now */
1125 			QDF_ASSERT(0);
1126 		}
1127 
1128 		return peer;
1129 	}
1130 
1131 	return NULL;
1132 }
1133 
1134 /**
1135  * dp_rx_peer_map_handler() - handle peer map event from firmware
1136  * @soc_handle - generic soc handle
1137  * @peer_id - peer_id from firmware
1138  * @hw_peer_id - ast index for this peer
1139  * @vdev_id - vdev ID
1140  * @peer_mac_addr - mac address of the peer
1141  * @ast_hash - ast hash value
1142  * @is_wds - flag to indicate peer map event for WDS ast entry
1143  *
1144  * Associate the peer_id that firmware provided with the peer entry
1145  * and update the ast table in the host with the hw_peer_id.
1146  *
1147  * Return: none
1148  */
1149 
1150 void
1151 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1152 		       uint16_t hw_peer_id, uint8_t vdev_id,
1153 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1154 		       uint8_t is_wds)
1155 {
1156 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1157 	struct dp_peer *peer = NULL;
1158 
1159 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1160 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
1161 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
1162 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1163 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1164 		peer_mac_addr[5], vdev_id);
1165 
1166 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
1167 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1168 			"invalid hw_peer_id: %d", hw_peer_id);
1169 		qdf_assert_always(0);
1170 	}
1171 
1172 	/* For a peer map event for a WDS ast entry, get the peer from
1173 	 * the obj map
1174 	 */
1175 	if (is_wds) {
1176 		peer = soc->peer_id_to_obj_map[peer_id];
1177 	} else {
1178 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1179 					   hw_peer_id, vdev_id);
1180 
1181 		if (peer) {
1182 			/*
1183 			 * For every peer map message, check and mark the bss_peer if applicable
1184 			 */
1185 			if (!(qdf_mem_cmp(peer->mac_addr.raw,
1186 					  peer->vdev->mac_addr.raw,
1187 					  DP_MAC_ADDR_LEN))) {
1188 				QDF_TRACE(QDF_MODULE_ID_DP,
1189 					  QDF_TRACE_LEVEL_INFO_HIGH,
1190 					  "vdev bss_peer!!!!");
1191 				peer->bss_peer = 1;
1192 				peer->vdev->vap_bss_peer = peer;
1193 			}
1194 
1195 			if (peer->vdev->opmode == wlan_op_mode_sta)
1196 				peer->vdev->bss_ast_hash = ast_hash;
1197 		}
1198 	}
1199 
1200 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1201 			hw_peer_id, vdev_id, ast_hash);
1202 }
1203 
1204 /**
1205  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1206  * @soc_handle - generic soc handle
1207  * @peer_id - peer_id from firmware
1208  * @vdev_id - vdev ID
1209  * @peer_mac_addr - mac address of the peer
1210  * @is_wds - flag to indicate peer map event for WDS ast entry
1211  *
1212  * Return: none
1213  */
1214 void
1215 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1216 			 uint8_t vdev_id, uint8_t *peer_mac_addr,
1217 			 uint8_t is_wds)
1218 {
1219 	struct dp_peer *peer;
1220 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1221 	uint8_t i;
1222 
1223 	if (is_wds)
1224 		return;
1225 
1226 	peer = __dp_peer_find_by_id(soc, peer_id);
1227 
1228 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1229 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1230 		soc, peer_id, peer);
1231 
1232 	/*
1233 	 * Currently peer IDs are assigned for vdevs as well as peers.
1234 	 * If the peer ID is for a vdev, then the peer pointer stored
1235 	 * in peer_id_to_obj_map will be NULL.
1236 	 */
1237 	if (!peer) {
1238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1239 			"%s: Received unmap event for invalid peer_id"
1240 			" %u", __func__, peer_id);
1241 		return;
1242 	}
1243 
1244 	soc->peer_id_to_obj_map[peer_id] = NULL;
1245 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1246 		if (peer->peer_ids[i] == peer_id) {
1247 			peer->peer_ids[i] = HTT_INVALID_PEER;
1248 			break;
1249 		}
1250 	}
1251 
1252 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1253 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1254 				peer_id);
1255 	}
1256 
1257 	/*
1258 	 * Remove a reference to the peer.
1259 	 * If there are no more references, delete the peer object.
1260 	 */
1261 	dp_peer_unref_delete(peer);
1262 }
1263 
1264 void
1265 dp_peer_find_detach(struct dp_soc *soc)
1266 {
1267 	dp_peer_find_map_detach(soc);
1268 	dp_peer_find_hash_detach(soc);
1269 	dp_peer_ast_hash_detach(soc);
1270 }
1271 
1272 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1273 	union hal_reo_status *reo_status)
1274 {
1275 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1276 
1277 	if ((reo_status->rx_queue_status.header.status !=
1278 		HAL_REO_CMD_SUCCESS) &&
1279 		(reo_status->rx_queue_status.header.status !=
1280 		HAL_REO_CMD_DRAIN)) {
1281 		/* Should not happen normally. Just print error for now */
1282 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1283 			"%s: Rx tid HW desc update failed(%d): tid %d",
1284 			__func__,
1285 			reo_status->rx_queue_status.header.status,
1286 			rx_tid->tid);
1287 	}
1288 }
1289 
1290 /*
1291  * dp_find_peer_by_addr - find peer instance by mac address
1292  * @dev: physical device instance
1293  * @peer_mac_addr: peer mac address
1294  * @local_id: local id for the peer
1295  *
1296  * Return: peer instance pointer
1297  */
1298 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1299 		uint8_t *local_id)
1300 {
1301 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1302 	struct dp_peer *peer;
1303 
1304 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1305 
1306 	if (!peer)
1307 		return NULL;
1308 
1309 	/* Multiple peer ids? How can we know which peer id to use? */
1310 	*local_id = peer->local_id;
1311 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1312 
1313 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1314 	 * Decrement it here.
1315 	 */
1316 	qdf_atomic_dec(&peer->ref_cnt);
1317 
1318 	return peer;
1319 }
1320 
1321 /*
1322  * dp_rx_tid_update_wifi3() – Update receive TID state
1323  * @peer: Datapath peer handle
1324  * @tid: TID
1325  * @ba_window_size: BlockAck window size
1326  * @start_seq: Starting sequence number
1327  *
1328  * Return: 0 on success, error code on failure
1329  */
1330 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1331 				  ba_window_size, uint32_t start_seq)
1332 {
1333 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1334 	struct dp_soc *soc = peer->vdev->pdev->soc;
1335 	struct hal_reo_cmd_params params;
1336 
1337 	qdf_mem_zero(&params, sizeof(params));
1338 
1339 	params.std.need_status = 1;
1340 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1341 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1342 	params.u.upd_queue_params.update_ba_window_size = 1;
1343 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1344 
1345 	if (start_seq < IEEE80211_SEQ_MAX) {
1346 		params.u.upd_queue_params.update_ssn = 1;
1347 		params.u.upd_queue_params.ssn = start_seq;
1348 	}
1349 
1350 	dp_set_ssn_valid_flag(&params, 0);
1351 
1352 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1353 
1354 	rx_tid->ba_win_size = ba_window_size;
1355 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1356 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1357 			peer->vdev->pdev->ctrl_pdev,
1358 			peer->vdev->vdev_id, peer->mac_addr.raw,
1359 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1360 
1361 	}
1362 	return 0;
1363 }
1364 
1365 /*
1366  * dp_reo_desc_free() - Callback free reo descriptor memory after
1367  * HW cache flush
1368  *
1369  * @soc: DP SOC handle
1370  * @cb_ctxt: Callback context
1371  * @reo_status: REO command status
1372  */
1373 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1374 	union hal_reo_status *reo_status)
1375 {
1376 	struct reo_desc_list_node *freedesc =
1377 		(struct reo_desc_list_node *)cb_ctxt;
1378 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1379 
1380 	if ((reo_status->fl_cache_status.header.status !=
1381 		HAL_REO_CMD_SUCCESS) &&
1382 		(reo_status->fl_cache_status.header.status !=
1383 		HAL_REO_CMD_DRAIN)) {
1384 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1385 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1386 			__func__,
1387 			reo_status->rx_queue_status.header.status,
1388 			freedesc->rx_tid.tid);
1389 	}
1390 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1391 		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1392 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1393 	qdf_mem_unmap_nbytes_single(soc->osdev,
1394 		rx_tid->hw_qdesc_paddr,
1395 		QDF_DMA_BIDIRECTIONAL,
1396 		rx_tid->hw_qdesc_alloc_size);
1397 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1398 	qdf_mem_free(freedesc);
1399 }
1400 
1401 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1402 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1403 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1404 {
1405 	if (dma_addr < 0x50000000)
1406 		return QDF_STATUS_E_FAILURE;
1407 	else
1408 		return QDF_STATUS_SUCCESS;
1409 }
1410 #else
1411 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1412 {
1413 	return QDF_STATUS_SUCCESS;
1414 }
1415 #endif
1416 
1417 
1418 /*
1419  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1420  * @peer: Datapath peer handle
1421  * @tid: TID
1422  * @ba_window_size: BlockAck window size
1423  * @start_seq: Starting sequence number
1424  *
1425  * Return: 0 on success, error code on failure
1426  */
1427 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1428 	uint32_t ba_window_size, uint32_t start_seq)
1429 {
1430 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1431 	struct dp_vdev *vdev = peer->vdev;
1432 	struct dp_soc *soc = vdev->pdev->soc;
1433 	uint32_t hw_qdesc_size;
1434 	uint32_t hw_qdesc_align;
1435 	int hal_pn_type;
1436 	void *hw_qdesc_vaddr;
1437 	uint32_t alloc_tries = 0;
1438 
1439 	if (peer->delete_in_progress)
1440 		return QDF_STATUS_E_FAILURE;
1441 
1442 	rx_tid->ba_win_size = ba_window_size;
1443 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1444 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1445 			start_seq);
1446 	rx_tid->delba_tx_status = 0;
1447 	rx_tid->ppdu_id_2k = 0;
1448 	rx_tid->num_of_addba_req = 0;
1449 	rx_tid->num_of_delba_req = 0;
1450 	rx_tid->num_of_addba_resp = 0;
1451 	rx_tid->num_addba_rsp_failed = 0;
1452 	rx_tid->num_addba_rsp_success = 0;
1453 	rx_tid->delba_tx_success_cnt = 0;
1454 	rx_tid->delba_tx_fail_cnt = 0;
1455 	rx_tid->statuscode = 0;
1456 #ifdef notyet
1457 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1458 #else
1459 	/* TODO: Allocating HW queue descriptors based on max BA window size
1460 	 * for all QOS TIDs so that same descriptor can be used later when
1461 	 * ADDBA request is received. This should be changed to allocate HW
1462 	 * queue descriptors based on BA window size being negotiated (0 for
1463 	 * non BA cases), and reallocate when BA window size changes and also
1464 	 * send WMI message to FW to change the REO queue descriptor in Rx
1465 	 * peer entry as part of dp_rx_tid_update.
1466 	 */
1467 	if (tid != DP_NON_QOS_TID)
1468 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1469 			HAL_RX_MAX_BA_WINDOW);
1470 	else
1471 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1472 			ba_window_size);
1473 #endif
1474 
1475 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1476 	/* To avoid unnecessary extra allocation for alignment, try allocating
1477 	 * exact size and see if we already have an aligned address.
1478 	 */
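	/*
	 * Illustrative numbers (not taken from the code): if hw_qdesc_size
	 * were 1000 bytes and the required alignment 128 bytes, the first
	 * attempt below allocates exactly 1000 bytes; only if that buffer is
	 * misaligned is it freed and re-allocated as 1000 + 127 bytes, after
	 * which qdf_align() rounds the start address up to the next 128-byte
	 * boundary.
	 */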
1479 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1480 
1481 try_desc_alloc:
1482 	rx_tid->hw_qdesc_vaddr_unaligned =
1483 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1484 
1485 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1486 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1487 			"%s: Rx tid HW desc alloc failed: tid %d",
1488 			__func__, tid);
1489 		return QDF_STATUS_E_NOMEM;
1490 	}
1491 
1492 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1493 		hw_qdesc_align) {
1494 		/* Address allocated above is not aligned. Allocate extra
1495 		 * memory for alignment
1496 		 */
1497 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1498 		rx_tid->hw_qdesc_vaddr_unaligned =
1499 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1500 					hw_qdesc_align - 1);
1501 
1502 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1503 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1504 				"%s: Rx tid HW desc alloc failed: tid %d",
1505 				__func__, tid);
1506 			return QDF_STATUS_E_NOMEM;
1507 		}
1508 
1509 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1510 			rx_tid->hw_qdesc_vaddr_unaligned,
1511 			hw_qdesc_align);
1512 
1513 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1514 			"%s: Total Size %d Aligned Addr %pK",
1515 			__func__, rx_tid->hw_qdesc_alloc_size,
1516 			hw_qdesc_vaddr);
1517 
1518 	} else {
1519 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1520 	}
1521 
1522 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1523 	 * Currently this is set based on htt indication
1524 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1525 	 */
1526 	switch (peer->security[dp_sec_ucast].sec_type) {
1527 	case cdp_sec_type_tkip_nomic:
1528 	case cdp_sec_type_aes_ccmp:
1529 	case cdp_sec_type_aes_ccmp_256:
1530 	case cdp_sec_type_aes_gcmp:
1531 	case cdp_sec_type_aes_gcmp_256:
1532 		hal_pn_type = HAL_PN_WPA;
1533 		break;
1534 	case cdp_sec_type_wapi:
1535 		if (vdev->opmode == wlan_op_mode_ap)
1536 			hal_pn_type = HAL_PN_WAPI_EVEN;
1537 		else
1538 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1539 		break;
1540 	default:
1541 		hal_pn_type = HAL_PN_NONE;
1542 		break;
1543 	}
1544 
1545 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1546 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1547 
1548 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1549 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1550 		&(rx_tid->hw_qdesc_paddr));
1551 
1552 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1553 			QDF_STATUS_SUCCESS) {
1554 		if (alloc_tries++ < 10)
1555 			goto try_desc_alloc;
1556 		else {
1557 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1558 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1559 			__func__, tid);
1560 			return QDF_STATUS_E_NOMEM;
1561 		}
1562 	}
1563 
1564 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1565 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1566 			vdev->pdev->ctrl_pdev,
1567 			peer->vdev->vdev_id, peer->mac_addr.raw,
1568 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1569 
1570 	}
1571 	return 0;
1572 }
1573 
1574 /*
1575  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1576  * after deleting the entries (ie., setting valid=0)
1577  *
1578  * @soc: DP SOC handle
1579  * @cb_ctxt: Callback context
1580  * @reo_status: REO command status
1581  */
1582 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1583 	union hal_reo_status *reo_status)
1584 {
1585 	struct reo_desc_list_node *freedesc =
1586 		(struct reo_desc_list_node *)cb_ctxt;
1587 	uint32_t list_size;
1588 	struct reo_desc_list_node *desc;
1589 	unsigned long curr_ts = qdf_get_system_timestamp();
1590 	uint32_t desc_size, tot_desc_size;
1591 	struct hal_reo_cmd_params params;
1592 
1593 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1594 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1595 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1596 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1597 		return;
1598 	} else if (reo_status->rx_queue_status.header.status !=
1599 		HAL_REO_CMD_SUCCESS) {
1600 		/* Should not happen normally. Just print error for now */
1601 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1602 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1603 			__func__,
1604 			reo_status->rx_queue_status.header.status,
1605 			freedesc->rx_tid.tid);
1606 	}
1607 
1608 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1609 		"%s: rx_tid: %d status: %d", __func__,
1610 		freedesc->rx_tid.tid,
1611 		reo_status->rx_queue_status.header.status);
1612 
1613 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1614 	freedesc->free_ts = curr_ts;
1615 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1616 		(qdf_list_node_t *)freedesc, &list_size);
1617 
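	/*
	 * Note (added for clarity): freed REO queue descriptors are parked on
	 * reo_desc_freelist and only flushed from the HW cache and released
	 * once the list grows beyond REO_DESC_FREELIST_SIZE entries or an
	 * entry has aged past REO_DESC_FREE_DEFER_MS, as checked below.
	 */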
1618 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1619 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1620 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1621 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1622 		struct dp_rx_tid *rx_tid;
1623 
1624 		qdf_list_remove_front(&soc->reo_desc_freelist,
1625 				(qdf_list_node_t **)&desc);
1626 		list_size--;
1627 		rx_tid = &desc->rx_tid;
1628 
1629 		/* Flush and invalidate REO descriptor from HW cache: Base and
1630 		 * extension descriptors should be flushed separately */
1631 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1632 			rx_tid->ba_win_size);
1633 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1634 
1635 		/* Flush reo extension descriptors */
1636 		while ((tot_desc_size -= desc_size) > 0) {
1637 			qdf_mem_zero(&params, sizeof(params));
1638 			params.std.addr_lo =
1639 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1640 				tot_desc_size) & 0xffffffff;
1641 			params.std.addr_hi =
1642 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1643 
1644 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1645 							CMD_FLUSH_CACHE,
1646 							&params,
1647 							NULL,
1648 							NULL)) {
1649 				QDF_TRACE(QDF_MODULE_ID_DP,
1650 					QDF_TRACE_LEVEL_ERROR,
1651 					"%s: fail to send CMD_CACHE_FLUSH:"
1652 					"tid %d desc %pK", __func__,
1653 					rx_tid->tid,
1654 					(void *)(rx_tid->hw_qdesc_paddr));
1655 			}
1656 		}
1657 
1658 		/* Flush base descriptor */
1659 		qdf_mem_zero(&params, sizeof(params));
1660 		params.std.need_status = 1;
1661 		params.std.addr_lo =
1662 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1663 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1664 
1665 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1666 							  CMD_FLUSH_CACHE,
1667 							  &params,
1668 							  dp_reo_desc_free,
1669 							  (void *)desc)) {
1670 			union hal_reo_status reo_status;
1671 			/*
1672 			 * If dp_reo_send_cmd returns failure, the related TID queue
1673 			 * desc should be unmapped. The local reo_desc, together with
1674 			 * the TID queue desc, also needs to be freed accordingly.
1675 			 *
1676 			 * Invoke the desc_free function directly here to do the cleanup.
1677 			 */
1678 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1679 				"%s: fail to send REO cmd to flush cache: tid %d",
1680 				__func__, rx_tid->tid);
1681 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1682 			reo_status.fl_cache_status.header.status = 0;
1683 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1684 		}
1685 	}
1686 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1687 }
1688 
1689 /*
1690  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1691  * @peer: Datapath peer handle
1692  * @tid: TID
1693  *
1694  * Return: 0 on success, error code on failure
1695  */
1696 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1697 {
1698 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1699 	struct dp_soc *soc = peer->vdev->pdev->soc;
1700 	struct hal_reo_cmd_params params;
1701 	struct reo_desc_list_node *freedesc =
1702 		qdf_mem_malloc(sizeof(*freedesc));
1703 
1704 	if (!freedesc) {
1705 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1706 			"%s: malloc failed for freedesc: tid %d",
1707 			__func__, tid);
1708 		return -ENOMEM;
1709 	}
1710 
1711 	freedesc->rx_tid = *rx_tid;
1712 
1713 	qdf_mem_zero(&params, sizeof(params));
1714 
1715 	params.std.need_status = 1;
1716 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1717 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1718 	params.u.upd_queue_params.update_vld = 1;
1719 	params.u.upd_queue_params.vld = 0;
1720 
1721 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1722 		dp_rx_tid_delete_cb, (void *)freedesc);
1723 
1724 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1725 	rx_tid->hw_qdesc_alloc_size = 0;
1726 	rx_tid->hw_qdesc_paddr = 0;
1727 
1728 	return 0;
1729 }
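
/*
 * Note (added for clarity): dp_rx_tid_delete_wifi3() hands ownership of the
 * HW queue descriptor to the rx_tid copy embedded in freedesc; the memory is
 * unmapped and freed later by dp_reo_desc_free() once the cache flush
 * commands issued from dp_rx_tid_delete_cb() complete.
 */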
1730 
1731 #ifdef DP_LFR
1732 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1733 {
1734 	int tid;
1735 
1736 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1737 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1738 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1739 			"Setting up TID %d for peer %pK peer->local_id %d",
1740 			tid, peer, peer->local_id);
1741 	}
1742 }
1743 #else
1744 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1745 #endif
1746 /*
1747  * dp_peer_rx_init() – Initialize receive TID state
1748  * @pdev: Datapath pdev
1749  * @peer: Datapath peer
1750  *
1751  */
1752 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1753 {
1754 	int tid;
1755 	struct dp_rx_tid *rx_tid;
1756 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1757 		rx_tid = &peer->rx_tid[tid];
1758 		rx_tid->array = &rx_tid->base;
1759 		rx_tid->base.head = rx_tid->base.tail = NULL;
1760 		rx_tid->tid = tid;
1761 		rx_tid->defrag_timeout_ms = 0;
1762 		rx_tid->ba_win_size = 0;
1763 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1764 
1765 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1766 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1767 
1768 #ifdef notyet /* TODO: See if this is required for exception handling */
1769 		/* invalid sequence number */
1770 		peer->tids_last_seq[tid] = 0xffff;
1771 #endif
1772 	}
1773 
1774 	peer->active_ba_session_cnt = 0;
1775 	peer->hw_buffer_size = 0;
1776 	peer->kill_256_sessions = 0;
1777 
1778 	/* Setup default (non-qos) rx tid queue */
1779 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1780 
1781 	/* Setup rx tid queue for TID 0.
1782 	 * Other queues will be set up on receiving the first packet for that
1783 	 * TID, which would otherwise hit a NULL REO queue error.
1784 	 */
1785 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1786 
1787 	/*
1788 	 * Set up the rest of the TIDs to handle LFR
1789 	 */
1790 	dp_peer_setup_remaining_tids(peer);
1791 
1792 	/*
1793 	 * Set security defaults: no PN check, no security. The target may
1794 	 * send a HTT SEC_IND message to overwrite these defaults.
1795 	 */
1796 	peer->security[dp_sec_ucast].sec_type =
1797 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1798 }
1799 
1800 /*
1801  * dp_peer_rx_cleanup() – Cleanup receive TID state
1802  * @vdev: Datapath vdev
1803  * @peer: Datapath peer
1804  *
1805  */
1806 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1807 {
1808 	int tid;
1809 	uint32_t tid_delete_mask = 0;
1810 
1811 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
1812 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1813 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1814 
1815 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1816 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1817 			dp_rx_tid_delete_wifi3(peer, tid);
1818 
1819 			/* Cleanup defrag related resource */
1820 			dp_rx_defrag_waitlist_remove(peer, tid);
1821 			dp_rx_reorder_flush_frag(peer, tid);
1822 
1823 			tid_delete_mask |= (1 << tid);
1824 		}
1825 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1826 	}
1827 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1828 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1829 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
1830 			peer->vdev->vdev_id, peer->mac_addr.raw,
1831 			tid_delete_mask);
1832 	}
1833 #endif
1834 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
1835 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
1836 }
1837 
1838 /*
1839  * dp_peer_cleanup() – Cleanup peer information
1840  * @vdev: Datapath vdev
1841  * @peer: Datapath peer
1842  *
1843  */
1844 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1845 {
1846 	peer->last_assoc_rcvd = 0;
1847 	peer->last_disassoc_rcvd = 0;
1848 	peer->last_deauth_rcvd = 0;
1849 
1850 	/* cleanup the Rx reorder queues for this peer */
1851 	dp_peer_rx_cleanup(vdev, peer);
1852 }
1853 
1854 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
1855  *                                window size when a request with
1856  *                                64 window size is received.
1857  *                                This is done as a WAR since HW can
1858  *                                have only one setting per peer (64 or 256).
1859  * @peer: Datapath peer
1860  *
1861  * Return: void
1862  */
1863 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1864 {
1865 	uint8_t delba_rcode = 0;
1866 	int tid;
1867 	struct dp_rx_tid *rx_tid = NULL;
1868 
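	/* Walk every TID and send a DELBA (QOS_SETUP_REQUIRED) for any active
	 * or in-progress session still using a window larger than 64, so all
	 * sessions converge on the new 64-entry HW buffer size.
	 */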
1869 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1870 		rx_tid = &peer->rx_tid[tid];
1871 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1872 
1873 		if (rx_tid->ba_win_size <= 64) {
1874 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1875 			continue;
1876 		} else {
1877 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1878 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1879 				/* send delba */
1880 				if (!rx_tid->delba_tx_status) {
1881 					rx_tid->delba_tx_retry++;
1882 					rx_tid->delba_tx_status = 1;
1883 					rx_tid->delba_rcode =
1884 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1885 					delba_rcode = rx_tid->delba_rcode;
1886 
1887 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1888 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1889 							peer->vdev->pdev->ctrl_pdev,
1890 							peer->ctrl_peer,
1891 							peer->mac_addr.raw,
1892 							tid, peer->vdev->ctrl_vdev,
1893 							delba_rcode);
1894 				} else {
1895 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1896 				}
1897 			} else {
1898 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1899 			}
1900 		}
1901 	}
1902 }
1903 
1904 /*
1905  * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state
1906  *
1907  * @peer_handle: Datapath peer handle
1908  * @tid: TID number
1909  * @status: tx completion status
1910  * Return: 0 on success, error code on failure
1911  */
1912 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
1913 				      uint8_t tid, int status)
1914 {
1915 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1916 	struct dp_rx_tid *rx_tid = NULL;
1917 
1918 	if (!peer || peer->delete_in_progress) {
1919 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1920 			  "%s: Peer is NULL!\n", __func__);
1921 		return QDF_STATUS_E_FAILURE;
1922 	}
1923 	rx_tid = &peer->rx_tid[tid];
1924 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1925 	if (status) {
1926 		rx_tid->num_addba_rsp_failed++;
1927 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1928 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1929 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1930 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1931 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
1932 			 __func__, tid);
1933 		return QDF_STATUS_SUCCESS;
1934 	}
1935 
1936 	rx_tid->num_addba_rsp_success++;
1937 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1938 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1939 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1940 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1941 			__func__, tid);
1942 		return QDF_STATUS_E_FAILURE;
1943 	}
1944 
1945 	/* First session: latch the HW reorder buffer size for this peer */
1946 	if (peer->active_ba_session_cnt == 0) {
1947 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1948 			peer->hw_buffer_size = 256;
1949 		else
1950 			peer->hw_buffer_size = 64;
1951 	}
1952 
1953 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1954 
1955 	peer->active_ba_session_cnt++;
1956 
1957 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1958 
1959 	/* Kill any session having 256 buffer size
1960 	 * when 64 buffer size request is received.
1961 	 * Also, latch on to 64 as new buffer size.
1962 	 */
1963 	if (peer->kill_256_sessions) {
1964 		dp_teardown_256_ba_sessions(peer);
1965 		peer->kill_256_sessions = 0;
1966 	}
1967 	return QDF_STATUS_SUCCESS;
1968 }
1969 
1970 /*
1971  * dp_addba_responsesetup_wifi3() - Get ADDBA response parameters
1972  *
1973  * @peer_handle: Datapath peer handle
1974  * @tid: TID number
1975  * @dialogtoken: output dialogtoken
1976  * @statuscode: output status code
1977  * @buffersize: output BA window size
1978  * @batimeout: output BA timeout
1979  */
1980 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1981 	uint8_t *dialogtoken, uint16_t *statuscode,
1982 	uint16_t *buffersize, uint16_t *batimeout)
1983 {
1984 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1985 	struct dp_rx_tid *rx_tid = NULL;
1986 
1987 	if (!peer || peer->delete_in_progress) {
1988 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1989 			  "%s: Peer is NULL!\n", __func__);
1990 		return;
1991 	}
1992 	rx_tid = &peer->rx_tid[tid];
1993 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1994 	rx_tid->num_of_addba_resp++;
1995 	/* setup ADDBA response parameters */
1996 	*dialogtoken = rx_tid->dialogtoken;
1997 	*statuscode = rx_tid->statuscode;
1998 	*buffersize = rx_tid->ba_win_size;
1999 	*batimeout  = 0;
2000 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2001 }
2002 
2003 /* dp_check_ba_buffersize() - Check the buffer size in the ADDBA request
2004  *                            and latch onto the size used by the first
2005  *                            active session.
2006  * @peer: Datapath peer
2007  * @tid: TID number
2008  * @buffersize: Block ack window size
2009  *
2010  * Return: void
2011  */
2012 static void dp_check_ba_buffersize(struct dp_peer *peer,
2013 				   uint16_t tid,
2014 				   uint16_t buffersize)
2015 {
2016 	struct dp_rx_tid *rx_tid = NULL;
2017 
2018 	rx_tid = &peer->rx_tid[tid];
2019 
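	/* The first active session latches the HW buffer size for the peer.
	 * Later requests are clamped to that size; a <= 64 request while the
	 * peer is latched at 256 downgrades the peer to 64 and flags the
	 * existing 256-entry sessions for teardown (kill_256_sessions).
	 */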
2020 	if (peer->active_ba_session_cnt == 0) {
2021 		rx_tid->ba_win_size = buffersize;
2022 	} else {
2023 		if (peer->hw_buffer_size == 64) {
2024 			if (buffersize <= 64)
2025 				rx_tid->ba_win_size = buffersize;
2026 			else
2027 				rx_tid->ba_win_size = peer->hw_buffer_size;
2028 		} else if (peer->hw_buffer_size == 256) {
2029 			if (buffersize > 64) {
2030 				rx_tid->ba_win_size = buffersize;
2031 			} else {
2032 				rx_tid->ba_win_size = buffersize;
2033 				peer->hw_buffer_size = 64;
2034 				peer->kill_256_sessions = 1;
2035 			}
2036 		}
2037 	}
2038 }
2039 
2040 /*
2041  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2042  *
2043  * @peer: Datapath peer handle
2044  * @dialogtoken: dialogtoken from ADDBA frame
2045  * @tid: TID number
2046  * @batimeout: BA timeout
2047  * @buffersize: BA window size
2048  * @startseqnum: Start seq. number received in BA sequence control
2049  *
2050  * Return: 0 on success, error code on failure
2051  */
2052 int dp_addba_requestprocess_wifi3(void *peer_handle,
2053 				  uint8_t dialogtoken,
2054 				  uint16_t tid, uint16_t batimeout,
2055 				  uint16_t buffersize,
2056 				  uint16_t startseqnum)
2057 {
2058 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2059 	struct dp_rx_tid *rx_tid = NULL;
2060 
2061 	if (!peer || peer->delete_in_progress) {
2062 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2063 			  "%s: Peer is NULL!\n", __func__);
2064 		return QDF_STATUS_E_FAILURE;
2065 	}
2066 	rx_tid = &peer->rx_tid[tid];
2067 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2068 	rx_tid->num_of_addba_req++;
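	/* An ADDBA for a TID whose REO queue is already set up (or a session
	 * already in progress) is treated as a fresh start: reset the window
	 * to size 1, mark the session inactive, decrement the active session
	 * count, and fail this request.
	 */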
2069 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2070 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
2071 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
2072 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2073 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2074 		peer->active_ba_session_cnt--;
2075 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2076 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2077 			  "%s: Rx Tid- %d hw qdesc is already setup",
2078 			__func__, tid);
2079 		return QDF_STATUS_E_FAILURE;
2080 	}
2081 
2082 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2083 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2084 		return QDF_STATUS_E_FAILURE;
2085 	}
2086 
2087 	dp_check_ba_buffersize(peer, tid, buffersize);
2088 
2089 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, startseqnum)) {
2090 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2091 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2092 		return QDF_STATUS_E_FAILURE;
2093 	}
2094 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2095 
2096 	rx_tid->ba_win_size = buffersize;
2097 	rx_tid->dialogtoken = dialogtoken;
2098 	rx_tid->startseqnum = startseqnum;
2099 
2100 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2101 		rx_tid->statuscode = rx_tid->userstatuscode;
2102 	else
2103 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2104 
2105 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2106 
2107 	return QDF_STATUS_SUCCESS;
2108 }
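
/*
 * Illustrative call sequence (assumed control-path pseudo-code, not part of
 * this file): on receiving an ADDBA request frame the caller is expected to
 * invoke dp_addba_requestprocess_wifi3() with the parsed parameters and,
 * when building the ADDBA response, fetch the negotiated values back via
 * dp_addba_responsesetup_wifi3():
 *
 *	dp_addba_requestprocess_wifi3(peer, dialogtoken, tid, batimeout,
 *				      buffersize, startseqnum);
 *	dp_addba_responsesetup_wifi3(peer, tid, &dialogtoken, &statuscode,
 *				     &buffersize, &batimeout);
 */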
2109 
2110 /*
2111  * dp_set_addba_response() - Set a user defined ADDBA response status code
2112  *
2113  * @peer_handle: Datapath peer handle
2114  * @tid: TID number
2115  * @statuscode: response status code to be set
2116  */
2117 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2118 	uint16_t statuscode)
2119 {
2120 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2121 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2122 
2123 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2124 	rx_tid->userstatuscode = statuscode;
2125 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2126 }
2127 
2128 /*
2129  * dp_delba_process_wifi3() - Process DELBA from peer
2130  * @peer_handle: Datapath peer handle
2131  * @tid: TID number
2132  * @reasoncode: Reason code received in DELBA frame
2133  *
2134  * Return: 0 on success, error code on failure
2135  */
2136 int dp_delba_process_wifi3(void *peer_handle,
2137 	int tid, uint16_t reasoncode)
2138 {
2139 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2140 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2141 
2142 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2143 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2144 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2145 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2146 		return QDF_STATUS_E_FAILURE;
2147 	}
2148 	/* TODO: See if we can delete the existing REO queue descriptor and
2149 	 * replace it with a new one without the queue extension descriptor,
2150 	 * to save memory
2151 	 */
2152 	rx_tid->delba_rcode = reasoncode;
2153 	rx_tid->num_of_delba_req++;
2154 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2155 
2156 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2157 	peer->active_ba_session_cnt--;
2158 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2159 	return 0;
2160 }
2161 
2162 /*
2163  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
2164  *
2165  * @peer_handle: Datapath peer handle
2166  * @tid: TID number
2167  * @status: tx completion status
2168  * Return: 0 on success, error code on failure
2169  */
2170 
2171 int dp_delba_tx_completion_wifi3(void *peer_handle,
2172 				 uint8_t tid, int status)
2173 {
2174 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2175 	struct dp_rx_tid *rx_tid = NULL;
2176 
2177 	if (!peer || peer->delete_in_progress) {
2178 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2179 			  "%s: Peer is NULL!", __func__);
2180 		return QDF_STATUS_E_FAILURE;
2181 	}
2182 	rx_tid = &peer->rx_tid[tid];
2183 	qdf_spin_lock_bh(&rx_tid->tid_lock);
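	/* On tx failure, resend the DELBA (up to DP_MAX_DELBA_RETRY attempts)
	 * with the previously recorded reason code; on success, clear the
	 * retry state and deactivate the session below.
	 */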
2184 	if (status) {
2185 		rx_tid->delba_tx_fail_cnt++;
2186 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2187 			rx_tid->delba_tx_retry = 0;
2188 			rx_tid->delba_tx_status = 0;
2189 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2190 		} else {
2191 			rx_tid->delba_tx_retry++;
2192 			rx_tid->delba_tx_status = 1;
2193 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2194 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2195 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2196 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2197 				rx_tid->delba_rcode);
2198 		}
2199 		return QDF_STATUS_SUCCESS;
2200 	} else {
2201 		rx_tid->delba_tx_success_cnt++;
2202 		rx_tid->delba_tx_retry = 0;
2203 		rx_tid->delba_tx_status = 0;
2204 	}
2205 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2206 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2207 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2208 		peer->active_ba_session_cnt--;
2209 	}
2210 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2211 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2212 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2213 	}
2214 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2215 
2216 	return QDF_STATUS_SUCCESS;
2217 }
2218 
2219 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2220 	qdf_nbuf_t msdu_list)
2221 {
2222 	while (msdu_list) {
2223 		qdf_nbuf_t msdu = msdu_list;
2224 
2225 		msdu_list = qdf_nbuf_next(msdu_list);
2226 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2227 			"discard rx %pK from partly-deleted peer %pK "
2228 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
2229 			msdu, peer,
2230 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2231 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2232 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2233 		qdf_nbuf_free(msdu);
2234 	}
2235 }
2236 
2237 
2238 /**
2239  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2240  * @vdev_handle: Datapath vdev handle
2241  * @peer_handle: Datapath peer handle
2242  * @sec_type: security type
2243  * @rx_pn: Receive pn starting number
2244  *
2245  * Return: none
2246  */
2247 
2248 void
2249 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
2250 {
2251 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2252 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2253 	struct dp_pdev *pdev;
2254 	struct dp_soc *soc;
2255 	int i;
2256 	uint8_t pn_size;
2257 	struct hal_reo_cmd_params params;
2258 
2259 	/* preconditions */
2260 	qdf_assert(vdev);
2261 
2262 	pdev = vdev->pdev;
2263 	soc = pdev->soc;
2264 
2265 
2266 	qdf_mem_zero(&params, sizeof(params));
2267 
2268 	params.std.need_status = 1;
2269 	params.u.upd_queue_params.update_pn_valid = 1;
2270 	params.u.upd_queue_params.update_pn_size = 1;
2271 	params.u.upd_queue_params.update_pn = 1;
2272 	params.u.upd_queue_params.update_pn_check_needed = 1;
2273 	params.u.upd_queue_params.update_svld = 1;
2274 	params.u.upd_queue_params.svld = 0;
2275 
2276 	peer->security[dp_sec_ucast].sec_type = sec_type;
2277 
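	/* Program the PN size expected by REO for the selected cipher:
	 * a 48-bit PN for TKIP/CCMP/GCMP, a 128-bit PN for WAPI (with
	 * even/uneven PN enforcement depending on AP vs non-AP role), and no
	 * PN check otherwise.
	 */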
2278 	switch (sec_type) {
2279 	case cdp_sec_type_tkip_nomic:
2280 	case cdp_sec_type_aes_ccmp:
2281 	case cdp_sec_type_aes_ccmp_256:
2282 	case cdp_sec_type_aes_gcmp:
2283 	case cdp_sec_type_aes_gcmp_256:
2284 		params.u.upd_queue_params.pn_check_needed = 1;
2285 		params.u.upd_queue_params.pn_size = 48;
2286 		pn_size = 48;
2287 		break;
2288 	case cdp_sec_type_wapi:
2289 		params.u.upd_queue_params.pn_check_needed = 1;
2290 		params.u.upd_queue_params.pn_size = 128;
2291 		pn_size = 128;
2292 		if (vdev->opmode == wlan_op_mode_ap) {
2293 			params.u.upd_queue_params.pn_even = 1;
2294 			params.u.upd_queue_params.update_pn_even = 1;
2295 		} else {
2296 			params.u.upd_queue_params.pn_uneven = 1;
2297 			params.u.upd_queue_params.update_pn_uneven = 1;
2298 		}
2299 		break;
2300 	default:
2301 		params.u.upd_queue_params.pn_check_needed = 0;
2302 		pn_size = 0;
2303 		break;
2304 	}
2305 
2306 
2307 	for (i = 0; i < DP_MAX_TIDS; i++) {
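	/* Apply the updated PN parameters to every TID that already has a REO
	 * queue. For WAPI the starting PN is pushed explicitly, since it must
	 * begin from a predefined value; for other ciphers only the PN size
	 * and check settings are updated.
	 */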
2308 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2309 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2310 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2311 			params.std.addr_lo =
2312 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2313 			params.std.addr_hi =
2314 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2315 
2316 			if (sec_type != cdp_sec_type_wapi) {
2317 				params.u.upd_queue_params.update_pn_valid = 0;
2318 			} else {
2319 				/*
2320 				 * Setting PN valid bit for WAPI sec_type,
2321 				 * since WAPI PN has to be started with
2322 				 * predefined value
2323 				 */
2324 				params.u.upd_queue_params.update_pn_valid = 1;
2325 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2326 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2327 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2328 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2329 			}
2330 			rx_tid->pn_size = pn_size;
2331 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2332 				dp_rx_tid_update_cb, rx_tid);
2333 		} else {
2334 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2335 				"PN Check not setup for TID :%d ", i);
2336 		}
2337 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2338 	}
2339 }
2340 
2341 
2342 void
2343 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2344 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2345 	u_int32_t *rx_pn)
2346 {
2347 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2348 	struct dp_peer *peer;
2349 	int sec_index;
2350 
2351 	peer = dp_peer_find_by_id(soc, peer_id);
2352 	if (!peer) {
2353 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2354 			"Couldn't find peer from ID %d - skipping security inits",
2355 			peer_id);
2356 		return;
2357 	}
2358 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2359 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
2360 		"%s key of type %d",
2361 		peer,
2362 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2363 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2364 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2365 		is_unicast ? "ucast" : "mcast",
2366 		sec_type);
2367 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2368 	peer->security[sec_index].sec_type = sec_type;
2369 #ifdef notyet /* TODO: See if this is required for defrag support */
2370 	/* michael key only valid for TKIP, but for simplicity,
2371 	 * copy it anyway
2372 	 */
2373 	qdf_mem_copy(
2374 		&peer->security[sec_index].michael_key[0],
2375 		michael_key,
2376 		sizeof(peer->security[sec_index].michael_key));
2377 #ifdef BIG_ENDIAN_HOST
2378 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2379 				 sizeof(peer->security[sec_index].michael_key));
2380 #endif /* BIG_ENDIAN_HOST */
2381 #endif
2382 
2383 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2384 	if (sec_type != htt_sec_type_wapi) {
2385 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
2386 	} else {
2387 		for (i = 0; i < DP_MAX_TIDS; i++) {
2388 			/*
2389 			 * Setting PN valid bit for WAPI sec_type,
2390 			 * since WAPI PN has to be started with predefined value
2391 			 */
2392 			peer->tids_last_pn_valid[i] = 1;
2393 			qdf_mem_copy(
2394 				(u_int8_t *) &peer->tids_last_pn[i],
2395 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2396 			peer->tids_last_pn[i].pn128[1] =
2397 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2398 			peer->tids_last_pn[i].pn128[0] =
2399 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2400 		}
2401 	}
2402 #endif
2403 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2404 	 * all security types and last pn for WAPI) once REO command API
2405 	 * is available
2406 	 */
2407 
2408 	dp_peer_unref_del_find_by_id(peer);
2409 }
2410 
2411 #ifndef CONFIG_WIN
2412 /**
2413  * dp_register_peer() - Register peer into physical device
2414  * @pdev - data path device instance
2415  * @sta_desc - peer description
2416  *
2417  * Register peer into physical device
2418  *
2419  * Return: QDF_STATUS_SUCCESS registration success
2420  *         QDF_STATUS_E_FAULT peer not found
2421  */
2422 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2423 		struct ol_txrx_desc_type *sta_desc)
2424 {
2425 	struct dp_peer *peer;
2426 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2427 
2428 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2429 			sta_desc->sta_id);
2430 	if (!peer)
2431 		return QDF_STATUS_E_FAULT;
2432 
2433 	qdf_spin_lock_bh(&peer->peer_info_lock);
2434 	peer->state = OL_TXRX_PEER_STATE_CONN;
2435 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2436 
2437 	return QDF_STATUS_SUCCESS;
2438 }
2439 
2440 /**
2441  * dp_clear_peer() - remove peer from physical device
2442  * @pdev - data path device instance
2443  * @local_id - local peer id
2444  *
2445  * remove peer from physical device
2446  *
2447  * Return: QDF_STATUS_SUCCESS removal success
2448  *         QDF_STATUS_E_FAULT peer not found
2449  */
2450 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2451 {
2452 	struct dp_peer *peer;
2453 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2454 
2455 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2456 	if (!peer)
2457 		return QDF_STATUS_E_FAULT;
2458 
2459 	qdf_spin_lock_bh(&peer->peer_info_lock);
2460 	peer->state = OL_TXRX_PEER_STATE_DISC;
2461 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2462 
2463 	return QDF_STATUS_SUCCESS;
2464 }
2465 
2466 /**
2467  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2468  * @pdev - data path device instance
2469  * @vdev - virtual interface instance
2470  * @peer_addr - peer mac address
2471  * @local_id - output local peer id for the matched peer
2472  *
2473  * Find peer by peer mac address within vdev
2474  *
2475  * Return: peer instance void pointer
2476  *         NULL cannot find target peer
2477  */
2478 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2479 		struct cdp_vdev *vdev_handle,
2480 		uint8_t *peer_addr, uint8_t *local_id)
2481 {
2482 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2483 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2484 	struct dp_peer *peer;
2485 
2486 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2487 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2488 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2489 
2490 	if (!peer)
2491 		return NULL;
2492 
2493 	if (peer->vdev != vdev) {
2494 		qdf_atomic_dec(&peer->ref_cnt);
2495 		return NULL;
2496 	}
2497 
2498 	*local_id = peer->local_id;
2499 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2500 
2501 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2502 	 * Decrement it here.
2503 	 */
2504 	qdf_atomic_dec(&peer->ref_cnt);
2505 
2506 	return peer;
2507 }
2508 
2509 /**
2510  * dp_local_peer_id() - Find local peer id within peer instance
2511  * @peer - peer instance
2512  *
2513  * Find local peer id within peer instance
2514  *
2515  * Return: local peer id
2516  */
2517 uint16_t dp_local_peer_id(void *peer)
2518 {
2519 	return ((struct dp_peer *)peer)->local_id;
2520 }
2521 
2522 /**
2523  * dp_peer_find_by_local_id() - Find peer by local peer id
2524  * @pdev - data path device instance
2525  * @local_peer_id - local peer id want to find
2526  *
2527  * Find peer by local peer id within physical device
2528  *
2529  * Return: peer instance void pointer
2530  *         NULL cannot find target peer
2531  */
2532 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2533 {
2534 	struct dp_peer *peer;
2535 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2536 
2537 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2538 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2539 				   "Incorrect local id %u", local_id);
2540 		return NULL;
2541 	}
2542 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2543 	peer = pdev->local_peer_ids.map[local_id];
2544 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2545 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2546 	return peer;
2547 }
2548 
2549 /**
2550  * dp_peer_state_update() - update peer local state
2551  * @pdev - data path device instance
2552  * @peer_addr - peer mac address
2553  * @state - new peer local state
2554  *
2555  * update peer local state
2556  *
2557  * Return: QDF_STATUS_SUCCESS state update success
2558  */
2559 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2560 		enum ol_txrx_peer_state state)
2561 {
2562 	struct dp_peer *peer;
2563 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2564 
2565 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2566 	if (NULL == peer) {
2567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2568 		"Failed to find peer for: [%pM]", peer_mac);
2569 		return QDF_STATUS_E_FAILURE;
2570 	}
2571 	peer->state = state;
2572 
2573 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2574 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2575 	 * Decrement it here.
2576 	 */
2577 	qdf_atomic_dec(&peer->ref_cnt);
2578 
2579 	return QDF_STATUS_SUCCESS;
2580 }
2581 
2582 /**
2583  * dp_get_vdevid() - Get the virtual interface id with which the peer is registered
2584  * @peer - peer instance
2585  * @vdev_id - virtual interface id which peer registered
2586  *
2587  * Get the virtual interface id with which the peer is registered
2588  *
2589  * Return: QDF_STATUS_SUCCESS on success
2590  */
2591 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2592 {
2593 	struct dp_peer *peer = peer_handle;
2594 
2595 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2596 			peer, peer->vdev, peer->vdev->vdev_id);
2597 	*vdev_id = peer->vdev->vdev_id;
2598 	return QDF_STATUS_SUCCESS;
2599 }
2600 
2601 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2602 				       uint8_t sta_id)
2603 {
2604 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2605 	struct dp_peer *peer = NULL;
2606 
2607 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2608 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2609 			  "Invalid sta id passed");
2610 		return NULL;
2611 	}
2612 
2613 	if (!pdev) {
2614 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2615 			  "PDEV not found for sta_id [%d]", sta_id);
2616 		return NULL;
2617 	}
2618 
2619 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2620 	if (!peer) {
2621 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2622 			  "PEER [%d] not found", sta_id);
2623 		return NULL;
2624 	}
2625 
2626 	return (struct cdp_vdev *)peer->vdev;
2627 }
2628 
2629 /**
2630  * dp_get_vdev_for_peer() - Get the virtual interface instance to which the peer belongs
2631  * @peer - peer instance
2632  *
2633  * Get the virtual interface instance to which the peer belongs
2634  *
2635  * Return: virtual interface instance pointer
2636  *         NULL in case cannot find
2637  */
2638 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2639 {
2640 	struct dp_peer *peer = peer_handle;
2641 
2642 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2643 	return (struct cdp_vdev *)peer->vdev;
2644 }
2645 
2646 /**
2647  * dp_peer_get_peer_mac_addr() - Get peer mac address
2648  * @peer - peer instance
2649  *
2650  * Get peer mac address
2651  *
2652  * Return: peer mac address pointer
2653  *         NULL in case cannot find
2654  */
2655 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2656 {
2657 	struct dp_peer *peer = peer_handle;
2658 	uint8_t *mac;
2659 
2660 	mac = peer->mac_addr.raw;
2661 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2662 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2663 	return peer->mac_addr.raw;
2664 }
2665 
2666 /**
2667  * dp_get_peer_state() - Get local peer state
2668  * @peer - peer instance
2669  *
2670  * Get local peer state
2671  *
2672  * Return: peer state
2673  */
2674 int dp_get_peer_state(void *peer_handle)
2675 {
2676 	struct dp_peer *peer = peer_handle;
2677 
2678 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
2679 	return peer->state;
2680 }
2681 
2682 /**
2683  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2684  * @pdev - data path device instance
2685  *
2686  * local peer id pool alloc for physical device
2687  *
2688  * Return: none
2689  */
2690 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2691 {
2692 	int i;
2693 
2694 	/* point the freelist to the first ID */
2695 	pdev->local_peer_ids.freelist = 0;
2696 
2697 	/* link each ID to the next one */
2698 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2699 		pdev->local_peer_ids.pool[i] = i + 1;
2700 		pdev->local_peer_ids.map[i] = NULL;
2701 	}
2702 
2703 	/* link the last ID to itself, to mark the end of the list */
2704 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2705 	pdev->local_peer_ids.pool[i] = i;
2706 
2707 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2708 	DP_TRACE(INFO, "Peer pool init");
2709 }
2710 
2711 /**
2712  * dp_local_peer_id_alloc() - allocate local peer id
2713  * @pdev - data path device instance
2714  * @peer - new peer instance
2715  *
2716  * allocate local peer id
2717  *
2718  * Return: none
2719  */
2720 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2721 {
2722 	int i;
2723 
2724 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2725 	i = pdev->local_peer_ids.freelist;
2726 	if (pdev->local_peer_ids.pool[i] == i) {
2727 		/* the list is empty, except for the list-end marker */
2728 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2729 	} else {
2730 		/* take the head ID and advance the freelist */
2731 		peer->local_id = i;
2732 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2733 		pdev->local_peer_ids.map[i] = peer;
2734 	}
2735 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2736 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2737 }
2738 
2739 /**
2740  * dp_local_peer_id_free() - remove local peer id
2741  * @pdev - data path device instance
2742  * @peer - peer instance should be removed
2743  *
2744  * remove local peer id
2745  *
2746  * Return: none
2747  */
2748 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2749 {
2750 	int i = peer->local_id;
2751 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2752 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2753 		return;
2754 	}
2755 
2756 	/* put this ID on the head of the freelist */
2757 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2758 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2759 	pdev->local_peer_ids.freelist = i;
2760 	pdev->local_peer_ids.map[i] = NULL;
2761 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2762 }
2763 #endif
2764 
2765 /**
2766  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
2767  * @soc_handle: DP SOC handle
2768  * @peer_id:peer_id of the peer
2769  *
2770  * return: vdev_id of the vap
2771  */
2772 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2773 		uint16_t peer_id, uint8_t *peer_mac)
2774 {
2775 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2776 	struct dp_peer *peer;
2777 	uint8_t vdev_id;
2778 
2779 	peer = dp_peer_find_by_id(soc, peer_id);
2780 
2781 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2782 			"soc %pK peer_id %d", soc, peer_id);
2783 
2784 	if (!peer) {
2785 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2786 				"peer not found ");
2787 		return CDP_INVALID_VDEV_ID;
2788 	}
2789 
2790 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
2791 	vdev_id = peer->vdev->vdev_id;
2792 
2793 	dp_peer_unref_del_find_by_id(peer);
2794 
2795 	return vdev_id;
2796 }
2797 
2798 /**
2799  * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
2800  * @peer: DP peer handle
2801  * @dp_stats_cmd_cb: REO command callback function
2802  * @cb_ctxt: Callback context
2803  *
2804  * Return: none
2805  */
2806 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2807 			void *cb_ctxt)
2808 {
2809 	struct dp_soc *soc = peer->vdev->pdev->soc;
2810 	struct hal_reo_cmd_params params;
2811 	int i;
2812 
2813 	if (!dp_stats_cmd_cb)
2814 		return;
2815 
2816 	qdf_mem_zero(&params, sizeof(params));
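	/* For each TID with a REO queue: first issue GET_QUEUE_STATS with the
	 * caller's callback, then issue a FLUSH_CACHE (without invalidation)
	 * so the queue descriptor in memory is refreshed for later debugging.
	 */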
2817 	for (i = 0; i < DP_MAX_TIDS; i++) {
2818 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2819 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2820 			params.std.need_status = 1;
2821 			params.std.addr_lo =
2822 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2823 			params.std.addr_hi =
2824 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2825 
2826 			if (cb_ctxt) {
2827 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2828 					&params, dp_stats_cmd_cb, cb_ctxt);
2829 			} else {
2830 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2831 					&params, dp_stats_cmd_cb, rx_tid);
2832 			}
2833 
2834 			/* Flush REO descriptor from HW cache to update stats
2835 			 * in descriptor memory. This is to help debugging */
2836 			qdf_mem_zero(&params, sizeof(params));
2837 			params.std.need_status = 0;
2838 			params.std.addr_lo =
2839 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2840 			params.std.addr_hi =
2841 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2842 			params.u.fl_cache_params.flush_no_inval = 1;
2843 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2844 				NULL);
2845 		}
2846 	}
2847 }
2848 
2849 void dp_set_michael_key(struct cdp_peer *peer_handle,
2850 			bool is_unicast, uint32_t *key)
2851 {
2852 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2853 	uint8_t sec_index = is_unicast ? 1 : 0;
2854 
2855 	if (!peer) {
2856 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2857 			  "peer not found ");
2858 		return;
2859 	}
2860 
2861 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
2862 		     key, IEEE80211_WEP_MICLEN);
2863 }
2864 
2865 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
2866 {
2867 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
2868 
2869 	if (peer) {
2870 		/*
2871 		 * Decrement the peer ref which is taken as part of
2872 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
2873 		 */
2874 		dp_peer_unref_del_find_by_id(peer);
2875 
2876 		return true;
2877 	}
2878 
2879 	return false;
2880 }
2881