xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {};
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&.
60 		 * because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
69 
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
110 
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
175 
176 
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: None
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 /*
229  * dp_peer_ast_cleanup() - cleanup the references
230  * @soc: SoC handle
231  * @ast: ast entry
232  *
233  * Return: None
234  */
235 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
236 				       struct dp_ast_entry *ast)
237 {
238 	txrx_ast_free_cb cb = ast->callback;
239 	void *cookie = ast->cookie;
240 
241 	/* Call the callbacks to free up the cookie */
242 	if (cb) {
243 		ast->callback = NULL;
244 		ast->cookie = NULL;
245 		cb(soc->ctrl_psoc,
246 		   soc,
247 		   cookie,
248 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
249 	}
250 }
251 
252 /*
253  * dp_peer_ast_hash_detach() - Free AST Hash table
254  * @soc: SoC handle
255  *
256  * Return: None
257  */
258 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
259 {
260 	unsigned int index;
261 	struct dp_ast_entry *ast, *ast_next;
262 
263 	if (!soc->ast_hash.mask)
264 		return;
265 
266 	qdf_spin_lock_bh(&soc->ast_lock);
267 	for (index = 0; index <= soc->ast_hash.mask; index++) {
268 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
269 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
270 					   hash_list_elem, ast_next) {
271 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
272 					     hash_list_elem);
273 				dp_peer_ast_cleanup(soc, ast);
274 				qdf_mem_free(ast);
275 			}
276 		}
277 	}
278 	qdf_spin_unlock_bh(&soc->ast_lock);
279 
280 	qdf_mem_free(soc->ast_hash.bins);
281 }
282 
283 /*
284  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
285  * @soc: SoC handle
286  *
287  * Return: AST hash
288  */
289 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
290 	union dp_align_mac_addr *mac_addr)
291 {
292 	uint32_t index;
293 
294 	index =
295 		mac_addr->align2.bytes_ab ^
296 		mac_addr->align2.bytes_cd ^
297 		mac_addr->align2.bytes_ef;
298 	index ^= index >> soc->ast_hash.idx_bits;
299 	index &= soc->ast_hash.mask;
300 	return index;
301 }
302 
303 /*
304  * dp_peer_ast_hash_add() - Add AST entry into hash table
305  * @soc: SoC handle
306  *
307  * This function adds the AST entry into SoC AST hash table
308  * It assumes caller has taken the ast lock to protect the access to this table
309  *
310  * Return: None
311  */
312 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
313 		struct dp_ast_entry *ase)
314 {
315 	uint32_t index;
316 
317 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
318 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
319 }
320 
321 /*
322  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
323  * @soc: SoC handle
324  *
325  * This function removes the AST entry from soc AST hash table
326  * It assumes caller has taken the ast lock to protect the access to this table
327  *
328  * Return: None
329  */
330 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
331 		struct dp_ast_entry *ase)
332 {
333 	unsigned index;
334 	struct dp_ast_entry *tmpase;
335 	int found = 0;
336 
337 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
338 	/* Check if tail is not empty before delete*/
339 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
340 
341 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
342 		if (tmpase == ase) {
343 			found = 1;
344 			break;
345 		}
346 	}
347 
348 	QDF_ASSERT(found);
349 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
350 }
351 
352 /*
353  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
354  * @soc: SoC handle
355  * @peer: peer handle
356  * @ast_mac_addr: mac address
357  *
358  * It assumes caller has taken the ast lock to protect the access to ast list
359  *
360  * Return: AST entry
361  */
362 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
363 					   struct dp_peer *peer,
364 					   uint8_t *ast_mac_addr)
365 {
366 	struct dp_ast_entry *ast_entry = NULL;
367 	union dp_align_mac_addr *mac_addr =
368 		(union dp_align_mac_addr *)ast_mac_addr;
369 
370 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
371 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
372 					       &ast_entry->mac_addr)) {
373 			return ast_entry;
374 		}
375 	}
376 
377 	return NULL;
378 }
379 
380 /*
381  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
382  * @soc: SoC handle
383  *
384  * It assumes caller has taken the ast lock to protect the access to
385  * AST hash table
386  *
387  * Return: AST entry
388  */
389 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
390 						     uint8_t *ast_mac_addr,
391 						     uint8_t pdev_id)
392 {
393 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
394 	uint32_t index;
395 	struct dp_ast_entry *ase;
396 
397 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
398 		     ast_mac_addr, DP_MAC_ADDR_LEN);
399 	mac_addr = &local_mac_addr_aligned;
400 
401 	index = dp_peer_ast_hash_index(soc, mac_addr);
402 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
403 		if ((pdev_id == ase->pdev_id) &&
404 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
405 			return ase;
406 		}
407 	}
408 
409 	return NULL;
410 }
411 
412 /*
413  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
414  * @soc: SoC handle
415  *
416  * It assumes caller has taken the ast lock to protect the access to
417  * AST hash table
418  *
419  * Return: AST entry
420  */
421 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
422 					       uint8_t *ast_mac_addr)
423 {
424 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
425 	unsigned index;
426 	struct dp_ast_entry *ase;
427 
428 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
429 			ast_mac_addr, DP_MAC_ADDR_LEN);
430 	mac_addr = &local_mac_addr_aligned;
431 
432 	index = dp_peer_ast_hash_index(soc, mac_addr);
433 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
434 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
435 			return ase;
436 		}
437 	}
438 
439 	return NULL;
440 }
441 
442 /*
443  * dp_peer_map_ast() - Map the ast entry with HW AST Index
444  * @soc: SoC handle
445  * @peer: peer to which ast node belongs
446  * @mac_addr: MAC address of ast node
447  * @hw_peer_id: HW AST Index returned by target in peer map event
448  * @vdev_id: vdev id for VAP to which the peer belongs to
449  * @ast_hash: ast hash value in HW
450  *
451  * Return: None
452  */
453 static inline void dp_peer_map_ast(struct dp_soc *soc,
454 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
455 	uint8_t vdev_id, uint16_t ast_hash)
456 {
457 	struct dp_ast_entry *ast_entry = NULL;
458 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
459 
460 	if (!peer) {
461 		return;
462 	}
463 
464 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
465 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
466 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
467 		mac_addr[1], mac_addr[2], mac_addr[3],
468 		mac_addr[4], mac_addr[5]);
469 
470 	qdf_spin_lock_bh(&soc->ast_lock);
471 
472 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
473 
474 	if (ast_entry) {
475 		ast_entry->ast_idx = hw_peer_id;
476 		soc->ast_table[hw_peer_id] = ast_entry;
477 		ast_entry->is_active = TRUE;
478 		peer_type = ast_entry->type;
479 		ast_entry->ast_hash_value = ast_hash;
480 		ast_entry->is_mapped = TRUE;
481 	}
482 
483 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
484 		if (soc->cdp_soc.ol_ops->peer_map_event) {
485 			soc->cdp_soc.ol_ops->peer_map_event(
486 			soc->ctrl_psoc, peer->peer_ids[0],
487 			hw_peer_id, vdev_id,
488 			mac_addr, peer_type, ast_hash);
489 		}
490 	} else {
491 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
492 			"AST entry not found");
493 	}
494 
495 	qdf_spin_unlock_bh(&soc->ast_lock);
496 	return;
497 }
498 
499 void dp_peer_free_hmwds_cb(void *ctrl_psoc,
500 			   void *dp_soc,
501 			   void *cookie,
502 			   enum cdp_ast_free_status status)
503 {
504 	struct dp_ast_free_cb_params *param =
505 		(struct dp_ast_free_cb_params *)cookie;
506 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
507 	struct dp_peer *peer = NULL;
508 
509 	if (status != CDP_TXRX_AST_DELETED) {
510 		qdf_mem_free(cookie);
511 		return;
512 	}
513 
514 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
515 				      0, param->vdev_id);
516 	if (peer) {
517 		dp_peer_add_ast(soc, peer,
518 				&param->mac_addr.raw[0],
519 				param->type,
520 				param->flags);
521 		dp_peer_unref_delete(peer);
522 	}
523 	qdf_mem_free(cookie);
524 }
525 
526 /*
527  * dp_peer_add_ast() - Allocate and add AST entry into peer list
528  * @soc: SoC handle
529  * @peer: peer to which ast node belongs
530  * @mac_addr: MAC address of ast node
531  * @is_self: Is this base AST entry with peer mac address
532  *
533  * This API is used by WDS source port learning function to
534  * add a new AST entry into peer AST list
535  *
536  * Return: 0 if new entry is allocated,
537  *        -1 if entry add failed
538  */
539 int dp_peer_add_ast(struct dp_soc *soc,
540 			struct dp_peer *peer,
541 			uint8_t *mac_addr,
542 			enum cdp_txrx_ast_entry_type type,
543 			uint32_t flags)
544 {
545 	struct dp_ast_entry *ast_entry = NULL;
546 	struct dp_vdev *vdev = NULL;
547 	struct dp_pdev *pdev = NULL;
548 	uint8_t next_node_mac[6];
549 	int  ret = -1;
550 	txrx_ast_free_cb cb = NULL;
551 	void *cookie = NULL;
552 
553 	if (peer->delete_in_progress)
554 		return ret;
555 
556 	vdev = peer->vdev;
557 	if (!vdev) {
558 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
559 			FL("Peers vdev is NULL"));
560 		QDF_ASSERT(0);
561 		return ret;
562 	}
563 
564 	pdev = vdev->pdev;
565 
566 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
567 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
568 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
569 		  peer->mac_addr.raw, peer, mac_addr);
570 
571 	qdf_spin_lock_bh(&soc->ast_lock);
572 
573 	/* If AST entry already exists , just return from here
574 	 * ast entry with same mac address can exist on different radios
575 	 * if ast_override support is enabled use search by pdev in this
576 	 * case
577 	 */
578 	if (soc->ast_override_support) {
579 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
580 							    pdev->pdev_id);
581 		if (ast_entry) {
582 			qdf_spin_unlock_bh(&soc->ast_lock);
583 			return 0;
584 		}
585 	} else {
586 		/* For HWMWDS_SEC entries can be added for same mac address
587 		 * do not check for existing entry
588 		 */
589 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
590 			goto add_ast_entry;
591 
592 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
593 
594 		if (ast_entry) {
595 			if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
596 				ast_entry->is_active = TRUE;
597 
598 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
599 			    !ast_entry->delete_in_progress) {
600 				qdf_spin_unlock_bh(&soc->ast_lock);
601 				return 0;
602 			}
603 
604 			/* Add for HMWDS entry we cannot be ignored if there
605 			 * is AST entry with same mac address
606 			 *
607 			 * if ast entry exists with the requested mac address
608 			 * send a delete command and register callback which
609 			 * can take care of adding HMWDS ast enty on delete
610 			 * confirmation from target
611 			 */
612 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
613 			    soc->is_peer_map_unmap_v2) {
614 				struct dp_ast_free_cb_params *param = NULL;
615 
616 				if (ast_entry->type ==
617 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
618 					goto add_ast_entry;
619 
620 				/* save existing callback */
621 				if (ast_entry->callback) {
622 					cb = ast_entry->callback;
623 					cookie = ast_entry->cookie;
624 				}
625 
626 				param = qdf_mem_malloc(sizeof(*param));
627 				if (!param) {
628 					QDF_TRACE(QDF_MODULE_ID_TXRX,
629 						  QDF_TRACE_LEVEL_ERROR,
630 						  "Allocation failed");
631 					qdf_spin_unlock_bh(&soc->ast_lock);
632 					return ret;
633 				}
634 
635 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
636 					     DP_MAC_ADDR_LEN);
637 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
638 					     &peer->mac_addr.raw[0],
639 					     DP_MAC_ADDR_LEN);
640 				param->type = type;
641 				param->flags = flags;
642 				param->vdev_id = vdev->vdev_id;
643 				ast_entry->callback = dp_peer_free_hmwds_cb;
644 				ast_entry->cookie = (void *)param;
645 				if (!ast_entry->delete_in_progress)
646 					dp_peer_del_ast(soc, ast_entry);
647 			}
648 
649 			/* Modify an already existing AST entry from type
650 			 * WDS to MEC on promption. This serves as a fix when
651 			 * backbone of interfaces are interchanged wherein
652 			 * wds entr becomes its own MEC. The entry should be
653 			 * replaced only when the ast_entry peer matches the
654 			 * peer received in mec event. This additional check
655 			 * is needed in wds repeater cases where a multicast
656 			 * packet from station to the root via the repeater
657 			 * should not remove the wds entry.
658 			 */
659 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
660 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
661 			    (ast_entry->peer == peer)) {
662 				ast_entry->is_active = FALSE;
663 				dp_peer_del_ast(soc, ast_entry);
664 			}
665 			qdf_spin_unlock_bh(&soc->ast_lock);
666 
667 			/* Call the saved callback*/
668 			if (cb) {
669 				cb(soc->ctrl_psoc, soc, cookie,
670 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
671 			}
672 			return 0;
673 		}
674 	}
675 
676 add_ast_entry:
677 	ast_entry = (struct dp_ast_entry *)
678 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
679 
680 	if (!ast_entry) {
681 		qdf_spin_unlock_bh(&soc->ast_lock);
682 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
683 			FL("fail to allocate ast_entry"));
684 		QDF_ASSERT(0);
685 		return ret;
686 	}
687 
688 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
689 	ast_entry->pdev_id = vdev->pdev->pdev_id;
690 	ast_entry->vdev_id = vdev->vdev_id;
691 	ast_entry->is_mapped = false;
692 	ast_entry->delete_in_progress = false;
693 
694 	switch (type) {
695 	case CDP_TXRX_AST_TYPE_STATIC:
696 		peer->self_ast_entry = ast_entry;
697 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
698 		if (peer->vdev->opmode == wlan_op_mode_sta)
699 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
700 		break;
701 	case CDP_TXRX_AST_TYPE_SELF:
702 		peer->self_ast_entry = ast_entry;
703 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
704 		break;
705 	case CDP_TXRX_AST_TYPE_WDS:
706 		ast_entry->next_hop = 1;
707 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
708 		break;
709 	case CDP_TXRX_AST_TYPE_WDS_HM:
710 		ast_entry->next_hop = 1;
711 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
712 		break;
713 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
714 		ast_entry->next_hop = 1;
715 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
716 		break;
717 	case CDP_TXRX_AST_TYPE_MEC:
718 		ast_entry->next_hop = 1;
719 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
720 		break;
721 	case CDP_TXRX_AST_TYPE_DA:
722 		peer = peer->vdev->vap_bss_peer;
723 		ast_entry->next_hop = 1;
724 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
725 		break;
726 	default:
727 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
728 			FL("Incorrect AST entry type"));
729 	}
730 
731 	ast_entry->is_active = TRUE;
732 	DP_STATS_INC(soc, ast.added, 1);
733 	dp_peer_ast_hash_add(soc, ast_entry);
734 
735 	ast_entry->peer = peer;
736 
737 	if (type == CDP_TXRX_AST_TYPE_MEC)
738 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
739 	else
740 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
741 
742 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
743 	qdf_spin_unlock_bh(&soc->ast_lock);
744 
745 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
746 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
747 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
748 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
749 		if (QDF_STATUS_SUCCESS ==
750 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
751 				peer->vdev->osif_vdev,
752 				(struct cdp_peer *)peer,
753 				mac_addr,
754 				next_node_mac,
755 				flags))
756 			return 0;
757 	}
758 
759 	return ret;
760 }
761 
762 /*
763  * dp_peer_del_ast() - Delete and free AST entry
764  * @soc: SoC handle
765  * @ast_entry: AST entry of the node
766  *
767  * This function removes the AST entry from peer and soc tables
768  * It assumes caller has taken the ast lock to protect the access to these
769  * tables
770  *
771  * Return: None
772  */
773 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
774 {
775 	struct dp_peer *peer = ast_entry->peer;
776 	uint16_t peer_id = peer->peer_ids[0];
777 
778 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
779 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
780 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
781 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
782 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
783 
784 	dp_peer_ast_send_wds_del(soc, ast_entry);
785 
786 	/*
787 	 * if peer map v2 is enabled we are not freeing ast entry
788 	 * here and it is supposed to be freed in unmap event (after
789 	 * we receive delete confirmation from target)
790 	 *
791 	 * if peer_id is invalid we did not get the peer map event
792 	 * for the peer free ast entry from here only in this case
793 	 */
794 	if (soc->is_peer_map_unmap_v2 && (peer_id != HTT_INVALID_PEER)) {
795 
796 		/*
797 		 * For HM_SEC and SELF type we do not receive unmap event
798 		 * free ast_entry from here it self
799 		 */
800 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
801 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
802 			return;
803 	}
804 
805 	/*
806 	 * release the reference only if it is mapped
807 	 * to ast_table
808 	 */
809 	if (ast_entry->is_mapped)
810 		soc->ast_table[ast_entry->ast_idx] = NULL;
811 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
812 
813 	if (ast_entry == peer->self_ast_entry)
814 		peer->self_ast_entry = NULL;
815 
816 	DP_STATS_INC(soc, ast.deleted, 1);
817 	dp_peer_ast_hash_remove(soc, ast_entry);
818 	dp_peer_ast_cleanup(soc, ast_entry);
819 	qdf_mem_free(ast_entry);
820 }
821 
822 /*
823  * dp_peer_update_ast() - Delete and free AST entry
824  * @soc: SoC handle
825  * @peer: peer to which ast node belongs
826  * @ast_entry: AST entry of the node
827  * @flags: wds or hmwds
828  *
829  * This function update the AST entry to the roamed peer and soc tables
830  * It assumes caller has taken the ast lock to protect the access to these
831  * tables
832  *
833  * Return: 0 if ast entry is updated successfully
834  *         -1 failure
835  */
836 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
837 		       struct dp_ast_entry *ast_entry, uint32_t flags)
838 {
839 	int ret = -1;
840 	struct dp_peer *old_peer;
841 
842 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
843 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
844 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
845 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
846 		  peer->mac_addr.raw);
847 
848 	if (ast_entry->delete_in_progress)
849 		return ret;
850 
851 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
852 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
853 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
854 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
855 		return 0;
856 
857 	old_peer = ast_entry->peer;
858 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
859 
860 	ast_entry->peer = peer;
861 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
862 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
863 	ast_entry->vdev_id = peer->vdev->vdev_id;
864 	ast_entry->is_active = TRUE;
865 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
866 
867 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
868 				peer->vdev->osif_vdev,
869 				ast_entry->mac_addr.raw,
870 				peer->mac_addr.raw,
871 				flags);
872 
873 	return ret;
874 }
875 
876 /*
877  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
878  * @soc: SoC handle
879  * @ast_entry: AST entry of the node
880  *
881  * This function gets the pdev_id from the ast entry.
882  *
883  * Return: (uint8_t) pdev_id
884  */
885 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
886 				struct dp_ast_entry *ast_entry)
887 {
888 	return ast_entry->pdev_id;
889 }
890 
891 /*
892  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
893  * @soc: SoC handle
894  * @ast_entry: AST entry of the node
895  *
896  * This function gets the next hop from the ast entry.
897  *
898  * Return: (uint8_t) next_hop
899  */
900 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
901 				struct dp_ast_entry *ast_entry)
902 {
903 	return ast_entry->next_hop;
904 }
905 
906 /*
907  * dp_peer_ast_set_type() - set type from the ast entry
908  * @soc: SoC handle
909  * @ast_entry: AST entry of the node
910  *
911  * This function sets the type in the ast entry.
912  *
913  * Return:
914  */
915 void dp_peer_ast_set_type(struct dp_soc *soc,
916 				struct dp_ast_entry *ast_entry,
917 				enum cdp_txrx_ast_entry_type type)
918 {
919 	ast_entry->type = type;
920 }
921 
922 #else
923 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
924 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
925 		uint32_t flags)
926 {
927 	return 1;
928 }
929 
930 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
931 {
932 }
933 
934 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
935 			struct dp_ast_entry *ast_entry, uint32_t flags)
936 {
937 	return 1;
938 }
939 
940 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
941 					       uint8_t *ast_mac_addr)
942 {
943 	return NULL;
944 }
945 
946 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
947 						     uint8_t *ast_mac_addr,
948 						     uint8_t pdev_id)
949 {
950 	return NULL;
951 }
952 
953 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
954 {
955 	return 0;
956 }
957 
958 static inline void dp_peer_map_ast(struct dp_soc *soc,
959 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
960 	uint8_t vdev_id, uint16_t ast_hash)
961 {
962 	return;
963 }
964 
965 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
966 {
967 }
968 
969 void dp_peer_ast_set_type(struct dp_soc *soc,
970 				struct dp_ast_entry *ast_entry,
971 				enum cdp_txrx_ast_entry_type type)
972 {
973 }
974 
975 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
976 				struct dp_ast_entry *ast_entry)
977 {
978 	return 0xff;
979 }
980 
981 
982 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
983 				struct dp_ast_entry *ast_entry)
984 {
985 	return 0xff;
986 }
987 #endif
988 
989 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
990 			      struct dp_ast_entry *ast_entry)
991 {
992 	struct dp_peer *peer = ast_entry->peer;
993 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
994 
995 	if (ast_entry->delete_in_progress)
996 		return;
997 
998 	if (ast_entry->next_hop &&
999 	    ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1000 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
1001 						    ast_entry->mac_addr.raw);
1002 
1003 	ast_entry->delete_in_progress = true;
1004 }
1005 
1006 static void dp_peer_ast_free_entry(struct dp_soc *soc,
1007 				   struct dp_ast_entry *ast_entry)
1008 {
1009 	struct dp_peer *peer = ast_entry->peer;
1010 	void *cookie = NULL;
1011 	txrx_ast_free_cb cb = NULL;
1012 
1013 	/*
1014 	 * release the reference only if it is mapped
1015 	 * to ast_table
1016 	 */
1017 
1018 	qdf_spin_lock_bh(&soc->ast_lock);
1019 	if (ast_entry->is_mapped)
1020 		soc->ast_table[ast_entry->ast_idx] = NULL;
1021 
1022 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1023 	DP_STATS_INC(soc, ast.deleted, 1);
1024 	dp_peer_ast_hash_remove(soc, ast_entry);
1025 
1026 	cb = ast_entry->callback;
1027 	cookie = ast_entry->cookie;
1028 	ast_entry->callback = NULL;
1029 	ast_entry->cookie = NULL;
1030 
1031 	if (ast_entry == peer->self_ast_entry)
1032 		peer->self_ast_entry = NULL;
1033 
1034 	qdf_spin_unlock_bh(&soc->ast_lock);
1035 
1036 	if (cb) {
1037 		cb(soc->ctrl_psoc,
1038 		   soc,
1039 		   cookie,
1040 		   CDP_TXRX_AST_DELETED);
1041 	}
1042 	qdf_mem_free(ast_entry);
1043 }
1044 
1045 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1046 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1047 {
1048 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1049 	unsigned index;
1050 	struct dp_peer *peer;
1051 
1052 	if (mac_addr_is_aligned) {
1053 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1054 	} else {
1055 		qdf_mem_copy(
1056 			&local_mac_addr_aligned.raw[0],
1057 			peer_mac_addr, DP_MAC_ADDR_LEN);
1058 		mac_addr = &local_mac_addr_aligned;
1059 	}
1060 	index = dp_peer_find_hash_index(soc, mac_addr);
1061 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1062 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1063 #if ATH_SUPPORT_WRAP
1064 		/* ProxySTA may have multiple BSS peer with same MAC address,
1065 		 * modified find will take care of finding the correct BSS peer.
1066 		 */
1067 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1068 			((peer->vdev->vdev_id == vdev_id) ||
1069 			 (vdev_id == DP_VDEV_ALL))) {
1070 #else
1071 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1072 #endif
1073 			/* found it - increment the ref count before releasing
1074 			 * the lock
1075 			 */
1076 			qdf_atomic_inc(&peer->ref_cnt);
1077 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1078 			return peer;
1079 		}
1080 	}
1081 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1082 	return NULL; /* failure */
1083 }
1084 
1085 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1086 {
1087 	unsigned index;
1088 	struct dp_peer *tmppeer = NULL;
1089 	int found = 0;
1090 
1091 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1092 	/* Check if tail is not empty before delete*/
1093 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1094 	/*
1095 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1096 	 * by the caller.
1097 	 * The caller needs to hold the lock from the time the peer object's
1098 	 * reference count is decremented and tested up through the time the
1099 	 * reference to the peer object is removed from the hash table, by
1100 	 * this function.
1101 	 * Holding the lock only while removing the peer object reference
1102 	 * from the hash table keeps the hash table consistent, but does not
1103 	 * protect against a new HL tx context starting to use the peer object
1104 	 * if it looks up the peer object from its MAC address just after the
1105 	 * peer ref count is decremented to zero, but just before the peer
1106 	 * object reference is removed from the hash table.
1107 	 */
1108 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1109 		if (tmppeer == peer) {
1110 			found = 1;
1111 			break;
1112 		}
1113 	}
1114 	QDF_ASSERT(found);
1115 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1116 }
1117 
1118 void dp_peer_find_hash_erase(struct dp_soc *soc)
1119 {
1120 	int i;
1121 
1122 	/*
1123 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1124 	 * it's known that the soc is no longer in use.
1125 	 */
1126 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1127 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1128 			struct dp_peer *peer, *peer_next;
1129 
1130 			/*
1131 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1132 			 * memory access violation after peer is freed
1133 			 */
1134 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1135 				hash_list_elem, peer_next) {
1136 				/*
1137 				 * Don't remove the peer from the hash table -
1138 				 * that would modify the list we are currently
1139 				 * traversing, and it's not necessary anyway.
1140 				 */
1141 				/*
1142 				 * Artificially adjust the peer's ref count to
1143 				 * 1, so it will get deleted by
1144 				 * dp_peer_unref_delete.
1145 				 */
1146 				/* set to zero */
1147 				qdf_atomic_init(&peer->ref_cnt);
1148 				/* incr to one */
1149 				qdf_atomic_inc(&peer->ref_cnt);
1150 				dp_peer_unref_delete(peer);
1151 			}
1152 		}
1153 	}
1154 }
1155 
1156 static void dp_peer_find_map_detach(struct dp_soc *soc)
1157 {
1158 	qdf_mem_free(soc->peer_id_to_obj_map);
1159 }
1160 
1161 int dp_peer_find_attach(struct dp_soc *soc)
1162 {
1163 	if (dp_peer_find_map_attach(soc))
1164 		return 1;
1165 
1166 	if (dp_peer_find_hash_attach(soc)) {
1167 		dp_peer_find_map_detach(soc);
1168 		return 1;
1169 	}
1170 
1171 	if (dp_peer_ast_hash_attach(soc)) {
1172 		dp_peer_find_hash_detach(soc);
1173 		dp_peer_find_map_detach(soc);
1174 		return 1;
1175 	}
1176 	return 0; /* success */
1177 }
1178 
1179 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1180 	union hal_reo_status *reo_status)
1181 {
1182 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1183 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1184 
1185 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1186 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
1187 			queue_status->header.status, rx_tid->tid);
1188 		return;
1189 	}
1190 
1191 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
1192 		"ssn: %d\n"
1193 		"curr_idx  : %d\n"
1194 		"pn_31_0   : %08x\n"
1195 		"pn_63_32  : %08x\n"
1196 		"pn_95_64  : %08x\n"
1197 		"pn_127_96 : %08x\n"
1198 		"last_rx_enq_tstamp : %08x\n"
1199 		"last_rx_deq_tstamp : %08x\n"
1200 		"rx_bitmap_31_0     : %08x\n"
1201 		"rx_bitmap_63_32    : %08x\n"
1202 		"rx_bitmap_95_64    : %08x\n"
1203 		"rx_bitmap_127_96   : %08x\n"
1204 		"rx_bitmap_159_128  : %08x\n"
1205 		"rx_bitmap_191_160  : %08x\n"
1206 		"rx_bitmap_223_192  : %08x\n"
1207 		"rx_bitmap_255_224  : %08x\n",
1208 		rx_tid->tid,
1209 		queue_status->ssn, queue_status->curr_idx,
1210 		queue_status->pn_31_0, queue_status->pn_63_32,
1211 		queue_status->pn_95_64, queue_status->pn_127_96,
1212 		queue_status->last_rx_enq_tstamp,
1213 		queue_status->last_rx_deq_tstamp,
1214 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
1215 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
1216 		queue_status->rx_bitmap_159_128,
1217 		queue_status->rx_bitmap_191_160,
1218 		queue_status->rx_bitmap_223_192,
1219 		queue_status->rx_bitmap_255_224);
1220 
1221 	DP_TRACE_STATS(FATAL,
1222 		"curr_mpdu_cnt      : %d\n"
1223 		"curr_msdu_cnt      : %d\n"
1224 		"fwd_timeout_cnt    : %d\n"
1225 		"fwd_bar_cnt        : %d\n"
1226 		"dup_cnt            : %d\n"
1227 		"frms_in_order_cnt  : %d\n"
1228 		"bar_rcvd_cnt       : %d\n"
1229 		"mpdu_frms_cnt      : %d\n"
1230 		"msdu_frms_cnt      : %d\n"
1231 		"total_byte_cnt     : %d\n"
1232 		"late_recv_mpdu_cnt : %d\n"
1233 		"win_jump_2k 	    : %d\n"
1234 		"hole_cnt 	    : %d\n",
1235 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
1236 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
1237 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
1238 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
1239 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
1240 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
1241 		queue_status->hole_cnt);
1242 
1243 	DP_PRINT_STATS("Addba Req          : %d\n"
1244 			"Addba Resp         : %d\n"
1245 			"Addba Resp success : %d\n"
1246 			"Addba Resp failed  : %d\n"
1247 			"Delba Req received : %d\n"
1248 			"Delba Tx success   : %d\n"
1249 			"Delba Tx Fail      : %d\n"
1250 			"BA window size     : %d\n"
1251 			"Pn size            : %d\n",
1252 			rx_tid->num_of_addba_req,
1253 			rx_tid->num_of_addba_resp,
1254 			rx_tid->num_addba_rsp_success,
1255 			rx_tid->num_addba_rsp_failed,
1256 			rx_tid->num_of_delba_req,
1257 			rx_tid->delba_tx_success_cnt,
1258 			rx_tid->delba_tx_fail_cnt,
1259 			rx_tid->ba_win_size,
1260 			rx_tid->pn_size);
1261 }
1262 
1263 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1264 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1265 	uint8_t vdev_id)
1266 {
1267 	struct dp_peer *peer;
1268 
1269 	QDF_ASSERT(peer_id <= soc->max_peers);
1270 	/* check if there's already a peer object with this MAC address */
1271 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1272 		0 /* is aligned */, vdev_id);
1273 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1274 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1275 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1276 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1277 		peer_mac_addr[4], peer_mac_addr[5]);
1278 
1279 	if (peer) {
1280 		/* peer's ref count was already incremented by
1281 		 * peer_find_hash_find
1282 		 */
1283 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1284 			  "%s: ref_cnt: %d", __func__,
1285 			   qdf_atomic_read(&peer->ref_cnt));
1286 		if (!soc->peer_id_to_obj_map[peer_id])
1287 			soc->peer_id_to_obj_map[peer_id] = peer;
1288 		else {
1289 			/* Peer map event came for peer_id which
1290 			 * is already mapped, this is not expected
1291 			 */
1292 			QDF_ASSERT(0);
1293 		}
1294 
1295 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1296 			/* TBDXXX: assert for now */
1297 			QDF_ASSERT(0);
1298 		}
1299 
1300 		return peer;
1301 	}
1302 
1303 	return NULL;
1304 }
1305 
1306 /**
1307  * dp_rx_peer_map_handler() - handle peer map event from firmware
1308  * @soc_handle - genereic soc handle
1309  * @peeri_id - peer_id from firmware
1310  * @hw_peer_id - ast index for this peer
1311  * @vdev_id - vdev ID
1312  * @peer_mac_addr - mac address of the peer
1313  * @ast_hash - ast hash value
1314  * @is_wds - flag to indicate peer map event for WDS ast entry
1315  *
1316  * associate the peer_id that firmware provided with peer entry
1317  * and update the ast table in the host with the hw_peer_id.
1318  *
1319  * Return: none
1320  */
1321 
1322 void
1323 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1324 		       uint16_t hw_peer_id, uint8_t vdev_id,
1325 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1326 		       uint8_t is_wds)
1327 {
1328 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1329 	struct dp_peer *peer = NULL;
1330 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1331 
1332 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1333 		"peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac "
1334 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
1335 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1336 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1337 		peer_mac_addr[5], vdev_id);
1338 
1339 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
1340 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1341 			"invalid hw_peer_id: %d", hw_peer_id);
1342 		qdf_assert_always(0);
1343 	}
1344 
1345 	/* Peer map event for WDS ast entry get the peer from
1346 	 * obj map
1347 	 */
1348 	if (is_wds) {
1349 		peer = soc->peer_id_to_obj_map[peer_id];
1350 	} else {
1351 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1352 					   hw_peer_id, vdev_id);
1353 
1354 		if (peer) {
1355 			/*
1356 			 * For every peer Map message search and set if bss_peer
1357 			 */
1358 			if (!(qdf_mem_cmp(peer->mac_addr.raw,
1359 					  peer->vdev->mac_addr.raw,
1360 					  DP_MAC_ADDR_LEN))) {
1361 				QDF_TRACE(QDF_MODULE_ID_DP,
1362 					  QDF_TRACE_LEVEL_INFO_HIGH,
1363 					  "vdev bss_peer!!!!");
1364 				peer->bss_peer = 1;
1365 				peer->vdev->vap_bss_peer = peer;
1366 			}
1367 
1368 			if (peer->vdev->opmode == wlan_op_mode_sta)
1369 				peer->vdev->bss_ast_hash = ast_hash;
1370 
1371 			/* Add ast entry incase self ast entry is
1372 			 * deleted due to DP CP sync issue
1373 			 *
1374 			 * self_ast_entry is modified in peer create
1375 			 * and peer unmap path which cannot run in
1376 			 * parllel with peer map, no lock need before
1377 			 * referring it
1378 			 */
1379 			if (!peer->self_ast_entry) {
1380 				QDF_TRACE(QDF_MODULE_ID_DP,
1381 					  QDF_TRACE_LEVEL_INFO_HIGH,
1382 					  "Add self ast from map %pM",
1383 					  peer_mac_addr);
1384 				dp_peer_add_ast(soc, peer,
1385 						peer_mac_addr,
1386 						type, 0);
1387 			}
1388 
1389 		}
1390 	}
1391 
1392 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1393 			hw_peer_id, vdev_id, ast_hash);
1394 }
1395 
1396 /**
1397  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1398  * @soc_handle - genereic soc handle
1399  * @peeri_id - peer_id from firmware
1400  * @vdev_id - vdev ID
1401  * @mac_addr - mac address of the peer or wds entry
1402  * @is_wds - flag to indicate peer map event for WDS ast entry
1403  *
1404  * Return: none
1405  */
1406 void
1407 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1408 			 uint8_t vdev_id, uint8_t *mac_addr,
1409 			 uint8_t is_wds)
1410 {
1411 	struct dp_peer *peer;
1412 	struct dp_ast_entry *ast_entry;
1413 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1414 	uint8_t i;
1415 
1416 	peer = __dp_peer_find_by_id(soc, peer_id);
1417 
1418 	/*
1419 	 * Currently peer IDs are assigned for vdevs as well as peers.
1420 	 * If the peer ID is for a vdev, then the peer pointer stored
1421 	 * in peer_id_to_obj_map will be NULL.
1422 	 */
1423 	if (!peer) {
1424 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1425 			"%s: Received unmap event for invalid peer_id"
1426 			" %u", __func__, peer_id);
1427 		return;
1428 	}
1429 
1430 	/* If V2 Peer map messages are enabled AST entry has to be freed here
1431 	 */
1432 	if (soc->is_peer_map_unmap_v2) {
1433 
1434 		qdf_spin_lock_bh(&soc->ast_lock);
1435 		ast_entry = dp_peer_ast_list_find(soc, peer,
1436 						  mac_addr);
1437 
1438 		if (!ast_entry) {
1439 			/* in case of qwrap we have multiple BSS peers
1440 			 * with same mac address
1441 			 *
1442 			 * AST entry for this mac address will be created
1443 			 * only for one peer
1444 			 */
1445 			if (peer->vdev->proxysta_vdev) {
1446 				qdf_spin_unlock_bh(&soc->ast_lock);
1447 				goto peer_unmap;
1448 			}
1449 
1450 			/* Ideally we should not enter this case where
1451 			 * ast_entry is not present in host table and
1452 			 * we received a unmap event
1453 			 */
1454 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
1455 				  "%s:%d AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u\n",
1456 				  __func__, __LINE__, peer, peer->peer_ids[0],
1457 				  peer->mac_addr.raw, mac_addr, vdev_id,
1458 				  is_wds);
1459 
1460 			if (!is_wds) {
1461 				qdf_spin_unlock_bh(&soc->ast_lock);
1462 				goto peer_unmap;
1463 			}
1464 		}
1465 		qdf_spin_unlock_bh(&soc->ast_lock);
1466 
1467 		/* Reuse the AST entry if delete_in_progress
1468 		 * not set
1469 		 */
1470 		if (ast_entry->delete_in_progress)
1471 			dp_peer_ast_free_entry(soc, ast_entry);
1472 
1473 		if (is_wds)
1474 			return;
1475 	}
1476 
1477 peer_unmap:
1478 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1479 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1480 		soc, peer_id, peer);
1481 
1482 	soc->peer_id_to_obj_map[peer_id] = NULL;
1483 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1484 		if (peer->peer_ids[i] == peer_id) {
1485 			peer->peer_ids[i] = HTT_INVALID_PEER;
1486 			break;
1487 		}
1488 	}
1489 
1490 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1491 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1492 				peer_id);
1493 	}
1494 
1495 	/*
1496 	 * Remove a reference to the peer.
1497 	 * If there are no more references, delete the peer object.
1498 	 */
1499 	dp_peer_unref_delete(peer);
1500 }
1501 
1502 void
1503 dp_peer_find_detach(struct dp_soc *soc)
1504 {
1505 	dp_peer_find_map_detach(soc);
1506 	dp_peer_find_hash_detach(soc);
1507 	dp_peer_ast_hash_detach(soc);
1508 }
1509 
1510 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1511 	union hal_reo_status *reo_status)
1512 {
1513 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1514 
1515 	if ((reo_status->rx_queue_status.header.status !=
1516 		HAL_REO_CMD_SUCCESS) &&
1517 		(reo_status->rx_queue_status.header.status !=
1518 		HAL_REO_CMD_DRAIN)) {
1519 		/* Should not happen normally. Just print error for now */
1520 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1521 			"%s: Rx tid HW desc update failed(%d): tid %d",
1522 			__func__,
1523 			reo_status->rx_queue_status.header.status,
1524 			rx_tid->tid);
1525 	}
1526 }
1527 
1528 /*
1529  * dp_find_peer_by_addr - find peer instance by mac address
1530  * @dev: physical device instance
1531  * @peer_mac_addr: peer mac address
1532  * @local_id: local id for the peer
1533  *
1534  * Return: peer instance pointer
1535  */
1536 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1537 		uint8_t *local_id)
1538 {
1539 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1540 	struct dp_peer *peer;
1541 
1542 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1543 
1544 	if (!peer)
1545 		return NULL;
1546 
1547 	/* Multiple peer ids? How can know peer id? */
1548 	*local_id = peer->local_id;
1549 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1550 
1551 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1552 	 * Decrement it here.
1553 	 */
1554 	dp_peer_unref_delete(peer);
1555 
1556 	return peer;
1557 }
1558 
1559 /*
1560  * dp_rx_tid_update_wifi3() – Update receive TID state
1561  * @peer: Datapath peer handle
1562  * @tid: TID
1563  * @ba_window_size: BlockAck window size
1564  * @start_seq: Starting sequence number
1565  *
1566  * Return: 0 on success, error code on failure
1567  */
1568 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1569 				  ba_window_size, uint32_t start_seq)
1570 {
1571 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1572 	struct dp_soc *soc = peer->vdev->pdev->soc;
1573 	struct hal_reo_cmd_params params;
1574 
1575 	qdf_mem_zero(&params, sizeof(params));
1576 
1577 	params.std.need_status = 1;
1578 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1579 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1580 	params.u.upd_queue_params.update_ba_window_size = 1;
1581 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1582 
1583 	if (start_seq < IEEE80211_SEQ_MAX) {
1584 		params.u.upd_queue_params.update_ssn = 1;
1585 		params.u.upd_queue_params.ssn = start_seq;
1586 	}
1587 
1588 	dp_set_ssn_valid_flag(&params, 0);
1589 
1590 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1591 
1592 	rx_tid->ba_win_size = ba_window_size;
1593 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1594 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1595 			peer->vdev->pdev->ctrl_pdev,
1596 			peer->vdev->vdev_id, peer->mac_addr.raw,
1597 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1598 
1599 	}
1600 	return 0;
1601 }
1602 
1603 /*
1604  * dp_reo_desc_free() - Callback free reo descriptor memory after
1605  * HW cache flush
1606  *
1607  * @soc: DP SOC handle
1608  * @cb_ctxt: Callback context
1609  * @reo_status: REO command status
1610  */
1611 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1612 	union hal_reo_status *reo_status)
1613 {
1614 	struct reo_desc_list_node *freedesc =
1615 		(struct reo_desc_list_node *)cb_ctxt;
1616 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1617 
1618 	if ((reo_status->fl_cache_status.header.status !=
1619 		HAL_REO_CMD_SUCCESS) &&
1620 		(reo_status->fl_cache_status.header.status !=
1621 		HAL_REO_CMD_DRAIN)) {
1622 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1623 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1624 			__func__,
1625 			reo_status->rx_queue_status.header.status,
1626 			freedesc->rx_tid.tid);
1627 	}
1628 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1629 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1630 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1631 	qdf_mem_unmap_nbytes_single(soc->osdev,
1632 		rx_tid->hw_qdesc_paddr,
1633 		QDF_DMA_BIDIRECTIONAL,
1634 		rx_tid->hw_qdesc_alloc_size);
1635 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1636 	qdf_mem_free(freedesc);
1637 }
1638 
1639 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1640 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1641 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1642 {
1643 	if (dma_addr < 0x50000000)
1644 		return QDF_STATUS_E_FAILURE;
1645 	else
1646 		return QDF_STATUS_SUCCESS;
1647 }
1648 #else
1649 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1650 {
1651 	return QDF_STATUS_SUCCESS;
1652 }
1653 #endif
1654 
1655 
1656 /*
1657  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1658  * @peer: Datapath peer handle
1659  * @tid: TID
1660  * @ba_window_size: BlockAck window size
1661  * @start_seq: Starting sequence number
1662  *
1663  * Return: 0 on success, error code on failure
1664  */
1665 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1666 	uint32_t ba_window_size, uint32_t start_seq)
1667 {
1668 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1669 	struct dp_vdev *vdev = peer->vdev;
1670 	struct dp_soc *soc = vdev->pdev->soc;
1671 	uint32_t hw_qdesc_size;
1672 	uint32_t hw_qdesc_align;
1673 	int hal_pn_type;
1674 	void *hw_qdesc_vaddr;
1675 	uint32_t alloc_tries = 0;
1676 
1677 	if (peer->delete_in_progress ||
1678 	    !qdf_atomic_read(&peer->is_default_route_set))
1679 		return QDF_STATUS_E_FAILURE;
1680 
1681 	rx_tid->ba_win_size = ba_window_size;
1682 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1683 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1684 			start_seq);
1685 	rx_tid->delba_tx_status = 0;
1686 	rx_tid->ppdu_id_2k = 0;
1687 	rx_tid->num_of_addba_req = 0;
1688 	rx_tid->num_of_delba_req = 0;
1689 	rx_tid->num_of_addba_resp = 0;
1690 	rx_tid->num_addba_rsp_failed = 0;
1691 	rx_tid->num_addba_rsp_success = 0;
1692 	rx_tid->delba_tx_success_cnt = 0;
1693 	rx_tid->delba_tx_fail_cnt = 0;
1694 	rx_tid->statuscode = 0;
1695 
1696 	/* TODO: Allocating HW queue descriptors based on max BA window size
1697 	 * for all QOS TIDs so that same descriptor can be used later when
1698 	 * ADDBA request is recevied. This should be changed to allocate HW
1699 	 * queue descriptors based on BA window size being negotiated (0 for
1700 	 * non BA cases), and reallocate when BA window size changes and also
1701 	 * send WMI message to FW to change the REO queue descriptor in Rx
1702 	 * peer entry as part of dp_rx_tid_update.
1703 	 */
1704 	if (tid != DP_NON_QOS_TID)
1705 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1706 			HAL_RX_MAX_BA_WINDOW, tid);
1707 	else
1708 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1709 			ba_window_size, tid);
1710 
1711 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1712 	/* To avoid unnecessary extra allocation for alignment, try allocating
1713 	 * exact size and see if we already have aligned address.
1714 	 */
1715 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1716 
1717 try_desc_alloc:
1718 	rx_tid->hw_qdesc_vaddr_unaligned =
1719 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1720 
1721 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1722 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1723 			"%s: Rx tid HW desc alloc failed: tid %d",
1724 			__func__, tid);
1725 		return QDF_STATUS_E_NOMEM;
1726 	}
1727 
1728 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1729 		hw_qdesc_align) {
1730 		/* Address allocated above is not alinged. Allocate extra
1731 		 * memory for alignment
1732 		 */
1733 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1734 		rx_tid->hw_qdesc_vaddr_unaligned =
1735 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1736 					hw_qdesc_align - 1);
1737 
1738 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1739 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1740 				"%s: Rx tid HW desc alloc failed: tid %d",
1741 				__func__, tid);
1742 			return QDF_STATUS_E_NOMEM;
1743 		}
1744 
1745 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1746 			rx_tid->hw_qdesc_vaddr_unaligned,
1747 			hw_qdesc_align);
1748 
1749 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1750 			"%s: Total Size %d Aligned Addr %pK",
1751 			__func__, rx_tid->hw_qdesc_alloc_size,
1752 			hw_qdesc_vaddr);
1753 
1754 	} else {
1755 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1756 	}
1757 
1758 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1759 	 * Currently this is set based on htt indication
1760 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1761 	 */
1762 	switch (peer->security[dp_sec_ucast].sec_type) {
1763 	case cdp_sec_type_tkip_nomic:
1764 	case cdp_sec_type_aes_ccmp:
1765 	case cdp_sec_type_aes_ccmp_256:
1766 	case cdp_sec_type_aes_gcmp:
1767 	case cdp_sec_type_aes_gcmp_256:
1768 		hal_pn_type = HAL_PN_WPA;
1769 		break;
1770 	case cdp_sec_type_wapi:
1771 		if (vdev->opmode == wlan_op_mode_ap)
1772 			hal_pn_type = HAL_PN_WAPI_EVEN;
1773 		else
1774 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1775 		break;
1776 	default:
1777 		hal_pn_type = HAL_PN_NONE;
1778 		break;
1779 	}
1780 
1781 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1782 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1783 
1784 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1785 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1786 		&(rx_tid->hw_qdesc_paddr));
1787 
1788 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1789 			QDF_STATUS_SUCCESS) {
1790 		if (alloc_tries++ < 10)
1791 			goto try_desc_alloc;
1792 		else {
1793 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1794 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1795 			__func__, tid);
1796 			return QDF_STATUS_E_NOMEM;
1797 		}
1798 	}
1799 
1800 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1801 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1802 			vdev->pdev->ctrl_pdev,
1803 			peer->vdev->vdev_id, peer->mac_addr.raw,
1804 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1805 
1806 	}
1807 	return 0;
1808 }
1809 
1810 /*
1811  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1812  * after deleting the entries (ie., setting valid=0)
1813  *
1814  * @soc: DP SOC handle
1815  * @cb_ctxt: Callback context
1816  * @reo_status: REO command status
1817  */
1818 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1819 	union hal_reo_status *reo_status)
1820 {
1821 	struct reo_desc_list_node *freedesc =
1822 		(struct reo_desc_list_node *)cb_ctxt;
1823 	uint32_t list_size;
1824 	struct reo_desc_list_node *desc;
1825 	unsigned long curr_ts = qdf_get_system_timestamp();
1826 	uint32_t desc_size, tot_desc_size;
1827 	struct hal_reo_cmd_params params;
1828 
1829 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1830 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1831 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1832 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1833 		return;
1834 	} else if (reo_status->rx_queue_status.header.status !=
1835 		HAL_REO_CMD_SUCCESS) {
1836 		/* Should not happen normally. Just print error for now */
1837 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1838 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1839 			__func__,
1840 			reo_status->rx_queue_status.header.status,
1841 			freedesc->rx_tid.tid);
1842 	}
1843 
1844 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1845 		"%s: rx_tid: %d status: %d", __func__,
1846 		freedesc->rx_tid.tid,
1847 		reo_status->rx_queue_status.header.status);
1848 
1849 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1850 	freedesc->free_ts = curr_ts;
1851 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1852 		(qdf_list_node_t *)freedesc, &list_size);
1853 
1854 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1855 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1856 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1857 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1858 		struct dp_rx_tid *rx_tid;
1859 
1860 		qdf_list_remove_front(&soc->reo_desc_freelist,
1861 				(qdf_list_node_t **)&desc);
1862 		list_size--;
1863 		rx_tid = &desc->rx_tid;
1864 
1865 		/* Flush and invalidate REO descriptor from HW cache: Base and
1866 		 * extension descriptors should be flushed separately */
1867 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1868 		/* Get base descriptor size by passing non-qos TID */
1869 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1870 						   DP_NON_QOS_TID);
1871 
1872 		/* Flush reo extension descriptors */
1873 		while ((tot_desc_size -= desc_size) > 0) {
1874 			qdf_mem_zero(&params, sizeof(params));
1875 			params.std.addr_lo =
1876 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1877 				tot_desc_size) & 0xffffffff;
1878 			params.std.addr_hi =
1879 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1880 
1881 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1882 							CMD_FLUSH_CACHE,
1883 							&params,
1884 							NULL,
1885 							NULL)) {
1886 				QDF_TRACE(QDF_MODULE_ID_DP,
1887 					QDF_TRACE_LEVEL_ERROR,
1888 					"%s: fail to send CMD_CACHE_FLUSH:"
1889 					"tid %d desc %pK", __func__,
1890 					rx_tid->tid,
1891 					(void *)(rx_tid->hw_qdesc_paddr));
1892 			}
1893 		}
1894 
1895 		/* Flush base descriptor */
1896 		qdf_mem_zero(&params, sizeof(params));
1897 		params.std.need_status = 1;
1898 		params.std.addr_lo =
1899 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1900 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1901 
1902 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1903 							  CMD_FLUSH_CACHE,
1904 							  &params,
1905 							  dp_reo_desc_free,
1906 							  (void *)desc)) {
1907 			union hal_reo_status reo_status;
1908 			/*
1909 			 * If dp_reo_send_cmd return failure, related TID queue desc
1910 			 * should be unmapped. Also locally reo_desc, together with
1911 			 * TID queue desc also need to be freed accordingly.
1912 			 *
1913 			 * Here invoke desc_free function directly to do clean up.
1914 			 */
1915 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1916 				"%s: fail to send REO cmd to flush cache: tid %d",
1917 				__func__, rx_tid->tid);
1918 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1919 			reo_status.fl_cache_status.header.status = 0;
1920 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1921 		}
1922 	}
1923 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1924 }
1925 
1926 /*
1927  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1928  * @peer: Datapath peer handle
1929  * @tid: TID
1930  *
1931  * Return: 0 on success, error code on failure
1932  */
1933 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1934 {
1935 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1936 	struct dp_soc *soc = peer->vdev->pdev->soc;
1937 	struct hal_reo_cmd_params params;
1938 	struct reo_desc_list_node *freedesc =
1939 		qdf_mem_malloc(sizeof(*freedesc));
1940 
1941 	if (!freedesc) {
1942 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1943 			"%s: malloc failed for freedesc: tid %d",
1944 			__func__, tid);
1945 		return -ENOMEM;
1946 	}
1947 
1948 	freedesc->rx_tid = *rx_tid;
1949 
1950 	qdf_mem_zero(&params, sizeof(params));
1951 
1952 	params.std.need_status = 1;
1953 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1954 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1955 	params.u.upd_queue_params.update_vld = 1;
1956 	params.u.upd_queue_params.vld = 0;
1957 
1958 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1959 		dp_rx_tid_delete_cb, (void *)freedesc);
1960 
1961 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1962 	rx_tid->hw_qdesc_alloc_size = 0;
1963 	rx_tid->hw_qdesc_paddr = 0;
1964 
1965 	return 0;
1966 }
1967 
1968 #ifdef DP_LFR
1969 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1970 {
1971 	int tid;
1972 
1973 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1974 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1975 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1976 			"Setting up TID %d for peer %pK peer->local_id %d",
1977 			tid, peer, peer->local_id);
1978 	}
1979 }
1980 #else
1981 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1982 #endif
1983 /*
1984  * dp_peer_rx_init() – Initialize receive TID state
1985  * @pdev: Datapath pdev
1986  * @peer: Datapath peer
1987  *
1988  */
1989 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1990 {
1991 	int tid;
1992 	struct dp_rx_tid *rx_tid;
1993 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1994 		rx_tid = &peer->rx_tid[tid];
1995 		rx_tid->array = &rx_tid->base;
1996 		rx_tid->base.head = rx_tid->base.tail = NULL;
1997 		rx_tid->tid = tid;
1998 		rx_tid->defrag_timeout_ms = 0;
1999 		rx_tid->ba_win_size = 0;
2000 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2001 
2002 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2003 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2004 
2005 #ifdef notyet /* TODO: See if this is required for exception handling */
2006 		/* invalid sequence number */
2007 		peer->tids_last_seq[tid] = 0xffff;
2008 #endif
2009 	}
2010 
2011 	peer->active_ba_session_cnt = 0;
2012 	peer->hw_buffer_size = 0;
2013 	peer->kill_256_sessions = 0;
2014 
2015 	/* Setup default (non-qos) rx tid queue */
2016 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2017 
2018 	/* Setup rx tid queue for TID 0.
2019 	 * Other queues will be setup on receiving first packet, which will cause
2020 	 * NULL REO queue error
2021 	 */
2022 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2023 
2024 	/*
2025 	 * Setup the rest of TID's to handle LFR
2026 	 */
2027 	dp_peer_setup_remaining_tids(peer);
2028 
2029 	/*
2030 	 * Set security defaults: no PN check, no security. The target may
2031 	 * send a HTT SEC_IND message to overwrite these defaults.
2032 	 */
2033 	peer->security[dp_sec_ucast].sec_type =
2034 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2035 }
2036 
2037 /*
2038  * dp_peer_rx_cleanup() – Cleanup receive TID state
2039  * @vdev: Datapath vdev
2040  * @peer: Datapath peer
2041  *
2042  */
2043 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2044 {
2045 	int tid;
2046 	uint32_t tid_delete_mask = 0;
2047 
2048 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
2049 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2050 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2051 
2052 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2053 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
2054 			dp_rx_tid_delete_wifi3(peer, tid);
2055 
2056 			/* Cleanup defrag related resource */
2057 			dp_rx_defrag_waitlist_remove(peer, tid);
2058 			dp_rx_reorder_flush_frag(peer, tid);
2059 
2060 			tid_delete_mask |= (1 << tid);
2061 		}
2062 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2063 	}
2064 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2065 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2066 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
2067 			peer->vdev->vdev_id, peer->mac_addr.raw,
2068 			tid_delete_mask);
2069 	}
2070 #endif
2071 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
2072 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2073 }
2074 
2075 /*
2076  * dp_peer_cleanup() – Cleanup peer information
2077  * @vdev: Datapath vdev
2078  * @peer: Datapath peer
2079  *
2080  */
2081 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2082 {
2083 	peer->last_assoc_rcvd = 0;
2084 	peer->last_disassoc_rcvd = 0;
2085 	peer->last_deauth_rcvd = 0;
2086 
2087 	/* cleanup the Rx reorder queues for this peer */
2088 	dp_peer_rx_cleanup(vdev, peer);
2089 }
2090 
2091 /* dp_teardown_256_ba_session() - Teardown sessions using 256
2092  *                                window size when a request with
2093  *                                64 window size is received.
2094  *                                This is done as a WAR since HW can
2095  *                                have only one setting per peer (64 or 256).
2096  *                                For HKv2, we use per tid buffersize setting
2097  *                                for 0 to per_tid_basize_max_tid. For tid
2098  *                                more than per_tid_basize_max_tid we use HKv1
2099  *                                method.
2100  * @peer: Datapath peer
2101  *
2102  * Return: void
2103  */
2104 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2105 {
2106 	uint8_t delba_rcode = 0;
2107 	int tid;
2108 	struct dp_rx_tid *rx_tid = NULL;
2109 
2110 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2111 	for (; tid < DP_MAX_TIDS; tid++) {
2112 		rx_tid = &peer->rx_tid[tid];
2113 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2114 
2115 		if (rx_tid->ba_win_size <= 64) {
2116 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2117 			continue;
2118 		} else {
2119 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2120 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2121 				/* send delba */
2122 				if (!rx_tid->delba_tx_status) {
2123 					rx_tid->delba_tx_retry++;
2124 					rx_tid->delba_tx_status = 1;
2125 					rx_tid->delba_rcode =
2126 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2127 					delba_rcode = rx_tid->delba_rcode;
2128 
2129 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2130 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2131 							peer->vdev->pdev->ctrl_pdev,
2132 							peer->ctrl_peer,
2133 							peer->mac_addr.raw,
2134 							tid, peer->vdev->ctrl_vdev,
2135 							delba_rcode);
2136 				} else {
2137 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2138 				}
2139 			} else {
2140 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2141 			}
2142 		}
2143 	}
2144 }
2145 
2146 /*
2147 * dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State
2148 *
2149 * @peer: Datapath peer handle
2150 * @tid: TID number
2151 * @status: tx completion status
2152 * Return: 0 on success, error code on failure
2153 */
2154 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
2155 				      uint8_t tid, int status)
2156 {
2157 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2158 	struct dp_rx_tid *rx_tid = NULL;
2159 
2160 	if (!peer || peer->delete_in_progress) {
2161 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2162 			  "%s: Peer is NULL!\n", __func__);
2163 		return QDF_STATUS_E_FAILURE;
2164 	}
2165 	rx_tid = &peer->rx_tid[tid];
2166 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2167 	if (status) {
2168 		rx_tid->num_addba_rsp_failed++;
2169 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2170 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2171 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2172 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2173 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
2174 			 __func__, tid);
2175 		return QDF_STATUS_SUCCESS;
2176 	}
2177 
2178 	rx_tid->num_addba_rsp_success++;
2179 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2180 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2181 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2182 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2183 			__func__, tid);
2184 		return QDF_STATUS_E_FAILURE;
2185 	}
2186 
2187 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2188 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2189 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2190 			  "%s: default route is not set for peer: %pM",
2191 			  __func__, peer->mac_addr.raw);
2192 		return QDF_STATUS_E_FAILURE;
2193 	}
2194 
2195 	/* First Session */
2196 	if (peer->active_ba_session_cnt == 0) {
2197 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2198 			peer->hw_buffer_size = 256;
2199 		else
2200 			peer->hw_buffer_size = 64;
2201 	}
2202 
2203 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2204 
2205 	peer->active_ba_session_cnt++;
2206 
2207 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2208 
2209 	/* Kill any session having 256 buffer size
2210 	 * when 64 buffer size request is received.
2211 	 * Also, latch on to 64 as new buffer size.
2212 	 */
2213 	if (peer->kill_256_sessions) {
2214 		dp_teardown_256_ba_sessions(peer);
2215 		peer->kill_256_sessions = 0;
2216 	}
2217 	return QDF_STATUS_SUCCESS;
2218 }
2219 
2220 /*
2221 * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer
2222 *
2223 * @peer: Datapath peer handle
2224 * @tid: TID number
2225 * @dialogtoken: output dialogtoken
2226 * @statuscode: output dialogtoken
2227 * @buffersize: Output BA window size
2228 * @batimeout: Output BA timeout
2229 */
2230 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2231 	uint8_t *dialogtoken, uint16_t *statuscode,
2232 	uint16_t *buffersize, uint16_t *batimeout)
2233 {
2234 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2235 	struct dp_rx_tid *rx_tid = NULL;
2236 
2237 	if (!peer || peer->delete_in_progress) {
2238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2239 			  "%s: Peer is NULL!\n", __func__);
2240 		return;
2241 	}
2242 	rx_tid = &peer->rx_tid[tid];
2243 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2244 	rx_tid->num_of_addba_resp++;
2245 	/* setup ADDBA response parameters */
2246 	*dialogtoken = rx_tid->dialogtoken;
2247 	*statuscode = rx_tid->statuscode;
2248 	*buffersize = rx_tid->ba_win_size;
2249 	*batimeout  = 0;
2250 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2251 }
2252 
2253 /* dp_check_ba_buffersize() - Check buffer size in request
2254  *                            and latch onto this size based on
2255  *                            size used in first active session.
2256  * @peer: Datapath peer
2257  * @tid: Tid
2258  * @buffersize: Block ack window size
2259  *
2260  * Return: void
2261  */
2262 static void dp_check_ba_buffersize(struct dp_peer *peer,
2263 				   uint16_t tid,
2264 				   uint16_t buffersize)
2265 {
2266 	struct dp_rx_tid *rx_tid = NULL;
2267 
2268 	rx_tid = &peer->rx_tid[tid];
2269 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2270 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2271 		rx_tid->ba_win_size = buffersize;
2272 		return;
2273 	} else {
2274 		if (peer->active_ba_session_cnt == 0) {
2275 			rx_tid->ba_win_size = buffersize;
2276 		} else {
2277 			if (peer->hw_buffer_size == 64) {
2278 				if (buffersize <= 64)
2279 					rx_tid->ba_win_size = buffersize;
2280 				else
2281 					rx_tid->ba_win_size = peer->hw_buffer_size;
2282 			} else if (peer->hw_buffer_size == 256) {
2283 				if (buffersize > 64) {
2284 					rx_tid->ba_win_size = buffersize;
2285 				} else {
2286 					rx_tid->ba_win_size = buffersize;
2287 					peer->hw_buffer_size = 64;
2288 					peer->kill_256_sessions = 1;
2289 				}
2290 			}
2291 		}
2292 	}
2293 }
2294 
2295 /*
2296  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2297  *
2298  * @peer: Datapath peer handle
2299  * @dialogtoken: dialogtoken from ADDBA frame
2300  * @tid: TID number
2301  * @batimeout: BA timeout
2302  * @buffersize: BA window size
2303  * @startseqnum: Start seq. number received in BA sequence control
2304  *
2305  * Return: 0 on success, error code on failure
2306  */
2307 int dp_addba_requestprocess_wifi3(void *peer_handle,
2308 				  uint8_t dialogtoken,
2309 				  uint16_t tid, uint16_t batimeout,
2310 				  uint16_t buffersize,
2311 				  uint16_t startseqnum)
2312 {
2313 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2314 	struct dp_rx_tid *rx_tid = NULL;
2315 
2316 	if (!peer || peer->delete_in_progress) {
2317 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2318 			  "%s: Peer is NULL!\n", __func__);
2319 		return QDF_STATUS_E_FAILURE;
2320 	}
2321 	rx_tid = &peer->rx_tid[tid];
2322 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2323 	rx_tid->num_of_addba_req++;
2324 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2325 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
2326 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
2327 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2328 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2329 		peer->active_ba_session_cnt--;
2330 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2331 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2332 			  "%s: Rx Tid- %d hw qdesc is already setup",
2333 			__func__, tid);
2334 		return QDF_STATUS_E_FAILURE;
2335 	}
2336 
2337 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2338 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2339 		return QDF_STATUS_E_FAILURE;
2340 	}
2341 	dp_check_ba_buffersize(peer, tid, buffersize);
2342 
2343 	if (dp_rx_tid_setup_wifi3(peer, tid,
2344 	    rx_tid->ba_win_size, startseqnum)) {
2345 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2346 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2347 		return QDF_STATUS_E_FAILURE;
2348 	}
2349 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2350 
2351 	rx_tid->dialogtoken = dialogtoken;
2352 	rx_tid->startseqnum = startseqnum;
2353 
2354 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2355 		rx_tid->statuscode = rx_tid->userstatuscode;
2356 	else
2357 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2358 
2359 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2360 
2361 	return QDF_STATUS_SUCCESS;
2362 }
2363 
2364 /*
2365 * dp_set_addba_response() – Set a user defined ADDBA response status code
2366 *
2367 * @peer: Datapath peer handle
2368 * @tid: TID number
2369 * @statuscode: response status code to be set
2370 */
2371 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2372 	uint16_t statuscode)
2373 {
2374 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2375 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2376 
2377 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2378 	rx_tid->userstatuscode = statuscode;
2379 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2380 }
2381 
2382 /*
2383 * dp_rx_delba_process_wifi3() – Process DELBA from peer
2384 * @peer: Datapath peer handle
2385 * @tid: TID number
2386 * @reasoncode: Reason code received in DELBA frame
2387 *
2388 * Return: 0 on success, error code on failure
2389 */
2390 int dp_delba_process_wifi3(void *peer_handle,
2391 	int tid, uint16_t reasoncode)
2392 {
2393 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2394 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2395 
2396 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2397 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2398 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2399 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2400 		return QDF_STATUS_E_FAILURE;
2401 	}
2402 	/* TODO: See if we can delete the existing REO queue descriptor and
2403 	 * replace with a new one without queue extenstion descript to save
2404 	 * memory
2405 	 */
2406 	rx_tid->delba_rcode = reasoncode;
2407 	rx_tid->num_of_delba_req++;
2408 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2409 
2410 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2411 	peer->active_ba_session_cnt--;
2412 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2413 	return 0;
2414 }
2415 
2416 /*
2417  * dp_rx_delba_tx_completion_wifi3() – Send Delba Request
2418  *
2419  * @peer: Datapath peer handle
2420  * @tid: TID number
2421  * @status: tx completion status
2422  * Return: 0 on success, error code on failure
2423  */
2424 
2425 int dp_delba_tx_completion_wifi3(void *peer_handle,
2426 				 uint8_t tid, int status)
2427 {
2428 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2429 	struct dp_rx_tid *rx_tid = NULL;
2430 
2431 	if (!peer || peer->delete_in_progress) {
2432 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2433 			  "%s: Peer is NULL!", __func__);
2434 		return QDF_STATUS_E_FAILURE;
2435 	}
2436 	rx_tid = &peer->rx_tid[tid];
2437 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2438 	if (status) {
2439 		rx_tid->delba_tx_fail_cnt++;
2440 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2441 			rx_tid->delba_tx_retry = 0;
2442 			rx_tid->delba_tx_status = 0;
2443 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2444 		} else {
2445 			rx_tid->delba_tx_retry++;
2446 			rx_tid->delba_tx_status = 1;
2447 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2448 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2449 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2450 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2451 				rx_tid->delba_rcode);
2452 		}
2453 		return QDF_STATUS_SUCCESS;
2454 	} else {
2455 		rx_tid->delba_tx_success_cnt++;
2456 		rx_tid->delba_tx_retry = 0;
2457 		rx_tid->delba_tx_status = 0;
2458 	}
2459 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2460 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2461 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2462 		peer->active_ba_session_cnt--;
2463 	}
2464 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2465 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2466 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2467 	}
2468 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2469 
2470 	return QDF_STATUS_SUCCESS;
2471 }
2472 
2473 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2474 	qdf_nbuf_t msdu_list)
2475 {
2476 	while (msdu_list) {
2477 		qdf_nbuf_t msdu = msdu_list;
2478 
2479 		msdu_list = qdf_nbuf_next(msdu_list);
2480 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2481 			"discard rx %pK from partly-deleted peer %pK "
2482 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
2483 			msdu, peer,
2484 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2485 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2486 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2487 		qdf_nbuf_free(msdu);
2488 	}
2489 }
2490 
2491 
2492 /**
2493  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2494  * @peer: Datapath peer handle
2495  * @vdev: Datapath vdev
2496  * @pdev - data path device instance
2497  * @sec_type - security type
2498  * @rx_pn - Receive pn starting number
2499  *
2500  */
2501 
2502 void
2503 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
2504 {
2505 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2506 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2507 	struct dp_pdev *pdev;
2508 	struct dp_soc *soc;
2509 	int i;
2510 	uint8_t pn_size;
2511 	struct hal_reo_cmd_params params;
2512 
2513 	/* preconditions */
2514 	qdf_assert(vdev);
2515 
2516 	pdev = vdev->pdev;
2517 	soc = pdev->soc;
2518 
2519 
2520 	qdf_mem_zero(&params, sizeof(params));
2521 
2522 	params.std.need_status = 1;
2523 	params.u.upd_queue_params.update_pn_valid = 1;
2524 	params.u.upd_queue_params.update_pn_size = 1;
2525 	params.u.upd_queue_params.update_pn = 1;
2526 	params.u.upd_queue_params.update_pn_check_needed = 1;
2527 	params.u.upd_queue_params.update_svld = 1;
2528 	params.u.upd_queue_params.svld = 0;
2529 
2530 	peer->security[dp_sec_ucast].sec_type = sec_type;
2531 
2532 	switch (sec_type) {
2533 	case cdp_sec_type_tkip_nomic:
2534 	case cdp_sec_type_aes_ccmp:
2535 	case cdp_sec_type_aes_ccmp_256:
2536 	case cdp_sec_type_aes_gcmp:
2537 	case cdp_sec_type_aes_gcmp_256:
2538 		params.u.upd_queue_params.pn_check_needed = 1;
2539 		params.u.upd_queue_params.pn_size = 48;
2540 		pn_size = 48;
2541 		break;
2542 	case cdp_sec_type_wapi:
2543 		params.u.upd_queue_params.pn_check_needed = 1;
2544 		params.u.upd_queue_params.pn_size = 128;
2545 		pn_size = 128;
2546 		if (vdev->opmode == wlan_op_mode_ap) {
2547 			params.u.upd_queue_params.pn_even = 1;
2548 			params.u.upd_queue_params.update_pn_even = 1;
2549 		} else {
2550 			params.u.upd_queue_params.pn_uneven = 1;
2551 			params.u.upd_queue_params.update_pn_uneven = 1;
2552 		}
2553 		break;
2554 	default:
2555 		params.u.upd_queue_params.pn_check_needed = 0;
2556 		pn_size = 0;
2557 		break;
2558 	}
2559 
2560 
2561 	for (i = 0; i < DP_MAX_TIDS; i++) {
2562 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2563 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2564 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2565 			params.std.addr_lo =
2566 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2567 			params.std.addr_hi =
2568 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2569 
2570 			if (pn_size) {
2571 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2572 					  QDF_TRACE_LEVEL_INFO_HIGH,
2573 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
2574 					  __func__, i, rx_pn[3], rx_pn[2],
2575 					  rx_pn[1], rx_pn[0]);
2576 				params.u.upd_queue_params.update_pn_valid = 1;
2577 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2578 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2579 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2580 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2581 			}
2582 			rx_tid->pn_size = pn_size;
2583 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2584 				dp_rx_tid_update_cb, rx_tid);
2585 		} else {
2586 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2587 				"PN Check not setup for TID :%d ", i);
2588 		}
2589 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2590 	}
2591 }
2592 
2593 
2594 void
2595 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2596 	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2597 	u_int32_t *rx_pn)
2598 {
2599 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2600 	struct dp_peer *peer;
2601 	int sec_index;
2602 
2603 	peer = dp_peer_find_by_id(soc, peer_id);
2604 	if (!peer) {
2605 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2606 			"Couldn't find peer from ID %d - skipping security inits",
2607 			peer_id);
2608 		return;
2609 	}
2610 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2611 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
2612 		"%s key of type %d",
2613 		peer,
2614 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2615 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2616 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2617 		is_unicast ? "ucast" : "mcast",
2618 		sec_type);
2619 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2620 	peer->security[sec_index].sec_type = sec_type;
2621 #ifdef notyet /* TODO: See if this is required for defrag support */
2622 	/* michael key only valid for TKIP, but for simplicity,
2623 	 * copy it anyway
2624 	 */
2625 	qdf_mem_copy(
2626 		&peer->security[sec_index].michael_key[0],
2627 		michael_key,
2628 		sizeof(peer->security[sec_index].michael_key));
2629 #ifdef BIG_ENDIAN_HOST
2630 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2631 				 sizeof(peer->security[sec_index].michael_key));
2632 #endif /* BIG_ENDIAN_HOST */
2633 #endif
2634 
2635 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2636 	if (sec_type != cdp_sec_type_wapi) {
2637 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
2638 	} else {
2639 		for (i = 0; i < DP_MAX_TIDS; i++) {
2640 			/*
2641 			 * Setting PN valid bit for WAPI sec_type,
2642 			 * since WAPI PN has to be started with predefined value
2643 			 */
2644 			peer->tids_last_pn_valid[i] = 1;
2645 			qdf_mem_copy(
2646 				(u_int8_t *) &peer->tids_last_pn[i],
2647 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2648 			peer->tids_last_pn[i].pn128[1] =
2649 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2650 			peer->tids_last_pn[i].pn128[0] =
2651 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2652 		}
2653 	}
2654 #endif
2655 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2656 	 * all security types and last pn for WAPI) once REO command API
2657 	 * is available
2658 	 */
2659 
2660 	dp_peer_unref_del_find_by_id(peer);
2661 }
2662 
2663 #ifndef CONFIG_WIN
2664 /**
2665  * dp_register_peer() - Register peer into physical device
2666  * @pdev - data path device instance
2667  * @sta_desc - peer description
2668  *
2669  * Register peer into physical device
2670  *
2671  * Return: QDF_STATUS_SUCCESS registration success
2672  *         QDF_STATUS_E_FAULT peer not found
2673  */
2674 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2675 		struct ol_txrx_desc_type *sta_desc)
2676 {
2677 	struct dp_peer *peer;
2678 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2679 
2680 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2681 			sta_desc->sta_id);
2682 	if (!peer)
2683 		return QDF_STATUS_E_FAULT;
2684 
2685 	qdf_spin_lock_bh(&peer->peer_info_lock);
2686 	peer->state = OL_TXRX_PEER_STATE_CONN;
2687 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2688 
2689 	return QDF_STATUS_SUCCESS;
2690 }
2691 
2692 /**
2693  * dp_clear_peer() - remove peer from physical device
2694  * @pdev - data path device instance
2695  * @sta_id - local peer id
2696  *
2697  * remove peer from physical device
2698  *
2699  * Return: QDF_STATUS_SUCCESS registration success
2700  *         QDF_STATUS_E_FAULT peer not found
2701  */
2702 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2703 {
2704 	struct dp_peer *peer;
2705 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2706 
2707 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2708 	if (!peer)
2709 		return QDF_STATUS_E_FAULT;
2710 
2711 	qdf_spin_lock_bh(&peer->peer_info_lock);
2712 	peer->state = OL_TXRX_PEER_STATE_DISC;
2713 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2714 
2715 	return QDF_STATUS_SUCCESS;
2716 }
2717 
2718 /**
2719  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2720  * @pdev - data path device instance
2721  * @vdev - virtual interface instance
2722  * @peer_addr - peer mac address
2723  * @peer_id - local peer id with target mac address
2724  *
2725  * Find peer by peer mac address within vdev
2726  *
2727  * Return: peer instance void pointer
2728  *         NULL cannot find target peer
2729  */
2730 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2731 		struct cdp_vdev *vdev_handle,
2732 		uint8_t *peer_addr, uint8_t *local_id)
2733 {
2734 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2735 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2736 	struct dp_peer *peer;
2737 
2738 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2739 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2740 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2741 
2742 	if (!peer)
2743 		return NULL;
2744 
2745 	if (peer->vdev != vdev) {
2746 		dp_peer_unref_delete(peer);
2747 		return NULL;
2748 	}
2749 
2750 	*local_id = peer->local_id;
2751 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2752 
2753 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2754 	 * Decrement it here.
2755 	 */
2756 	dp_peer_unref_delete(peer);
2757 
2758 	return peer;
2759 }
2760 
2761 /**
2762  * dp_local_peer_id() - Find local peer id within peer instance
2763  * @peer - peer instance
2764  *
2765  * Find local peer id within peer instance
2766  *
2767  * Return: local peer id
2768  */
2769 uint16_t dp_local_peer_id(void *peer)
2770 {
2771 	return ((struct dp_peer *)peer)->local_id;
2772 }
2773 
2774 /**
2775  * dp_peer_find_by_local_id() - Find peer by local peer id
2776  * @pdev - data path device instance
2777  * @local_peer_id - local peer id want to find
2778  *
2779  * Find peer by local peer id within physical device
2780  *
2781  * Return: peer instance void pointer
2782  *         NULL cannot find target peer
2783  */
2784 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2785 {
2786 	struct dp_peer *peer;
2787 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2788 
2789 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2790 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2791 				   "Incorrect local id %u", local_id);
2792 		return NULL;
2793 	}
2794 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2795 	peer = pdev->local_peer_ids.map[local_id];
2796 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2797 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2798 	return peer;
2799 }
2800 
2801 /**
2802  * dp_peer_state_update() - update peer local state
2803  * @pdev - data path device instance
2804  * @peer_addr - peer mac address
2805  * @state - new peer local state
2806  *
2807  * update peer local state
2808  *
2809  * Return: QDF_STATUS_SUCCESS registration success
2810  */
2811 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2812 		enum ol_txrx_peer_state state)
2813 {
2814 	struct dp_peer *peer;
2815 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2816 
2817 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2818 	if (NULL == peer) {
2819 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2820 		"Failed to find peer for: [%pM]", peer_mac);
2821 		return QDF_STATUS_E_FAILURE;
2822 	}
2823 	peer->state = state;
2824 
2825 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2826 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2827 	 * Decrement it here.
2828 	 */
2829 	dp_peer_unref_delete(peer);
2830 
2831 	return QDF_STATUS_SUCCESS;
2832 }
2833 
2834 /**
2835  * dp_get_vdevid() - Get virtual interface id which peer registered
2836  * @peer - peer instance
2837  * @vdev_id - virtual interface id which peer registered
2838  *
2839  * Get virtual interface id which peer registered
2840  *
2841  * Return: QDF_STATUS_SUCCESS registration success
2842  */
2843 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2844 {
2845 	struct dp_peer *peer = peer_handle;
2846 
2847 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2848 			peer, peer->vdev, peer->vdev->vdev_id);
2849 	*vdev_id = peer->vdev->vdev_id;
2850 	return QDF_STATUS_SUCCESS;
2851 }
2852 
2853 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2854 				       uint8_t sta_id)
2855 {
2856 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2857 	struct dp_peer *peer = NULL;
2858 
2859 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2860 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2861 			  "Invalid sta id passed");
2862 		return NULL;
2863 	}
2864 
2865 	if (!pdev) {
2866 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2867 			  "PDEV not found for sta_id [%d]", sta_id);
2868 		return NULL;
2869 	}
2870 
2871 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2872 	if (!peer) {
2873 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2874 			  "PEER [%d] not found", sta_id);
2875 		return NULL;
2876 	}
2877 
2878 	return (struct cdp_vdev *)peer->vdev;
2879 }
2880 
2881 /**
2882  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2883  * @peer - peer instance
2884  *
2885  * Get virtual interface instance which peer belongs
2886  *
2887  * Return: virtual interface instance pointer
2888  *         NULL in case cannot find
2889  */
2890 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2891 {
2892 	struct dp_peer *peer = peer_handle;
2893 
2894 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2895 	return (struct cdp_vdev *)peer->vdev;
2896 }
2897 
2898 /**
2899  * dp_peer_get_peer_mac_addr() - Get peer mac address
2900  * @peer - peer instance
2901  *
2902  * Get peer mac address
2903  *
2904  * Return: peer mac address pointer
2905  *         NULL in case cannot find
2906  */
2907 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2908 {
2909 	struct dp_peer *peer = peer_handle;
2910 	uint8_t *mac;
2911 
2912 	mac = peer->mac_addr.raw;
2913 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2914 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2915 	return peer->mac_addr.raw;
2916 }
2917 
2918 /**
2919  * dp_get_peer_state() - Get local peer state
2920  * @peer - peer instance
2921  *
2922  * Get local peer state
2923  *
2924  * Return: peer status
2925  */
2926 int dp_get_peer_state(void *peer_handle)
2927 {
2928 	struct dp_peer *peer = peer_handle;
2929 
2930 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
2931 	return peer->state;
2932 }
2933 
2934 /**
2935  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2936  * @pdev - data path device instance
2937  *
2938  * local peer id pool alloc for physical device
2939  *
2940  * Return: none
2941  */
2942 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2943 {
2944 	int i;
2945 
2946 	/* point the freelist to the first ID */
2947 	pdev->local_peer_ids.freelist = 0;
2948 
2949 	/* link each ID to the next one */
2950 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2951 		pdev->local_peer_ids.pool[i] = i + 1;
2952 		pdev->local_peer_ids.map[i] = NULL;
2953 	}
2954 
2955 	/* link the last ID to itself, to mark the end of the list */
2956 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2957 	pdev->local_peer_ids.pool[i] = i;
2958 
2959 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2960 	DP_TRACE(INFO, "Peer pool init");
2961 }
2962 
2963 /**
2964  * dp_local_peer_id_alloc() - allocate local peer id
2965  * @pdev - data path device instance
2966  * @peer - new peer instance
2967  *
2968  * allocate local peer id
2969  *
2970  * Return: none
2971  */
2972 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2973 {
2974 	int i;
2975 
2976 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2977 	i = pdev->local_peer_ids.freelist;
2978 	if (pdev->local_peer_ids.pool[i] == i) {
2979 		/* the list is empty, except for the list-end marker */
2980 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2981 	} else {
2982 		/* take the head ID and advance the freelist */
2983 		peer->local_id = i;
2984 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2985 		pdev->local_peer_ids.map[i] = peer;
2986 	}
2987 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2988 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2989 }
2990 
2991 /**
2992  * dp_local_peer_id_free() - remove local peer id
2993  * @pdev - data path device instance
2994  * @peer - peer instance should be removed
2995  *
2996  * remove local peer id
2997  *
2998  * Return: none
2999  */
3000 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3001 {
3002 	int i = peer->local_id;
3003 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3004 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3005 		return;
3006 	}
3007 
3008 	/* put this ID on the head of the freelist */
3009 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3010 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3011 	pdev->local_peer_ids.freelist = i;
3012 	pdev->local_peer_ids.map[i] = NULL;
3013 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3014 }
3015 #endif
3016 
3017 /**
3018  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3019  * @soc_handle: DP SOC handle
3020  * @peer_id:peer_id of the peer
3021  *
3022  * return: vdev_id of the vap
3023  */
3024 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3025 		uint16_t peer_id, uint8_t *peer_mac)
3026 {
3027 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3028 	struct dp_peer *peer;
3029 	uint8_t vdev_id;
3030 
3031 	peer = dp_peer_find_by_id(soc, peer_id);
3032 
3033 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3034 			"soc %pK peer_id %d", soc, peer_id);
3035 
3036 	if (!peer) {
3037 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3038 				"peer not found ");
3039 		return CDP_INVALID_VDEV_ID;
3040 	}
3041 
3042 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3043 	vdev_id = peer->vdev->vdev_id;
3044 
3045 	dp_peer_unref_del_find_by_id(peer);
3046 
3047 	return vdev_id;
3048 }
3049 
3050 /**
3051  * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
3052  * @peer: DP peer handle
3053  * @dp_stats_cmd_cb: REO command callback function
3054  * @cb_ctxt: Callback context
3055  *
3056  * Return: none
3057  */
3058 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3059 			void *cb_ctxt)
3060 {
3061 	struct dp_soc *soc = peer->vdev->pdev->soc;
3062 	struct hal_reo_cmd_params params;
3063 	int i;
3064 
3065 	if (!dp_stats_cmd_cb)
3066 		return;
3067 
3068 	qdf_mem_zero(&params, sizeof(params));
3069 	for (i = 0; i < DP_MAX_TIDS; i++) {
3070 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3071 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
3072 			params.std.need_status = 1;
3073 			params.std.addr_lo =
3074 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3075 			params.std.addr_hi =
3076 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3077 
3078 			if (cb_ctxt) {
3079 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3080 					&params, dp_stats_cmd_cb, cb_ctxt);
3081 			} else {
3082 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3083 					&params, dp_stats_cmd_cb, rx_tid);
3084 			}
3085 
3086 			/* Flush REO descriptor from HW cache to update stats
3087 			 * in descriptor memory. This is to help debugging */
3088 			qdf_mem_zero(&params, sizeof(params));
3089 			params.std.need_status = 0;
3090 			params.std.addr_lo =
3091 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3092 			params.std.addr_hi =
3093 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3094 			params.u.fl_cache_params.flush_no_inval = 1;
3095 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3096 				NULL);
3097 		}
3098 	}
3099 }
3100 
3101 void dp_set_michael_key(struct cdp_peer *peer_handle,
3102 			bool is_unicast, uint32_t *key)
3103 {
3104 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3105 	uint8_t sec_index = is_unicast ? 1 : 0;
3106 
3107 	if (!peer) {
3108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3109 			  "peer not found ");
3110 		return;
3111 	}
3112 
3113 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3114 		     key, IEEE80211_WEP_MICLEN);
3115 }
3116 
3117 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3118 {
3119 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3120 
3121 	if (peer) {
3122 		/*
3123 		 * Decrement the peer ref which is taken as part of
3124 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3125 		 */
3126 		dp_peer_unref_del_find_by_id(peer);
3127 
3128 		return true;
3129 	}
3130 
3131 	return false;
3132 }
3133