xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision ad85c389289a03e320cd08dea21861f9857892fc)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {};
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&:
60 		 * because the operands are binary rather than generic booleans,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
69 
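/*
 * dp_peer_find_map_attach() - allocate the peer ID -> peer object map
 * @soc: SoC handle
 *
 * The map is sized for soc->max_peers entries and zeroed purely for
 * debugging convenience.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */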
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
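/*
 * dp_log2_ceil() - compute ceil(log2(value))
 * @value: input value
 *
 * For example, dp_log2_ceil(5) returns 3, since 2^3 = 8 is the smallest
 * power of two that is >= 5, while dp_log2_ceil(4) returns 2.
 *
 * Return: smallest n such that (1 << n) >= value
 */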
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
110 
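/*
 * dp_peer_find_add_id_to_obj() - record a peer_id in the peer's ID list
 * @peer: peer object
 * @peer_id: peer ID assigned by the target
 *
 * Stores the ID in the first unused (HTT_INVALID_PEER) slot.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all slots are in use
 */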
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
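/*
 * dp_peer_find_hash_attach() - allocate the peer MAC -> peer object hash table
 * @soc: SoC handle
 *
 * The number of bins is soc->max_peers scaled by the hash load factor and
 * rounded up to a power of two, so a simple mask can be used as the index.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
 */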
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
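/*
 * dp_peer_find_hash_index() - compute the peer hash bin for a MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address in the 2-byte aligned representation
 *
 * XORs the three 16-bit halves of the address, folds the higher-order bits
 * back in with a shift-XOR and masks the result down to the bin count.
 *
 * Return: hash bin index
 */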
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
175 
176 
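/*
 * dp_peer_find_hash_add() - add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * Acquires peer_ref_mutex and appends the peer to the tail of its hash bin.
 *
 * Return: None
 */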
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 /*
229  * dp_peer_ast_cleanup() - cleanup the references
230  * @soc: SoC handle
231  * @ast: ast entry
232  *
233  * Return: None
234  */
235 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
236 				       struct dp_ast_entry *ast)
237 {
238 	txrx_ast_free_cb cb = ast->callback;
239 	void *cookie = ast->cookie;
240 
241 	/* Call the callbacks to free up the cookie */
242 	if (cb) {
243 		ast->callback = NULL;
244 		ast->cookie = NULL;
245 		cb(soc->ctrl_psoc,
246 		   soc,
247 		   cookie,
248 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
249 	}
250 }
251 
252 /*
253  * dp_peer_ast_hash_detach() - Free AST Hash table
254  * @soc: SoC handle
255  *
256  * Return: None
257  */
258 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
259 {
260 	unsigned int index;
261 	struct dp_ast_entry *ast, *ast_next;
262 
263 	if (!soc->ast_hash.mask)
264 		return;
265 
266 	qdf_spin_lock_bh(&soc->ast_lock);
267 	for (index = 0; index <= soc->ast_hash.mask; index++) {
268 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
269 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
270 					   hash_list_elem, ast_next) {
271 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
272 					     hash_list_elem);
273 				dp_peer_ast_cleanup(soc, ast);
274 				qdf_mem_free(ast);
275 			}
276 		}
277 	}
278 	qdf_spin_unlock_bh(&soc->ast_lock);
279 
280 	qdf_mem_free(soc->ast_hash.bins);
281 }
282 
283 /*
284  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
285  * @soc: SoC handle
286  *
287  * Return: AST hash
288  */
289 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
290 	union dp_align_mac_addr *mac_addr)
291 {
292 	uint32_t index;
293 
294 	index =
295 		mac_addr->align2.bytes_ab ^
296 		mac_addr->align2.bytes_cd ^
297 		mac_addr->align2.bytes_ef;
298 	index ^= index >> soc->ast_hash.idx_bits;
299 	index &= soc->ast_hash.mask;
300 	return index;
301 }
302 
303 /*
304  * dp_peer_ast_hash_add() - Add AST entry into hash table
305  * @soc: SoC handle
306  *
307  * This function adds the AST entry into SoC AST hash table
308  * It assumes caller has taken the ast lock to protect the access to this table
309  *
310  * Return: None
311  */
312 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
313 		struct dp_ast_entry *ase)
314 {
315 	uint32_t index;
316 
317 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
318 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
319 }
320 
321 /*
322  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
323  * @soc: SoC handle
324  *
325  * This function removes the AST entry from soc AST hash table
326  * It assumes caller has taken the ast lock to protect the access to this table
327  *
328  * Return: None
329  */
330 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
331 		struct dp_ast_entry *ase)
332 {
333 	unsigned index;
334 	struct dp_ast_entry *tmpase;
335 	int found = 0;
336 
337 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
338 	/* Check that the hash bin is not empty before the delete */
339 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
340 
341 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
342 		if (tmpase == ase) {
343 			found = 1;
344 			break;
345 		}
346 	}
347 
348 	QDF_ASSERT(found);
349 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
350 }
351 
352 /*
353  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
354  * @soc: SoC handle
355  * @peer: peer handle
356  * @ast_mac_addr: mac address
357  *
358  * It assumes caller has taken the ast lock to protect the access to ast list
359  *
360  * Return: AST entry
361  */
362 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
363 					   struct dp_peer *peer,
364 					   uint8_t *ast_mac_addr)
365 {
366 	struct dp_ast_entry *ast_entry = NULL;
367 	union dp_align_mac_addr *mac_addr =
368 		(union dp_align_mac_addr *)ast_mac_addr;
369 
370 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
371 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
372 					       &ast_entry->mac_addr)) {
373 			return ast_entry;
374 		}
375 	}
376 
377 	return NULL;
378 }
379 
380 /*
381  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
382  * @soc: SoC handle
383  *
384  * It assumes caller has taken the ast lock to protect the access to
385  * AST hash table
386  *
387  * Return: AST entry
388  */
389 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
390 						     uint8_t *ast_mac_addr,
391 						     uint8_t pdev_id)
392 {
393 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
394 	uint32_t index;
395 	struct dp_ast_entry *ase;
396 
397 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
398 		     ast_mac_addr, DP_MAC_ADDR_LEN);
399 	mac_addr = &local_mac_addr_aligned;
400 
401 	index = dp_peer_ast_hash_index(soc, mac_addr);
402 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
403 		if ((pdev_id == ase->pdev_id) &&
404 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
405 			return ase;
406 		}
407 	}
408 
409 	return NULL;
410 }
411 
412 /*
413  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
414  * @soc: SoC handle
415  *
416  * It assumes caller has taken the ast lock to protect the access to
417  * AST hash table
418  *
419  * Return: AST entry
420  */
421 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
422 					       uint8_t *ast_mac_addr)
423 {
424 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
425 	unsigned index;
426 	struct dp_ast_entry *ase;
427 
428 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
429 			ast_mac_addr, DP_MAC_ADDR_LEN);
430 	mac_addr = &local_mac_addr_aligned;
431 
432 	index = dp_peer_ast_hash_index(soc, mac_addr);
433 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
434 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
435 			return ase;
436 		}
437 	}
438 
439 	return NULL;
440 }
441 
442 /*
443  * dp_peer_map_ast() - Map the ast entry with HW AST Index
444  * @soc: SoC handle
445  * @peer: peer to which ast node belongs
446  * @mac_addr: MAC address of ast node
447  * @hw_peer_id: HW AST Index returned by target in peer map event
448  * @vdev_id: vdev id for VAP to which the peer belongs to
449  * @ast_hash: ast hash value in HW
450  *
451  * Return: None
452  */
453 static inline void dp_peer_map_ast(struct dp_soc *soc,
454 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
455 	uint8_t vdev_id, uint16_t ast_hash)
456 {
457 	struct dp_ast_entry *ast_entry = NULL;
458 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
459 
460 	if (!peer) {
461 		return;
462 	}
463 
464 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
465 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
466 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
467 		mac_addr[1], mac_addr[2], mac_addr[3],
468 		mac_addr[4], mac_addr[5]);
469 
470 	qdf_spin_lock_bh(&soc->ast_lock);
471 
472 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
473 
474 	if (ast_entry) {
475 		ast_entry->ast_idx = hw_peer_id;
476 		soc->ast_table[hw_peer_id] = ast_entry;
477 		ast_entry->is_active = TRUE;
478 		peer_type = ast_entry->type;
479 		ast_entry->ast_hash_value = ast_hash;
480 		ast_entry->is_mapped = TRUE;
481 	}
482 
483 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
484 		if (soc->cdp_soc.ol_ops->peer_map_event) {
485 			soc->cdp_soc.ol_ops->peer_map_event(
486 			soc->ctrl_psoc, peer->peer_ids[0],
487 			hw_peer_id, vdev_id,
488 			mac_addr, peer_type, ast_hash);
489 		}
490 	} else {
491 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
492 			"AST entry not found");
493 	}
494 
495 	qdf_spin_unlock_bh(&soc->ast_lock);
496 	return;
497 }
498 
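/*
 * dp_peer_free_hmwds_cb() - AST free callback that re-adds an HMWDS entry
 * @ctrl_psoc: control path psoc handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params saved by dp_peer_add_ast()
 * @status: AST free status reported by the caller
 *
 * Once the old entry is confirmed deleted (CDP_TXRX_AST_DELETED), the saved
 * parameters are used to add the pending HMWDS AST entry; the cookie is
 * freed in all paths.
 *
 * Return: None
 */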
499 void dp_peer_free_hmwds_cb(void *ctrl_psoc,
500 			   void *dp_soc,
501 			   void *cookie,
502 			   enum cdp_ast_free_status status)
503 {
504 	struct dp_ast_free_cb_params *param =
505 		(struct dp_ast_free_cb_params *)cookie;
506 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
507 	struct dp_peer *peer = NULL;
508 
509 	if (status != CDP_TXRX_AST_DELETED) {
510 		qdf_mem_free(cookie);
511 		return;
512 	}
513 
514 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
515 				      0, param->vdev_id);
516 	if (peer) {
517 		dp_peer_add_ast(soc, peer,
518 				&param->mac_addr.raw[0],
519 				param->type,
520 				param->flags);
521 		dp_peer_unref_delete(peer);
522 	}
523 	qdf_mem_free(cookie);
524 }
525 
526 /*
527  * dp_peer_add_ast() - Allocate and add AST entry into peer list
528  * @soc: SoC handle
529  * @peer: peer to which ast node belongs
530  * @mac_addr: MAC address of ast node
531  * @type: Type of the AST entry to add
532  *
533  * This API is used by WDS source port learning function to
534  * add a new AST entry into peer AST list
535  *
536  * Return: 0 if new entry is allocated,
537  *        -1 if entry add failed
538  */
539 int dp_peer_add_ast(struct dp_soc *soc,
540 			struct dp_peer *peer,
541 			uint8_t *mac_addr,
542 			enum cdp_txrx_ast_entry_type type,
543 			uint32_t flags)
544 {
545 	struct dp_ast_entry *ast_entry = NULL;
546 	struct dp_vdev *vdev = NULL;
547 	struct dp_pdev *pdev = NULL;
548 	uint8_t next_node_mac[6];
549 	int  ret = -1;
550 	txrx_ast_free_cb cb = NULL;
551 	void *cookie = NULL;
552 
553 	if (peer->delete_in_progress)
554 		return ret;
555 
556 	vdev = peer->vdev;
557 	if (!vdev) {
558 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
559 			FL("Peer's vdev is NULL"));
560 		QDF_ASSERT(0);
561 		return ret;
562 	}
563 
564 	pdev = vdev->pdev;
565 
566 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
567 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
568 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
569 		  peer->mac_addr.raw, peer, mac_addr);
570 
571 	qdf_spin_lock_bh(&soc->ast_lock);
572 
573 	/* If the AST entry already exists, just return from here.
574 	 * An ast entry with the same mac address can exist on different
575 	 * radios; if ast_override support is enabled, use the search by
576 	 * pdev in this case.
577 	 */
578 	if (soc->ast_override_support) {
579 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
580 							    pdev->pdev_id);
581 		if (ast_entry) {
582 			qdf_spin_unlock_bh(&soc->ast_lock);
583 			return 0;
584 		}
585 	} else {
586 		/* For HMWDS_SEC, entries can be added for the same mac
587 		 * address, so do not check for an existing entry.
588 		 */
589 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
590 			goto add_ast_entry;
591 
592 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
593 
594 		if (ast_entry) {
595 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
596 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
597 				ast_entry->is_active = TRUE;
598 
599 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
600 			    !ast_entry->delete_in_progress) {
601 				qdf_spin_unlock_bh(&soc->ast_lock);
602 				return 0;
603 			}
604 
605 			/* An add for an HMWDS entry cannot be ignored if there
606 			 * is already an AST entry with the same mac address.
607 			 *
608 			 * If an ast entry exists with the requested mac address,
609 			 * send a delete command and register a callback which
610 			 * can take care of adding the HMWDS ast entry on delete
611 			 * confirmation from the target.
612 			 */
613 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
614 			    soc->is_peer_map_unmap_v2) {
615 				struct dp_ast_free_cb_params *param = NULL;
616 
617 				if (ast_entry->type ==
618 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
619 					goto add_ast_entry;
620 
621 				/* save existing callback */
622 				if (ast_entry->callback) {
623 					cb = ast_entry->callback;
624 					cookie = ast_entry->cookie;
625 				}
626 
627 				param = qdf_mem_malloc(sizeof(*param));
628 				if (!param) {
629 					QDF_TRACE(QDF_MODULE_ID_TXRX,
630 						  QDF_TRACE_LEVEL_ERROR,
631 						  "Allocation failed");
632 					qdf_spin_unlock_bh(&soc->ast_lock);
633 					return ret;
634 				}
635 
636 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
637 					     DP_MAC_ADDR_LEN);
638 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
639 					     &peer->mac_addr.raw[0],
640 					     DP_MAC_ADDR_LEN);
641 				param->type = type;
642 				param->flags = flags;
643 				param->vdev_id = vdev->vdev_id;
644 				ast_entry->callback = dp_peer_free_hmwds_cb;
645 				ast_entry->cookie = (void *)param;
646 				if (!ast_entry->delete_in_progress)
647 					dp_peer_del_ast(soc, ast_entry);
648 			}
649 
650 			/* Modify an already existing AST entry from type
651 			 * WDS to MEC on promotion. This serves as a fix when
652 			 * the backbone of interfaces is interchanged, wherein
653 			 * a wds entry becomes its own MEC. The entry should be
654 			 * replaced only when the ast_entry peer matches the
655 			 * peer received in the mec event. This additional check
656 			 * is needed in wds repeater cases where a multicast
657 			 * packet from a station to the root via the repeater
658 			 * should not remove the wds entry.
659 			 */
660 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
661 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
662 			    (ast_entry->peer == peer)) {
663 				ast_entry->is_active = FALSE;
664 				dp_peer_del_ast(soc, ast_entry);
665 			}
666 			qdf_spin_unlock_bh(&soc->ast_lock);
667 
668 			/* Call the saved callback*/
669 			if (cb) {
670 				cb(soc->ctrl_psoc, soc, cookie,
671 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
672 			}
673 			return 0;
674 		}
675 	}
676 
677 add_ast_entry:
678 	ast_entry = (struct dp_ast_entry *)
679 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
680 
681 	if (!ast_entry) {
682 		qdf_spin_unlock_bh(&soc->ast_lock);
683 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
684 			FL("fail to allocate ast_entry"));
685 		QDF_ASSERT(0);
686 		return ret;
687 	}
688 
689 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
690 	ast_entry->pdev_id = vdev->pdev->pdev_id;
691 	ast_entry->vdev_id = vdev->vdev_id;
692 	ast_entry->is_mapped = false;
693 	ast_entry->delete_in_progress = false;
694 
695 	switch (type) {
696 	case CDP_TXRX_AST_TYPE_STATIC:
697 		peer->self_ast_entry = ast_entry;
698 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
699 		if (peer->vdev->opmode == wlan_op_mode_sta)
700 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
701 		break;
702 	case CDP_TXRX_AST_TYPE_SELF:
703 		peer->self_ast_entry = ast_entry;
704 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
705 		break;
706 	case CDP_TXRX_AST_TYPE_WDS:
707 		ast_entry->next_hop = 1;
708 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
709 		break;
710 	case CDP_TXRX_AST_TYPE_WDS_HM:
711 		ast_entry->next_hop = 1;
712 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
713 		break;
714 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
715 		ast_entry->next_hop = 1;
716 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
717 		break;
718 	case CDP_TXRX_AST_TYPE_MEC:
719 		ast_entry->next_hop = 1;
720 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
721 		break;
722 	case CDP_TXRX_AST_TYPE_DA:
723 		peer = peer->vdev->vap_bss_peer;
724 		ast_entry->next_hop = 1;
725 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
726 		break;
727 	default:
728 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
729 			FL("Incorrect AST entry type"));
730 	}
731 
732 	ast_entry->is_active = TRUE;
733 	DP_STATS_INC(soc, ast.added, 1);
734 	dp_peer_ast_hash_add(soc, ast_entry);
735 
736 	ast_entry->peer = peer;
737 
738 	if (type == CDP_TXRX_AST_TYPE_MEC)
739 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
740 	else
741 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
742 
743 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
744 	qdf_spin_unlock_bh(&soc->ast_lock);
745 
746 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
747 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
748 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
749 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
750 		if (QDF_STATUS_SUCCESS ==
751 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
752 				peer->vdev->osif_vdev,
753 				(struct cdp_peer *)peer,
754 				mac_addr,
755 				next_node_mac,
756 				flags))
757 			return 0;
758 	}
759 
760 	return ret;
761 }
762 
763 /*
764  * dp_peer_del_ast() - Delete and free AST entry
765  * @soc: SoC handle
766  * @ast_entry: AST entry of the node
767  *
768  * This function removes the AST entry from peer and soc tables
769  * It assumes caller has taken the ast lock to protect the access to these
770  * tables
771  *
772  * Return: None
773  */
774 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
775 {
776 	struct dp_peer *peer = ast_entry->peer;
777 	uint16_t peer_id = peer->peer_ids[0];
778 
779 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
780 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
781 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
782 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
783 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
784 
785 	dp_peer_ast_send_wds_del(soc, ast_entry);
786 
787 	/*
788 	 * If peer map v2 is enabled, we are not freeing the ast entry
789 	 * here; it is supposed to be freed in the unmap event (after
790 	 * we receive delete confirmation from the target).
791 	 *
792 	 * If peer_id is invalid, we did not get the peer map event
793 	 * for the peer; free the ast entry from here only in this case.
794 	 */
795 	if (soc->is_peer_map_unmap_v2 && (peer_id != HTT_INVALID_PEER)) {
796 
797 		/*
798 		 * For HM_SEC and SELF type we do not receive an unmap event;
799 		 * free the ast_entry from here itself.
800 		 */
801 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
802 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
803 			return;
804 	}
805 
806 	/*
807 	 * release the reference only if it is mapped
808 	 * to ast_table
809 	 */
810 	if (ast_entry->is_mapped)
811 		soc->ast_table[ast_entry->ast_idx] = NULL;
812 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
813 
814 	if (ast_entry == peer->self_ast_entry)
815 		peer->self_ast_entry = NULL;
816 
817 	DP_STATS_INC(soc, ast.deleted, 1);
818 	dp_peer_ast_hash_remove(soc, ast_entry);
819 	dp_peer_ast_cleanup(soc, ast_entry);
820 	qdf_mem_free(ast_entry);
821 }
822 
823 /*
824  * dp_peer_update_ast() - Update the AST entry to the roamed peer
825  * @soc: SoC handle
826  * @peer: peer to which ast node belongs
827  * @ast_entry: AST entry of the node
828  * @flags: wds or hmwds
829  *
830  * This function updates the AST entry to the roamed peer and soc tables.
831  * It assumes caller has taken the ast lock to protect the access to these
832  * tables
833  *
834  * Return: 0 if ast entry is updated successfully
835  *         -1 failure
836  */
837 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
838 		       struct dp_ast_entry *ast_entry, uint32_t flags)
839 {
840 	int ret = -1;
841 	struct dp_peer *old_peer;
842 
843 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
844 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
845 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
846 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
847 		  peer->mac_addr.raw);
848 
849 	if (ast_entry->delete_in_progress)
850 		return ret;
851 
852 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
853 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
854 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
855 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
856 		return 0;
857 
858 	old_peer = ast_entry->peer;
859 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
860 
861 	ast_entry->peer = peer;
862 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
863 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
864 	ast_entry->vdev_id = peer->vdev->vdev_id;
865 	ast_entry->is_active = TRUE;
866 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
867 
868 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
869 				peer->vdev->osif_vdev,
870 				ast_entry->mac_addr.raw,
871 				peer->mac_addr.raw,
872 				flags);
873 
874 	return ret;
875 }
876 
877 /*
878  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
879  * @soc: SoC handle
880  * @ast_entry: AST entry of the node
881  *
882  * This function gets the pdev_id from the ast entry.
883  *
884  * Return: (uint8_t) pdev_id
885  */
886 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
887 				struct dp_ast_entry *ast_entry)
888 {
889 	return ast_entry->pdev_id;
890 }
891 
892 /*
893  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
894  * @soc: SoC handle
895  * @ast_entry: AST entry of the node
896  *
897  * This function gets the next hop from the ast entry.
898  *
899  * Return: (uint8_t) next_hop
900  */
901 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
902 				struct dp_ast_entry *ast_entry)
903 {
904 	return ast_entry->next_hop;
905 }
906 
907 /*
908  * dp_peer_ast_set_type() - set the type in the ast entry
909  * @soc: SoC handle
910  * @ast_entry: AST entry of the node
911  *
912  * This function sets the type in the ast entry.
913  *
914  * Return: None
915  */
916 void dp_peer_ast_set_type(struct dp_soc *soc,
917 				struct dp_ast_entry *ast_entry,
918 				enum cdp_txrx_ast_entry_type type)
919 {
920 	ast_entry->type = type;
921 }
922 
923 #else
924 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
925 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
926 		uint32_t flags)
927 {
928 	return 1;
929 }
930 
931 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
932 {
933 }
934 
935 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
936 			struct dp_ast_entry *ast_entry, uint32_t flags)
937 {
938 	return 1;
939 }
940 
941 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
942 					       uint8_t *ast_mac_addr)
943 {
944 	return NULL;
945 }
946 
947 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
948 						     uint8_t *ast_mac_addr,
949 						     uint8_t pdev_id)
950 {
951 	return NULL;
952 }
953 
954 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
955 {
956 	return 0;
957 }
958 
959 static inline void dp_peer_map_ast(struct dp_soc *soc,
960 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
961 	uint8_t vdev_id, uint16_t ast_hash)
962 {
963 	return;
964 }
965 
966 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
967 {
968 }
969 
970 void dp_peer_ast_set_type(struct dp_soc *soc,
971 				struct dp_ast_entry *ast_entry,
972 				enum cdp_txrx_ast_entry_type type)
973 {
974 }
975 
976 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
977 				struct dp_ast_entry *ast_entry)
978 {
979 	return 0xff;
980 }
981 
982 
983 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
984 				struct dp_ast_entry *ast_entry)
985 {
986 	return 0xff;
987 }
988 #endif
989 
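/*
 * dp_peer_ast_send_wds_del() - send a WDS AST delete to the target
 * @soc: SoC handle
 * @ast_entry: AST entry to be deleted
 *
 * Issues peer_del_wds_entry for next-hop entries (except WDS_HM_SEC) and
 * marks the entry as delete_in_progress; a repeat call is a no-op.
 *
 * Return: None
 */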
990 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
991 			      struct dp_ast_entry *ast_entry)
992 {
993 	struct dp_peer *peer = ast_entry->peer;
994 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
995 
996 	if (ast_entry->delete_in_progress)
997 		return;
998 
999 	if (ast_entry->next_hop &&
1000 	    ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1001 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
1002 						    ast_entry->mac_addr.raw);
1003 
1004 	ast_entry->delete_in_progress = true;
1005 }
1006 
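/*
 * dp_peer_ast_free_entry() - unlink and free an AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry to free
 *
 * Removes the entry from the ast_table, the peer list and the AST hash
 * table under ast_lock, invokes any registered free callback with
 * CDP_TXRX_AST_DELETED, and frees the memory.
 *
 * Return: None
 */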
1007 static void dp_peer_ast_free_entry(struct dp_soc *soc,
1008 				   struct dp_ast_entry *ast_entry)
1009 {
1010 	struct dp_peer *peer = ast_entry->peer;
1011 	void *cookie = NULL;
1012 	txrx_ast_free_cb cb = NULL;
1013 
1014 	/*
1015 	 * release the reference only if it is mapped
1016 	 * to ast_table
1017 	 */
1018 
1019 	qdf_spin_lock_bh(&soc->ast_lock);
1020 	if (ast_entry->is_mapped)
1021 		soc->ast_table[ast_entry->ast_idx] = NULL;
1022 
1023 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1024 	DP_STATS_INC(soc, ast.deleted, 1);
1025 	dp_peer_ast_hash_remove(soc, ast_entry);
1026 
1027 	cb = ast_entry->callback;
1028 	cookie = ast_entry->cookie;
1029 	ast_entry->callback = NULL;
1030 	ast_entry->cookie = NULL;
1031 
1032 	if (ast_entry == peer->self_ast_entry)
1033 		peer->self_ast_entry = NULL;
1034 
1035 	qdf_spin_unlock_bh(&soc->ast_lock);
1036 
1037 	if (cb) {
1038 		cb(soc->ctrl_psoc,
1039 		   soc,
1040 		   cookie,
1041 		   CDP_TXRX_AST_DELETED);
1042 	}
1043 	qdf_mem_free(ast_entry);
1044 }
1045 
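/*
 * dp_peer_find_hash_find() - look up a peer by MAC address (and vdev_id)
 * @soc: SoC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: set if peer_mac_addr is already 2-byte aligned
 * @vdev_id: vdev to match when ATH_SUPPORT_WRAP is enabled, or DP_VDEV_ALL
 *
 * On success, the peer's ref_cnt is incremented before the hash lock is
 * released; the caller must balance it with dp_peer_unref_delete().
 *
 * Return: peer object, or NULL if not found
 */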
1046 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1047 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1048 {
1049 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1050 	unsigned index;
1051 	struct dp_peer *peer;
1052 
1053 	if (mac_addr_is_aligned) {
1054 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1055 	} else {
1056 		qdf_mem_copy(
1057 			&local_mac_addr_aligned.raw[0],
1058 			peer_mac_addr, DP_MAC_ADDR_LEN);
1059 		mac_addr = &local_mac_addr_aligned;
1060 	}
1061 	index = dp_peer_find_hash_index(soc, mac_addr);
1062 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1063 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1064 #if ATH_SUPPORT_WRAP
1065 		/* ProxySTA may have multiple BSS peers with the same MAC address;
1066 		 * the modified find will take care of finding the correct BSS peer.
1067 		 */
1068 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1069 			((peer->vdev->vdev_id == vdev_id) ||
1070 			 (vdev_id == DP_VDEV_ALL))) {
1071 #else
1072 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1073 #endif
1074 			/* found it - increment the ref count before releasing
1075 			 * the lock
1076 			 */
1077 			qdf_atomic_inc(&peer->ref_cnt);
1078 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1079 			return peer;
1080 		}
1081 	}
1082 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1083 	return NULL; /* failure */
1084 }
1085 
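/*
 * dp_peer_find_hash_remove() - remove a peer from the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must already hold peer_ref_mutex, as described in the comment
 * inside the function body.
 *
 * Return: None
 */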
1086 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1087 {
1088 	unsigned index;
1089 	struct dp_peer *tmppeer = NULL;
1090 	int found = 0;
1091 
1092 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1093 	/* Check that the hash bin is not empty before the delete */
1094 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1095 	/*
1096 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1097 	 * by the caller.
1098 	 * The caller needs to hold the lock from the time the peer object's
1099 	 * reference count is decremented and tested up through the time the
1100 	 * reference to the peer object is removed from the hash table, by
1101 	 * this function.
1102 	 * Holding the lock only while removing the peer object reference
1103 	 * from the hash table keeps the hash table consistent, but does not
1104 	 * protect against a new HL tx context starting to use the peer object
1105 	 * if it looks up the peer object from its MAC address just after the
1106 	 * peer ref count is decremented to zero, but just before the peer
1107 	 * object reference is removed from the hash table.
1108 	 */
1109 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1110 		if (tmppeer == peer) {
1111 			found = 1;
1112 			break;
1113 		}
1114 	}
1115 	QDF_ASSERT(found);
1116 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1117 }
1118 
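/*
 * dp_peer_find_hash_erase() - drop all peers still present in the hash table
 * @soc: SoC handle
 *
 * Used when the soc is being torn down; each remaining peer's ref count is
 * reset to 1 and released through dp_peer_unref_delete().
 *
 * Return: None
 */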
1119 void dp_peer_find_hash_erase(struct dp_soc *soc)
1120 {
1121 	int i;
1122 
1123 	/*
1124 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1125 	 * it's known that the soc is no longer in use.
1126 	 */
1127 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1128 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1129 			struct dp_peer *peer, *peer_next;
1130 
1131 			/*
1132 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1133 			 * memory access violation after peer is freed
1134 			 */
1135 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1136 				hash_list_elem, peer_next) {
1137 				/*
1138 				 * Don't remove the peer from the hash table -
1139 				 * that would modify the list we are currently
1140 				 * traversing, and it's not necessary anyway.
1141 				 */
1142 				/*
1143 				 * Artificially adjust the peer's ref count to
1144 				 * 1, so it will get deleted by
1145 				 * dp_peer_unref_delete.
1146 				 */
1147 				/* set to zero */
1148 				qdf_atomic_init(&peer->ref_cnt);
1149 				/* incr to one */
1150 				qdf_atomic_inc(&peer->ref_cnt);
1151 				dp_peer_unref_delete(peer);
1152 			}
1153 		}
1154 	}
1155 }
1156 
1157 static void dp_peer_find_map_detach(struct dp_soc *soc)
1158 {
1159 	qdf_mem_free(soc->peer_id_to_obj_map);
1160 }
1161 
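/*
 * dp_peer_find_attach() - allocate the peer ID map, peer hash and AST hash
 * @soc: SoC handle
 *
 * On any allocation failure, the structures already allocated are freed
 * before returning.
 *
 * Return: 0 on success, 1 on failure
 */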
1162 int dp_peer_find_attach(struct dp_soc *soc)
1163 {
1164 	if (dp_peer_find_map_attach(soc))
1165 		return 1;
1166 
1167 	if (dp_peer_find_hash_attach(soc)) {
1168 		dp_peer_find_map_detach(soc);
1169 		return 1;
1170 	}
1171 
1172 	if (dp_peer_ast_hash_attach(soc)) {
1173 		dp_peer_find_hash_detach(soc);
1174 		dp_peer_find_map_detach(soc);
1175 		return 1;
1176 	}
1177 	return 0; /* success */
1178 }
1179 
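/*
 * dp_rx_tid_stats_cb() - REO command callback that prints per-TID queue stats
 * @soc: SoC handle
 * @cb_ctxt: dp_rx_tid for which the stats were requested
 * @reo_status: REO queue status returned by HW
 *
 * Return: None
 */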
1180 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1181 	union hal_reo_status *reo_status)
1182 {
1183 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1184 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1185 
1186 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1187 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
1188 			queue_status->header.status, rx_tid->tid);
1189 		return;
1190 	}
1191 
1192 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
1193 		"ssn: %d\n"
1194 		"curr_idx  : %d\n"
1195 		"pn_31_0   : %08x\n"
1196 		"pn_63_32  : %08x\n"
1197 		"pn_95_64  : %08x\n"
1198 		"pn_127_96 : %08x\n"
1199 		"last_rx_enq_tstamp : %08x\n"
1200 		"last_rx_deq_tstamp : %08x\n"
1201 		"rx_bitmap_31_0     : %08x\n"
1202 		"rx_bitmap_63_32    : %08x\n"
1203 		"rx_bitmap_95_64    : %08x\n"
1204 		"rx_bitmap_127_96   : %08x\n"
1205 		"rx_bitmap_159_128  : %08x\n"
1206 		"rx_bitmap_191_160  : %08x\n"
1207 		"rx_bitmap_223_192  : %08x\n"
1208 		"rx_bitmap_255_224  : %08x\n",
1209 		rx_tid->tid,
1210 		queue_status->ssn, queue_status->curr_idx,
1211 		queue_status->pn_31_0, queue_status->pn_63_32,
1212 		queue_status->pn_95_64, queue_status->pn_127_96,
1213 		queue_status->last_rx_enq_tstamp,
1214 		queue_status->last_rx_deq_tstamp,
1215 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
1216 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
1217 		queue_status->rx_bitmap_159_128,
1218 		queue_status->rx_bitmap_191_160,
1219 		queue_status->rx_bitmap_223_192,
1220 		queue_status->rx_bitmap_255_224);
1221 
1222 	DP_TRACE_STATS(FATAL,
1223 		"curr_mpdu_cnt      : %d\n"
1224 		"curr_msdu_cnt      : %d\n"
1225 		"fwd_timeout_cnt    : %d\n"
1226 		"fwd_bar_cnt        : %d\n"
1227 		"dup_cnt            : %d\n"
1228 		"frms_in_order_cnt  : %d\n"
1229 		"bar_rcvd_cnt       : %d\n"
1230 		"mpdu_frms_cnt      : %d\n"
1231 		"msdu_frms_cnt      : %d\n"
1232 		"total_byte_cnt     : %d\n"
1233 		"late_recv_mpdu_cnt : %d\n"
1234 		"win_jump_2k 	    : %d\n"
1235 		"hole_cnt 	    : %d\n",
1236 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
1237 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
1238 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
1239 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
1240 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
1241 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
1242 		queue_status->hole_cnt);
1243 
1244 	DP_PRINT_STATS("Addba Req          : %d\n"
1245 			"Addba Resp         : %d\n"
1246 			"Addba Resp success : %d\n"
1247 			"Addba Resp failed  : %d\n"
1248 			"Delba Req received : %d\n"
1249 			"Delba Tx success   : %d\n"
1250 			"Delba Tx Fail      : %d\n"
1251 			"BA window size     : %d\n"
1252 			"Pn size            : %d\n",
1253 			rx_tid->num_of_addba_req,
1254 			rx_tid->num_of_addba_resp,
1255 			rx_tid->num_addba_rsp_success,
1256 			rx_tid->num_addba_rsp_failed,
1257 			rx_tid->num_of_delba_req,
1258 			rx_tid->delba_tx_success_cnt,
1259 			rx_tid->delba_tx_fail_cnt,
1260 			rx_tid->ba_win_size,
1261 			rx_tid->pn_size);
1262 }
1263 
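/*
 * dp_peer_find_add_id() - map a target-assigned peer_id to the peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address from the peer map event
 * @peer_id: peer ID assigned by the target
 * @hw_peer_id: AST index for this peer
 * @vdev_id: vdev the peer belongs to
 *
 * Looks up the peer by MAC address, records it in peer_id_to_obj_map and in
 * the peer's own ID list. The reference taken by the hash lookup is retained
 * on success.
 *
 * Return: peer object, or NULL if no peer exists for the MAC address
 */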
1264 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1265 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1266 	uint8_t vdev_id)
1267 {
1268 	struct dp_peer *peer;
1269 
1270 	QDF_ASSERT(peer_id <= soc->max_peers);
1271 	/* check if there's already a peer object with this MAC address */
1272 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1273 		0 /* is aligned */, vdev_id);
1274 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1275 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1276 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1277 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1278 		peer_mac_addr[4], peer_mac_addr[5]);
1279 
1280 	if (peer) {
1281 		/* peer's ref count was already incremented by
1282 		 * peer_find_hash_find
1283 		 */
1284 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1285 			  "%s: ref_cnt: %d", __func__,
1286 			   qdf_atomic_read(&peer->ref_cnt));
1287 		if (!soc->peer_id_to_obj_map[peer_id])
1288 			soc->peer_id_to_obj_map[peer_id] = peer;
1289 		else {
1290 			/* Peer map event came for peer_id which
1291 			 * is already mapped, this is not expected
1292 			 */
1293 			QDF_ASSERT(0);
1294 		}
1295 
1296 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1297 			/* TBDXXX: assert for now */
1298 			QDF_ASSERT(0);
1299 		}
1300 
1301 		return peer;
1302 	}
1303 
1304 	return NULL;
1305 }
1306 
1307 /**
1308  * dp_rx_peer_map_handler() - handle peer map event from firmware
1309  * @soc_handle - generic soc handle
1310  * @peer_id - peer_id from firmware
1311  * @hw_peer_id - ast index for this peer
1312  * @vdev_id - vdev ID
1313  * @peer_mac_addr - mac address of the peer
1314  * @ast_hash - ast hash value
1315  * @is_wds - flag to indicate peer map event for WDS ast entry
1316  *
1317  * associate the peer_id that firmware provided with peer entry
1318  * and update the ast table in the host with the hw_peer_id.
1319  *
1320  * Return: none
1321  */
1322 
1323 void
1324 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1325 		       uint16_t hw_peer_id, uint8_t vdev_id,
1326 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1327 		       uint8_t is_wds)
1328 {
1329 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1330 	struct dp_peer *peer = NULL;
1331 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1332 
1333 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1334 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
1335 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
1336 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1337 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1338 		peer_mac_addr[5], vdev_id);
1339 
1340 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
1341 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1342 			"invalid hw_peer_id: %d", hw_peer_id);
1343 		qdf_assert_always(0);
1344 	}
1345 
1346 	/* For a peer map event for a WDS ast entry, get the peer from
1347 	 * the obj map
1348 	 */
1349 	if (is_wds) {
1350 		peer = soc->peer_id_to_obj_map[peer_id];
1351 	} else {
1352 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1353 					   hw_peer_id, vdev_id);
1354 
1355 		if (peer) {
1356 			/*
1357 			 * For every peer map message, check if this is the bss_peer and set it
1358 			 */
1359 			if (!(qdf_mem_cmp(peer->mac_addr.raw,
1360 					  peer->vdev->mac_addr.raw,
1361 					  DP_MAC_ADDR_LEN))) {
1362 				QDF_TRACE(QDF_MODULE_ID_DP,
1363 					  QDF_TRACE_LEVEL_INFO_HIGH,
1364 					  "vdev bss_peer!!!!");
1365 				peer->bss_peer = 1;
1366 				peer->vdev->vap_bss_peer = peer;
1367 			}
1368 
1369 			if (peer->vdev->opmode == wlan_op_mode_sta)
1370 				peer->vdev->bss_ast_hash = ast_hash;
1371 
1372 			/* Add the ast entry in case the self ast entry was
1373 			 * deleted due to a DP/CP sync issue.
1374 			 *
1375 			 * self_ast_entry is modified in the peer create
1376 			 * and peer unmap paths, which cannot run in
1377 			 * parallel with peer map; no lock is needed before
1378 			 * referring to it.
1379 			 */
1380 			if (!peer->self_ast_entry) {
1381 				QDF_TRACE(QDF_MODULE_ID_DP,
1382 					  QDF_TRACE_LEVEL_INFO_HIGH,
1383 					  "Add self ast from map %pM",
1384 					  peer_mac_addr);
1385 				dp_peer_add_ast(soc, peer,
1386 						peer_mac_addr,
1387 						type, 0);
1388 			}
1389 
1390 		}
1391 	}
1392 
1393 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1394 			hw_peer_id, vdev_id, ast_hash);
1395 }
1396 
1397 /**
1398  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1399  * @soc_handle - generic soc handle
1400  * @peer_id - peer_id from firmware
1401  * @vdev_id - vdev ID
1402  * @mac_addr - mac address of the peer or wds entry
1403  * @is_wds - flag to indicate peer map event for WDS ast entry
1404  * @is_wds - flag to indicate peer unmap event for WDS ast entry
1405  * Return: none
1406  */
1407 void
1408 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1409 			 uint8_t vdev_id, uint8_t *mac_addr,
1410 			 uint8_t is_wds)
1411 {
1412 	struct dp_peer *peer;
1413 	struct dp_ast_entry *ast_entry;
1414 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1415 	uint8_t i;
1416 
1417 	peer = __dp_peer_find_by_id(soc, peer_id);
1418 
1419 	/*
1420 	 * Currently peer IDs are assigned for vdevs as well as peers.
1421 	 * If the peer ID is for a vdev, then the peer pointer stored
1422 	 * in peer_id_to_obj_map will be NULL.
1423 	 */
1424 	if (!peer) {
1425 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1426 			"%s: Received unmap event for invalid peer_id"
1427 			" %u", __func__, peer_id);
1428 		return;
1429 	}
1430 
1431 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1432 	 */
1433 	if (soc->is_peer_map_unmap_v2) {
1434 
1435 		qdf_spin_lock_bh(&soc->ast_lock);
1436 		ast_entry = dp_peer_ast_list_find(soc, peer,
1437 						  mac_addr);
1438 
1439 		if (!ast_entry) {
1440 			/* in case of qwrap we have multiple BSS peers
1441 			 * with same mac address
1442 			 *
1443 			 * AST entry for this mac address will be created
1444 			 * only for one peer
1445 			 */
1446 			if (peer->vdev->proxysta_vdev) {
1447 				qdf_spin_unlock_bh(&soc->ast_lock);
1448 				goto peer_unmap;
1449 			}
1450 
1451 			/* Ideally we should not enter this case where
1452 			 * the ast_entry is not present in the host table and
1453 			 * we received an unmap event.
1454 			 */
1455 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
1456 				  "%s:%d AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u\n",
1457 				  __func__, __LINE__, peer, peer->peer_ids[0],
1458 				  peer->mac_addr.raw, mac_addr, vdev_id,
1459 				  is_wds);
1460 
1461 			if (!is_wds) {
1462 				qdf_spin_unlock_bh(&soc->ast_lock);
1463 				goto peer_unmap;
1464 			}
1465 		}
1466 		qdf_spin_unlock_bh(&soc->ast_lock);
1467 
1468 		/* The AST entry is reused if delete_in_progress is not set;
1469 		 * free it here only when a delete is already in progress.
1470 		 */
1471 		if (ast_entry->delete_in_progress)
1472 			dp_peer_ast_free_entry(soc, ast_entry);
1473 
1474 		if (is_wds)
1475 			return;
1476 	}
1477 
1478 peer_unmap:
1479 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1480 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1481 		soc, peer_id, peer);
1482 
1483 	soc->peer_id_to_obj_map[peer_id] = NULL;
1484 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1485 		if (peer->peer_ids[i] == peer_id) {
1486 			peer->peer_ids[i] = HTT_INVALID_PEER;
1487 			break;
1488 		}
1489 	}
1490 
1491 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1492 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1493 				peer_id);
1494 	}
1495 
1496 	/*
1497 	 * Remove a reference to the peer.
1498 	 * If there are no more references, delete the peer object.
1499 	 */
1500 	dp_peer_unref_delete(peer);
1501 }
1502 
1503 void
1504 dp_peer_find_detach(struct dp_soc *soc)
1505 {
1506 	dp_peer_find_map_detach(soc);
1507 	dp_peer_find_hash_detach(soc);
1508 	dp_peer_ast_hash_detach(soc);
1509 }
1510 
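/*
 * dp_rx_tid_update_cb() - callback for the CMD_UPDATE_RX_REO_QUEUE command
 * @soc: SoC handle
 * @cb_ctxt: dp_rx_tid being updated
 * @reo_status: REO command status
 *
 * Only logs an error if the HW descriptor update failed.
 *
 * Return: None
 */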
1511 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1512 	union hal_reo_status *reo_status)
1513 {
1514 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1515 
1516 	if ((reo_status->rx_queue_status.header.status !=
1517 		HAL_REO_CMD_SUCCESS) &&
1518 		(reo_status->rx_queue_status.header.status !=
1519 		HAL_REO_CMD_DRAIN)) {
1520 		/* Should not happen normally. Just print error for now */
1521 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1522 			"%s: Rx tid HW desc update failed(%d): tid %d",
1523 			__func__,
1524 			reo_status->rx_queue_status.header.status,
1525 			rx_tid->tid);
1526 	}
1527 }
1528 
1529 /*
1530  * dp_find_peer_by_addr - find peer instance by mac address
1531  * @dev: physical device instance
1532  * @peer_mac_addr: peer mac address
1533  * @local_id: local id for the peer
1534  *
1535  * Return: peer instance pointer
1536  */
1537 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1538 		uint8_t *local_id)
1539 {
1540 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1541 	struct dp_peer *peer;
1542 
1543 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1544 
1545 	if (!peer)
1546 		return NULL;
1547 
1548 	/* Multiple peer ids? How can we know the peer id? */
1549 	*local_id = peer->local_id;
1550 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1551 
1552 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1553 	 * Decrement it here.
1554 	 */
1555 	dp_peer_unref_delete(peer);
1556 
1557 	return peer;
1558 }
1559 
1560 /*
1561  * dp_rx_tid_update_wifi3() - Update receive TID state
1562  * @peer: Datapath peer handle
1563  * @tid: TID
1564  * @ba_window_size: BlockAck window size
1565  * @start_seq: Starting sequence number
1566  *
1567  * Return: 0 on success, error code on failure
1568  */
1569 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1570 				  ba_window_size, uint32_t start_seq)
1571 {
1572 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1573 	struct dp_soc *soc = peer->vdev->pdev->soc;
1574 	struct hal_reo_cmd_params params;
1575 
1576 	qdf_mem_zero(&params, sizeof(params));
1577 
1578 	params.std.need_status = 1;
1579 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1580 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1581 	params.u.upd_queue_params.update_ba_window_size = 1;
1582 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1583 
1584 	if (start_seq < IEEE80211_SEQ_MAX) {
1585 		params.u.upd_queue_params.update_ssn = 1;
1586 		params.u.upd_queue_params.ssn = start_seq;
1587 	}
1588 
1589 	dp_set_ssn_valid_flag(&params, 0);
1590 
1591 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1592 
1593 	rx_tid->ba_win_size = ba_window_size;
1594 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1595 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1596 			peer->vdev->pdev->ctrl_pdev,
1597 			peer->vdev->vdev_id, peer->mac_addr.raw,
1598 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1599 
1600 	}
1601 	return 0;
1602 }
1603 
1604 /*
1605  * dp_reo_desc_free() - Callback free reo descriptor memory after
1606  * HW cache flush
1607  *
1608  * @soc: DP SOC handle
1609  * @cb_ctxt: Callback context
1610  * @reo_status: REO command status
1611  */
1612 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1613 	union hal_reo_status *reo_status)
1614 {
1615 	struct reo_desc_list_node *freedesc =
1616 		(struct reo_desc_list_node *)cb_ctxt;
1617 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1618 
1619 	if ((reo_status->fl_cache_status.header.status !=
1620 		HAL_REO_CMD_SUCCESS) &&
1621 		(reo_status->fl_cache_status.header.status !=
1622 		HAL_REO_CMD_DRAIN)) {
1623 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1624 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1625 			__func__,
1626 			reo_status->rx_queue_status.header.status,
1627 			freedesc->rx_tid.tid);
1628 	}
1629 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1630 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1631 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1632 	qdf_mem_unmap_nbytes_single(soc->osdev,
1633 		rx_tid->hw_qdesc_paddr,
1634 		QDF_DMA_BIDIRECTIONAL,
1635 		rx_tid->hw_qdesc_alloc_size);
1636 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1637 	qdf_mem_free(freedesc);
1638 }
1639 
1640 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1641 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1642 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1643 {
1644 	if (dma_addr < 0x50000000)
1645 		return QDF_STATUS_E_FAILURE;
1646 	else
1647 		return QDF_STATUS_SUCCESS;
1648 }
1649 #else
1650 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1651 {
1652 	return QDF_STATUS_SUCCESS;
1653 }
1654 #endif
1655 
1656 
1657 /*
1658  * dp_rx_tid_setup_wifi3() - Setup receive TID state
1659  * @peer: Datapath peer handle
1660  * @tid: TID
1661  * @ba_window_size: BlockAck window size
1662  * @start_seq: Starting sequence number
1663  *
1664  * Return: 0 on success, error code on failure
1665  */
1666 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1667 	uint32_t ba_window_size, uint32_t start_seq)
1668 {
1669 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1670 	struct dp_vdev *vdev = peer->vdev;
1671 	struct dp_soc *soc = vdev->pdev->soc;
1672 	uint32_t hw_qdesc_size;
1673 	uint32_t hw_qdesc_align;
1674 	int hal_pn_type;
1675 	void *hw_qdesc_vaddr;
1676 	uint32_t alloc_tries = 0;
1677 
1678 	if (peer->delete_in_progress ||
1679 	    !qdf_atomic_read(&peer->is_default_route_set))
1680 		return QDF_STATUS_E_FAILURE;
1681 
1682 	rx_tid->ba_win_size = ba_window_size;
1683 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1684 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1685 			start_seq);
1686 	rx_tid->delba_tx_status = 0;
1687 	rx_tid->ppdu_id_2k = 0;
1688 	rx_tid->num_of_addba_req = 0;
1689 	rx_tid->num_of_delba_req = 0;
1690 	rx_tid->num_of_addba_resp = 0;
1691 	rx_tid->num_addba_rsp_failed = 0;
1692 	rx_tid->num_addba_rsp_success = 0;
1693 	rx_tid->delba_tx_success_cnt = 0;
1694 	rx_tid->delba_tx_fail_cnt = 0;
1695 	rx_tid->statuscode = 0;
1696 
1697 	/* TODO: Allocating HW queue descriptors based on max BA window size
1698 	 * for all QOS TIDs so that same descriptor can be used later when
1699 	 * ADDBA request is received. This should be changed to allocate HW
1700 	 * queue descriptors based on BA window size being negotiated (0 for
1701 	 * non BA cases), and reallocate when BA window size changes and also
1702 	 * send WMI message to FW to change the REO queue descriptor in Rx
1703 	 * peer entry as part of dp_rx_tid_update.
1704 	 */
1705 	if (tid != DP_NON_QOS_TID)
1706 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1707 			HAL_RX_MAX_BA_WINDOW, tid);
1708 	else
1709 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1710 			ba_window_size, tid);
1711 
1712 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1713 	/* To avoid unnecessary extra allocation for alignment, try allocating
1714 	 * exact size and see if we already have aligned address.
1715 	 */
1716 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1717 
1718 try_desc_alloc:
1719 	rx_tid->hw_qdesc_vaddr_unaligned =
1720 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1721 
1722 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1723 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1724 			"%s: Rx tid HW desc alloc failed: tid %d",
1725 			__func__, tid);
1726 		return QDF_STATUS_E_NOMEM;
1727 	}
1728 
1729 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1730 		hw_qdesc_align) {
1731 		/* Address allocated above is not aligned. Allocate extra
1732 		 * memory for alignment
1733 		 */
1734 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1735 		rx_tid->hw_qdesc_vaddr_unaligned =
1736 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1737 					hw_qdesc_align - 1);
1738 
1739 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1740 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1741 				"%s: Rx tid HW desc alloc failed: tid %d",
1742 				__func__, tid);
1743 			return QDF_STATUS_E_NOMEM;
1744 		}
1745 
1746 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1747 			rx_tid->hw_qdesc_vaddr_unaligned,
1748 			hw_qdesc_align);
1749 
1750 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1751 			"%s: Total Size %d Aligned Addr %pK",
1752 			__func__, rx_tid->hw_qdesc_alloc_size,
1753 			hw_qdesc_vaddr);
1754 
1755 	} else {
1756 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1757 	}
1758 
1759 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1760 	 * Currently this is set based on htt indication
1761 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1762 	 */
1763 	switch (peer->security[dp_sec_ucast].sec_type) {
1764 	case cdp_sec_type_tkip_nomic:
1765 	case cdp_sec_type_aes_ccmp:
1766 	case cdp_sec_type_aes_ccmp_256:
1767 	case cdp_sec_type_aes_gcmp:
1768 	case cdp_sec_type_aes_gcmp_256:
1769 		hal_pn_type = HAL_PN_WPA;
1770 		break;
1771 	case cdp_sec_type_wapi:
1772 		if (vdev->opmode == wlan_op_mode_ap)
1773 			hal_pn_type = HAL_PN_WAPI_EVEN;
1774 		else
1775 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1776 		break;
1777 	default:
1778 		hal_pn_type = HAL_PN_NONE;
1779 		break;
1780 	}
1781 
1782 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1783 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1784 
1785 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1786 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1787 		&(rx_tid->hw_qdesc_paddr));
1788 
1789 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1790 			QDF_STATUS_SUCCESS) {
1791 		if (alloc_tries++ < 10)
1792 			goto try_desc_alloc;
1793 		else {
1794 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1795 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1796 			__func__, tid);
1797 			return QDF_STATUS_E_NOMEM;
1798 		}
1799 	}
1800 
1801 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1802 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1803 			vdev->pdev->ctrl_pdev,
1804 			peer->vdev->vdev_id, peer->mac_addr.raw,
1805 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1806 
1807 	}
1808 	return 0;
1809 }
1810 
1811 /*
1812  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1813  * after deleting the entries (i.e., setting valid=0)
1814  *
1815  * @soc: DP SOC handle
1816  * @cb_ctxt: Callback context
1817  * @reo_status: REO command status
1818  */
1819 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1820 	union hal_reo_status *reo_status)
1821 {
1822 	struct reo_desc_list_node *freedesc =
1823 		(struct reo_desc_list_node *)cb_ctxt;
1824 	uint32_t list_size;
1825 	struct reo_desc_list_node *desc;
1826 	unsigned long curr_ts = qdf_get_system_timestamp();
1827 	uint32_t desc_size, tot_desc_size;
1828 	struct hal_reo_cmd_params params;
1829 
1830 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1831 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1832 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1833 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1834 		return;
1835 	} else if (reo_status->rx_queue_status.header.status !=
1836 		HAL_REO_CMD_SUCCESS) {
1837 		/* Should not happen normally. Just print error for now */
1838 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1839 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1840 			__func__,
1841 			reo_status->rx_queue_status.header.status,
1842 			freedesc->rx_tid.tid);
1843 	}
1844 
1845 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1846 		"%s: rx_tid: %d status: %d", __func__,
1847 		freedesc->rx_tid.tid,
1848 		reo_status->rx_queue_status.header.status);
1849 
1850 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1851 	freedesc->free_ts = curr_ts;
1852 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1853 		(qdf_list_node_t *)freedesc, &list_size);
1854 
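	/* Free entries from the front of the freelist once the list has grown
	 * beyond REO_DESC_FREELIST_SIZE entries or an entry has aged past
	 * REO_DESC_FREE_DEFER_MS, flushing its REO queue descriptor from the
	 * HW cache before it is released.
	 */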
1855 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1856 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1857 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1858 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1859 		struct dp_rx_tid *rx_tid;
1860 
1861 		qdf_list_remove_front(&soc->reo_desc_freelist,
1862 				(qdf_list_node_t **)&desc);
1863 		list_size--;
1864 		rx_tid = &desc->rx_tid;
1865 
1866 		/* Flush and invalidate REO descriptor from HW cache: Base and
1867 		 * extension descriptors should be flushed separately */
1868 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1869 		/* Get base descriptor size by passing non-qos TID */
1870 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1871 						   DP_NON_QOS_TID);
1872 
1873 		/* Flush reo extension descriptors */
1874 		while ((tot_desc_size -= desc_size) > 0) {
1875 			qdf_mem_zero(&params, sizeof(params));
1876 			params.std.addr_lo =
1877 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1878 				tot_desc_size) & 0xffffffff;
1879 			params.std.addr_hi =
1880 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1881 
1882 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1883 							CMD_FLUSH_CACHE,
1884 							&params,
1885 							NULL,
1886 							NULL)) {
1887 				QDF_TRACE(QDF_MODULE_ID_DP,
1888 					QDF_TRACE_LEVEL_ERROR,
1889 					"%s: fail to send CMD_FLUSH_CACHE:"
1890 					" tid %d desc %pK", __func__,
1891 					rx_tid->tid,
1892 					(void *)(rx_tid->hw_qdesc_paddr));
1893 			}
1894 		}
1895 
1896 		/* Flush base descriptor */
1897 		qdf_mem_zero(&params, sizeof(params));
1898 		params.std.need_status = 1;
1899 		params.std.addr_lo =
1900 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1901 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1902 
1903 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1904 							  CMD_FLUSH_CACHE,
1905 							  &params,
1906 							  dp_reo_desc_free,
1907 							  (void *)desc)) {
1908 			union hal_reo_status reo_status;
1909 			/*
1910 			 * If dp_reo_send_cmd return failure, related TID queue desc
1911 			 * If dp_reo_send_cmd returns failure, the related TID queue
1912 			 * desc should be unmapped. The local reo_desc, together with
1913 			 * the TID queue desc, also needs to be freed accordingly.
1914 			 *
1915 			 * Invoke the desc_free function directly here to do the cleanup.
1916 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1917 				"%s: fail to send REO cmd to flush cache: tid %d",
1918 				__func__, rx_tid->tid);
1919 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1920 			reo_status.fl_cache_status.header.status = 0;
1921 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1922 		}
1923 	}
1924 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1925 }
1926 
1927 /*
1928  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
1929  * @peer: Datapath peer handle
1930  * @tid: TID
1931  *
1932  * Return: 0 on success, error code on failure
1933  */
1934 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1935 {
1936 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1937 	struct dp_soc *soc = peer->vdev->pdev->soc;
1938 	struct hal_reo_cmd_params params;
1939 	struct reo_desc_list_node *freedesc =
1940 		qdf_mem_malloc(sizeof(*freedesc));
1941 
1942 	if (!freedesc) {
1943 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1944 			"%s: malloc failed for freedesc: tid %d",
1945 			__func__, tid);
1946 		return -ENOMEM;
1947 	}
1948 
1949 	freedesc->rx_tid = *rx_tid;
1950 
1951 	qdf_mem_zero(&params, sizeof(params));
1952 
1953 	params.std.need_status = 1;
1954 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1955 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1956 	params.u.upd_queue_params.update_vld = 1;
1957 	params.u.upd_queue_params.vld = 0;
1958 
1959 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1960 		dp_rx_tid_delete_cb, (void *)freedesc);
1961 
1962 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1963 	rx_tid->hw_qdesc_alloc_size = 0;
1964 	rx_tid->hw_qdesc_paddr = 0;
1965 
1966 	return 0;
1967 }
1968 
1969 #ifdef DP_LFR
1970 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1971 {
1972 	int tid;
1973 
1974 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1975 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1976 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1977 			"Setting up TID %d for peer %pK peer->local_id %d",
1978 			tid, peer, peer->local_id);
1979 	}
1980 }
1981 #else
1982 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1983 #endif
1984 /*
1985  * dp_peer_rx_init() - Initialize receive TID state
1986  * @pdev: Datapath pdev
1987  * @peer: Datapath peer
1988  *
1989  */
1990 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1991 {
1992 	int tid;
1993 	struct dp_rx_tid *rx_tid;
1994 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1995 		rx_tid = &peer->rx_tid[tid];
1996 		rx_tid->array = &rx_tid->base;
1997 		rx_tid->base.head = rx_tid->base.tail = NULL;
1998 		rx_tid->tid = tid;
1999 		rx_tid->defrag_timeout_ms = 0;
2000 		rx_tid->ba_win_size = 0;
2001 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2002 
2003 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2004 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2005 
2006 #ifdef notyet /* TODO: See if this is required for exception handling */
2007 		/* invalid sequence number */
2008 		peer->tids_last_seq[tid] = 0xffff;
2009 #endif
2010 	}
2011 
2012 	peer->active_ba_session_cnt = 0;
2013 	peer->hw_buffer_size = 0;
2014 	peer->kill_256_sessions = 0;
2015 
2016 	/* Setup default (non-qos) rx tid queue */
2017 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2018 
2019 	/* Setup rx tid queue for TID 0.
2020 	 * Other queues will be set up on receiving the first packet, which will
2021 	 * cause a NULL REO queue error
2022 	 */
2023 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2024 
2025 	/*
2026 	 * Set up the rest of the TIDs to handle LFR
2027 	 */
2028 	dp_peer_setup_remaining_tids(peer);
2029 
2030 	/*
2031 	 * Set security defaults: no PN check, no security. The target may
2032 	 * send a HTT SEC_IND message to overwrite these defaults.
2033 	 */
2034 	peer->security[dp_sec_ucast].sec_type =
2035 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2036 }
2037 
2038 /*
2039  * dp_peer_rx_cleanup() - Cleanup receive TID state
2040  * @vdev: Datapath vdev
2041  * @peer: Datapath peer
2042  *
2043  */
2044 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2045 {
2046 	int tid;
2047 	uint32_t tid_delete_mask = 0;
2048 
2049 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
2050 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2051 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2052 
2053 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2054 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
2055 			dp_rx_tid_delete_wifi3(peer, tid);
2056 
2057 			/* Cleanup defrag related resource */
2058 			dp_rx_defrag_waitlist_remove(peer, tid);
2059 			dp_rx_reorder_flush_frag(peer, tid);
2060 
2061 			tid_delete_mask |= (1 << tid);
2062 		}
2063 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2064 	}
2065 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2066 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2067 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
2068 			peer->vdev->vdev_id, peer->mac_addr.raw,
2069 			tid_delete_mask);
2070 	}
2071 #endif
2072 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
2073 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2074 }
2075 
2076 /*
2077  * dp_peer_cleanup() - Cleanup peer information
2078  * @vdev: Datapath vdev
2079  * @peer: Datapath peer
2080  *
2081  */
2082 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2083 {
2084 	peer->last_assoc_rcvd = 0;
2085 	peer->last_disassoc_rcvd = 0;
2086 	peer->last_deauth_rcvd = 0;
2087 
2088 	/* cleanup the Rx reorder queues for this peer */
2089 	dp_peer_rx_cleanup(vdev, peer);
2090 }
2091 
2092 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2093  *                                window size when a request with
2094  *                                64 window size is received.
2095  *                                This is done as a WAR since HW can
2096  *                                have only one setting per peer (64 or 256).
2097  *                                For HKv2, we use the per-TID buffer size
2098  *                                setting for TIDs below per_tid_basize_max_tid.
2099  *                                For TIDs from per_tid_basize_max_tid onwards
2100  *                                we use the HKv1 method.
2101  * @peer: Datapath peer
2102  *
2103  * Return: void
2104  */
2105 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2106 {
2107 	uint8_t delba_rcode = 0;
2108 	int tid;
2109 	struct dp_rx_tid *rx_tid = NULL;
2110 
2111 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2112 	for (; tid < DP_MAX_TIDS; tid++) {
2113 		rx_tid = &peer->rx_tid[tid];
2114 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2115 
2116 		if (rx_tid->ba_win_size <= 64) {
2117 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2118 			continue;
2119 		} else {
2120 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2121 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2122 				/* send delba */
2123 				if (!rx_tid->delba_tx_status) {
2124 					rx_tid->delba_tx_retry++;
2125 					rx_tid->delba_tx_status = 1;
2126 					rx_tid->delba_rcode =
2127 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2128 					delba_rcode = rx_tid->delba_rcode;
2129 
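					/* Release the TID lock before
					 * invoking the control path
					 * send_delba callback.
					 */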
2130 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2131 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2132 							peer->vdev->pdev->ctrl_pdev,
2133 							peer->ctrl_peer,
2134 							peer->mac_addr.raw,
2135 							tid, peer->vdev->ctrl_vdev,
2136 							delba_rcode);
2137 				} else {
2138 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2139 				}
2140 			} else {
2141 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2142 			}
2143 		}
2144 	}
2145 }
2146 
2147 /*
2148  * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
2149  *
2150  * @peer: Datapath peer handle
2151  * @tid: TID number
2152  * @status: tx completion status
2153  * Return: 0 on success, error code on failure
2154  */
2155 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
2156 				      uint8_t tid, int status)
2157 {
2158 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2159 	struct dp_rx_tid *rx_tid = NULL;
2160 
2161 	if (!peer || peer->delete_in_progress) {
2162 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2163 			  "%s: Peer is NULL!\n", __func__);
2164 		return QDF_STATUS_E_FAILURE;
2165 	}
2166 	rx_tid = &peer->rx_tid[tid];
2167 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2168 	if (status) {
2169 		rx_tid->num_addba_rsp_failed++;
2170 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2171 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2172 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2173 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2174 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
2175 			 __func__, tid);
2176 		return QDF_STATUS_SUCCESS;
2177 	}
2178 
2179 	rx_tid->num_addba_rsp_success++;
2180 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2181 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2182 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2183 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2184 			__func__, tid);
2185 		return QDF_STATUS_E_FAILURE;
2186 	}
2187 
2188 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2189 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2190 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2191 			  "%s: default route is not set for peer: %pM",
2192 			  __func__, peer->mac_addr.raw);
2193 		return QDF_STATUS_E_FAILURE;
2194 	}
2195 
2196 	/* First Session */
2197 	if (peer->active_ba_session_cnt == 0) {
2198 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2199 			peer->hw_buffer_size = 256;
2200 		else
2201 			peer->hw_buffer_size = 64;
2202 	}
2203 
2204 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2205 
2206 	peer->active_ba_session_cnt++;
2207 
2208 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2209 
2210 	/* Kill any session having 256 buffer size
2211 	 * when a 64 buffer size request is received.
2212 	 * Also, latch on to 64 as the new buffer size.
2213 	 */
2214 	if (peer->kill_256_sessions) {
2215 		dp_teardown_256_ba_sessions(peer);
2216 		peer->kill_256_sessions = 0;
2217 	}
2218 	return QDF_STATUS_SUCCESS;
2219 }
2220 
2221 /*
2222  * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
2223  *
2224  * @peer: Datapath peer handle
2225  * @tid: TID number
2226  * @dialogtoken: output dialogtoken
2227  * @statuscode: output status code
2228  * @buffersize: Output BA window size
2229  * @batimeout: Output BA timeout
2230  */
2231 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2232 	uint8_t *dialogtoken, uint16_t *statuscode,
2233 	uint16_t *buffersize, uint16_t *batimeout)
2234 {
2235 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2236 	struct dp_rx_tid *rx_tid = NULL;
2237 
2238 	if (!peer || peer->delete_in_progress) {
2239 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2240 			  "%s: Peer is NULL!\n", __func__);
2241 		return;
2242 	}
2243 	rx_tid = &peer->rx_tid[tid];
2244 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2245 	rx_tid->num_of_addba_resp++;
2246 	/* setup ADDBA response parameters */
2247 	*dialogtoken = rx_tid->dialogtoken;
2248 	*statuscode = rx_tid->statuscode;
2249 	*buffersize = rx_tid->ba_win_size;
2250 	*batimeout  = 0;
2251 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2252 }
2253 
2254 /* dp_check_ba_buffersize() - Check the buffer size in the request
2255  *                            and latch onto this size based on the
2256  *                            size used in the first active session.
2257  * @peer: Datapath peer
2258  * @tid: TID
2259  * @buffersize: Block ack window size
2260  *
2261  * Return: void
2262  */
2263 static void dp_check_ba_buffersize(struct dp_peer *peer,
2264 				   uint16_t tid,
2265 				   uint16_t buffersize)
2266 {
2267 	struct dp_rx_tid *rx_tid = NULL;
2268 
2269 	rx_tid = &peer->rx_tid[tid];
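	/* For TIDs below per_tid_basize_max_tid (HKv2), the requested buffer
	 * size is used directly. Otherwise the first active session latches
	 * the peer-wide hw_buffer_size; a request of 64 or less on a peer
	 * latched to 256 drops back to 64 and marks the existing 256-sized
	 * sessions for teardown.
	 */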
2270 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2271 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2272 		rx_tid->ba_win_size = buffersize;
2273 		return;
2274 	} else {
2275 		if (peer->active_ba_session_cnt == 0) {
2276 			rx_tid->ba_win_size = buffersize;
2277 		} else {
2278 			if (peer->hw_buffer_size == 64) {
2279 				if (buffersize <= 64)
2280 					rx_tid->ba_win_size = buffersize;
2281 				else
2282 					rx_tid->ba_win_size = peer->hw_buffer_size;
2283 			} else if (peer->hw_buffer_size == 256) {
2284 				if (buffersize > 64) {
2285 					rx_tid->ba_win_size = buffersize;
2286 				} else {
2287 					rx_tid->ba_win_size = buffersize;
2288 					peer->hw_buffer_size = 64;
2289 					peer->kill_256_sessions = 1;
2290 				}
2291 			}
2292 		}
2293 	}
2294 }
2295 
2296 /*
2297  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2298  *
2299  * @peer: Datapath peer handle
2300  * @dialogtoken: dialogtoken from ADDBA frame
2301  * @tid: TID number
2302  * @batimeout: BA timeout
2303  * @buffersize: BA window size
2304  * @startseqnum: Start seq. number received in BA sequence control
2305  *
2306  * Return: 0 on success, error code on failure
2307  */
2308 int dp_addba_requestprocess_wifi3(void *peer_handle,
2309 				  uint8_t dialogtoken,
2310 				  uint16_t tid, uint16_t batimeout,
2311 				  uint16_t buffersize,
2312 				  uint16_t startseqnum)
2313 {
2314 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2315 	struct dp_rx_tid *rx_tid = NULL;
2316 
2317 	if (!peer || peer->delete_in_progress) {
2318 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2319 			  "%s: Peer is NULL!\n", __func__);
2320 		return QDF_STATUS_E_FAILURE;
2321 	}
2322 	rx_tid = &peer->rx_tid[tid];
2323 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2324 	rx_tid->num_of_addba_req++;
2325 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2326 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
2327 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
2328 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2329 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2330 		peer->active_ba_session_cnt--;
2331 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2332 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2333 			  "%s: Rx Tid- %d hw qdesc is already setup",
2334 			__func__, tid);
2335 		return QDF_STATUS_E_FAILURE;
2336 	}
2337 
2338 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2339 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2340 		return QDF_STATUS_E_FAILURE;
2341 	}
2342 	dp_check_ba_buffersize(peer, tid, buffersize);
2343 
2344 	if (dp_rx_tid_setup_wifi3(peer, tid,
2345 	    rx_tid->ba_win_size, startseqnum)) {
2346 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2347 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2348 		return QDF_STATUS_E_FAILURE;
2349 	}
2350 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2351 
2352 	rx_tid->dialogtoken = dialogtoken;
2353 	rx_tid->startseqnum = startseqnum;
2354 
2355 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2356 		rx_tid->statuscode = rx_tid->userstatuscode;
2357 	else
2358 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2359 
2360 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2361 
2362 	return QDF_STATUS_SUCCESS;
2363 }
2364 
2365 /*
2366  * dp_set_addba_response() - Set a user defined ADDBA response status code
2367  *
2368  * @peer: Datapath peer handle
2369  * @tid: TID number
2370  * @statuscode: response status code to be set
2371  */
2372 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2373 	uint16_t statuscode)
2374 {
2375 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2376 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2377 
2378 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2379 	rx_tid->userstatuscode = statuscode;
2380 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2381 }
2382 
2383 /*
2384  * dp_delba_process_wifi3() - Process DELBA from peer
2385  * @peer: Datapath peer handle
2386  * @tid: TID number
2387  * @reasoncode: Reason code received in DELBA frame
2388  *
2389  * Return: 0 on success, error code on failure
2390  */
2391 int dp_delba_process_wifi3(void *peer_handle,
2392 	int tid, uint16_t reasoncode)
2393 {
2394 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2395 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2396 
2397 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2398 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2399 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2400 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2401 		return QDF_STATUS_E_FAILURE;
2402 	}
2403 	/* TODO: See if we can delete the existing REO queue descriptor and
2404 	 * replace it with a new one without queue extension descriptors to
2405 	 * save memory
2406 	 */
2407 	rx_tid->delba_rcode = reasoncode;
2408 	rx_tid->num_of_delba_req++;
2409 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2410 
2411 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2412 	peer->active_ba_session_cnt--;
2413 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2414 	return 0;
2415 }
2416 
2417 /*
2418  * dp_delba_tx_completion_wifi3() - Handle DELBA frame tx completion
2419  *
2420  * @peer: Datapath peer handle
2421  * @tid: TID number
2422  * @status: tx completion status
2423  * Return: 0 on success, error code on failure
2424  */
2425 
2426 int dp_delba_tx_completion_wifi3(void *peer_handle,
2427 				 uint8_t tid, int status)
2428 {
2429 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2430 	struct dp_rx_tid *rx_tid = NULL;
2431 
2432 	if (!peer || peer->delete_in_progress) {
2433 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2434 			  "%s: Peer is NULL!", __func__);
2435 		return QDF_STATUS_E_FAILURE;
2436 	}
2437 	rx_tid = &peer->rx_tid[tid];
2438 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2439 	if (status) {
2440 		rx_tid->delba_tx_fail_cnt++;
2441 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2442 			rx_tid->delba_tx_retry = 0;
2443 			rx_tid->delba_tx_status = 0;
2444 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2445 		} else {
2446 			rx_tid->delba_tx_retry++;
2447 			rx_tid->delba_tx_status = 1;
2448 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2449 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2450 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2451 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2452 				rx_tid->delba_rcode);
2453 		}
2454 		return QDF_STATUS_SUCCESS;
2455 	} else {
2456 		rx_tid->delba_tx_success_cnt++;
2457 		rx_tid->delba_tx_retry = 0;
2458 		rx_tid->delba_tx_status = 0;
2459 	}
2460 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2461 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2462 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2463 		peer->active_ba_session_cnt--;
2464 	}
2465 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2466 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2467 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2468 	}
2469 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2470 
2471 	return QDF_STATUS_SUCCESS;
2472 }
2473 
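/**
 * dp_rx_discard() - Free a list of MSDUs received for a partly-deleted peer
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @tid: TID number
 * @msdu_list: head of the MSDU list to be discarded
 *
 * Return: none
 */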
2474 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2475 	qdf_nbuf_t msdu_list)
2476 {
2477 	while (msdu_list) {
2478 		qdf_nbuf_t msdu = msdu_list;
2479 
2480 		msdu_list = qdf_nbuf_next(msdu_list);
2481 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2482 			"discard rx %pK from partly-deleted peer %pK "
2483 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
2484 			msdu, peer,
2485 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2486 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2487 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2488 		qdf_nbuf_free(msdu);
2489 	}
2490 }
2491 
2492 
2493 /**
2494  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2495  * @vdev_handle: Datapath vdev handle
2496  * @peer_handle: Datapath peer handle
2497  * @sec_type: security type
2498  * @rx_pn: Receive PN starting number
2500  *
2501  */
2502 
2503 void
2504 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
2505 {
2506 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2507 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2508 	struct dp_pdev *pdev;
2509 	struct dp_soc *soc;
2510 	int i;
2511 	uint8_t pn_size;
2512 	struct hal_reo_cmd_params params;
2513 
2514 	/* preconditions */
2515 	qdf_assert(vdev);
2516 
2517 	pdev = vdev->pdev;
2518 	soc = pdev->soc;
2519 
2520 
2521 	qdf_mem_zero(&params, sizeof(params));
2522 
2523 	params.std.need_status = 1;
2524 	params.u.upd_queue_params.update_pn_valid = 1;
2525 	params.u.upd_queue_params.update_pn_size = 1;
2526 	params.u.upd_queue_params.update_pn = 1;
2527 	params.u.upd_queue_params.update_pn_check_needed = 1;
2528 	params.u.upd_queue_params.update_svld = 1;
2529 	params.u.upd_queue_params.svld = 0;
2530 
2531 	peer->security[dp_sec_ucast].sec_type = sec_type;
2532 
2533 	switch (sec_type) {
2534 	case cdp_sec_type_tkip_nomic:
2535 	case cdp_sec_type_aes_ccmp:
2536 	case cdp_sec_type_aes_ccmp_256:
2537 	case cdp_sec_type_aes_gcmp:
2538 	case cdp_sec_type_aes_gcmp_256:
2539 		params.u.upd_queue_params.pn_check_needed = 1;
2540 		params.u.upd_queue_params.pn_size = 48;
2541 		pn_size = 48;
2542 		break;
2543 	case cdp_sec_type_wapi:
2544 		params.u.upd_queue_params.pn_check_needed = 1;
2545 		params.u.upd_queue_params.pn_size = 128;
2546 		pn_size = 128;
2547 		if (vdev->opmode == wlan_op_mode_ap) {
2548 			params.u.upd_queue_params.pn_even = 1;
2549 			params.u.upd_queue_params.update_pn_even = 1;
2550 		} else {
2551 			params.u.upd_queue_params.pn_uneven = 1;
2552 			params.u.upd_queue_params.update_pn_uneven = 1;
2553 		}
2554 		break;
2555 	default:
2556 		params.u.upd_queue_params.pn_check_needed = 0;
2557 		pn_size = 0;
2558 		break;
2559 	}
2560 
2561 
2562 	for (i = 0; i < DP_MAX_TIDS; i++) {
2563 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2564 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2565 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2566 			params.std.addr_lo =
2567 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2568 			params.std.addr_hi =
2569 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2570 
2571 			if (pn_size) {
2572 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2573 					  QDF_TRACE_LEVEL_INFO_HIGH,
2574 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
2575 					  __func__, i, rx_pn[3], rx_pn[2],
2576 					  rx_pn[1], rx_pn[0]);
2577 				params.u.upd_queue_params.update_pn_valid = 1;
2578 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2579 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2580 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2581 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2582 			}
2583 			rx_tid->pn_size = pn_size;
2584 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2585 				dp_rx_tid_update_cb, rx_tid);
2586 		} else {
2587 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2588 				"PN Check not setup for TID :%d ", i);
2589 		}
2590 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2591 	}
2592 }
2593 
2594 
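/**
 * dp_rx_sec_ind_handler() - Handle security indication from the target
 * @soc_handle: DP SOC handle
 * @peer_id: peer id of the peer
 * @sec_type: security type
 * @is_unicast: flag for unicast (vs. multicast) key
 * @michael_key: TKIP michael key
 * @rx_pn: Receive PN starting value
 *
 * Return: none
 */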
2595 void
2596 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2597 	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2598 	u_int32_t *rx_pn)
2599 {
2600 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2601 	struct dp_peer *peer;
2602 	int sec_index;
2603 
2604 	peer = dp_peer_find_by_id(soc, peer_id);
2605 	if (!peer) {
2606 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2607 			"Couldn't find peer from ID %d - skipping security inits",
2608 			peer_id);
2609 		return;
2610 	}
2611 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2612 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
2613 		"%s key of type %d",
2614 		peer,
2615 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2616 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2617 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2618 		is_unicast ? "ucast" : "mcast",
2619 		sec_type);
2620 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2621 	peer->security[sec_index].sec_type = sec_type;
2622 #ifdef notyet /* TODO: See if this is required for defrag support */
2623 	/* michael key only valid for TKIP, but for simplicity,
2624 	 * copy it anyway
2625 	 */
2626 	qdf_mem_copy(
2627 		&peer->security[sec_index].michael_key[0],
2628 		michael_key,
2629 		sizeof(peer->security[sec_index].michael_key));
2630 #ifdef BIG_ENDIAN_HOST
2631 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2632 				 sizeof(peer->security[sec_index].michael_key));
2633 #endif /* BIG_ENDIAN_HOST */
2634 #endif
2635 
2636 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2637 	if (sec_type != cdp_sec_type_wapi) {
2638 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
2639 	} else {
2640 		for (i = 0; i < DP_MAX_TIDS; i++) {
2641 			/*
2642 			 * Setting PN valid bit for WAPI sec_type,
2643 			 * since WAPI PN has to be started with predefined value
2644 			 */
2645 			peer->tids_last_pn_valid[i] = 1;
2646 			qdf_mem_copy(
2647 				(u_int8_t *) &peer->tids_last_pn[i],
2648 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2649 			peer->tids_last_pn[i].pn128[1] =
2650 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2651 			peer->tids_last_pn[i].pn128[0] =
2652 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2653 		}
2654 	}
2655 #endif
2656 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2657 	 * all security types and last pn for WAPI) once REO command API
2658 	 * is available
2659 	 */
2660 
2661 	dp_peer_unref_del_find_by_id(peer);
2662 }
2663 
2664 #ifndef CONFIG_WIN
2665 /**
2666  * dp_register_peer() - Register peer into physical device
2667  * @pdev - data path device instance
2668  * @sta_desc - peer description
2669  *
2670  * Register peer into physical device
2671  *
2672  * Return: QDF_STATUS_SUCCESS registration success
2673  *         QDF_STATUS_E_FAULT peer not found
2674  */
2675 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2676 		struct ol_txrx_desc_type *sta_desc)
2677 {
2678 	struct dp_peer *peer;
2679 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2680 
2681 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2682 			sta_desc->sta_id);
2683 	if (!peer)
2684 		return QDF_STATUS_E_FAULT;
2685 
2686 	qdf_spin_lock_bh(&peer->peer_info_lock);
2687 	peer->state = OL_TXRX_PEER_STATE_CONN;
2688 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2689 
2690 	return QDF_STATUS_SUCCESS;
2691 }
2692 
2693 /**
2694  * dp_clear_peer() - remove peer from physical device
2695  * @pdev - data path device instance
2696  * @local_id - local peer id
2697  *
2698  * remove peer from physical device
2699  *
2700  * Return: QDF_STATUS_SUCCESS registration success
2701  *         QDF_STATUS_E_FAULT peer not found
2702  */
2703 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2704 {
2705 	struct dp_peer *peer;
2706 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2707 
2708 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2709 	if (!peer)
2710 		return QDF_STATUS_E_FAULT;
2711 
2712 	qdf_spin_lock_bh(&peer->peer_info_lock);
2713 	peer->state = OL_TXRX_PEER_STATE_DISC;
2714 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2715 
2716 	return QDF_STATUS_SUCCESS;
2717 }
2718 
2719 /**
2720  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2721  * @pdev - data path device instance
2722  * @vdev - virtual interface instance
2723  * @peer_addr - peer mac address
2724  * @local_id - local peer id of the peer with the target mac address
2725  *
2726  * Find peer by peer mac address within vdev
2727  *
2728  * Return: peer instance void pointer
2729  *         NULL cannot find target peer
2730  */
2731 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2732 		struct cdp_vdev *vdev_handle,
2733 		uint8_t *peer_addr, uint8_t *local_id)
2734 {
2735 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2736 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2737 	struct dp_peer *peer;
2738 
2739 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2740 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2741 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2742 
2743 	if (!peer)
2744 		return NULL;
2745 
2746 	if (peer->vdev != vdev) {
2747 		dp_peer_unref_delete(peer);
2748 		return NULL;
2749 	}
2750 
2751 	*local_id = peer->local_id;
2752 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2753 
2754 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2755 	 * Decrement it here.
2756 	 */
2757 	dp_peer_unref_delete(peer);
2758 
2759 	return peer;
2760 }
2761 
2762 /**
2763  * dp_local_peer_id() - Find local peer id within peer instance
2764  * @peer - peer instance
2765  *
2766  * Find local peer id within peer instance
2767  *
2768  * Return: local peer id
2769  */
2770 uint16_t dp_local_peer_id(void *peer)
2771 {
2772 	return ((struct dp_peer *)peer)->local_id;
2773 }
2774 
2775 /**
2776  * dp_peer_find_by_local_id() - Find peer by local peer id
2777  * @pdev - data path device instance
2778  * @local_id - local peer id to find
2779  *
2780  * Find peer by local peer id within physical device
2781  *
2782  * Return: peer instance void pointer
2783  *         NULL cannot find target peer
2784  */
2785 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2786 {
2787 	struct dp_peer *peer;
2788 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2789 
2790 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2791 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2792 				   "Incorrect local id %u", local_id);
2793 		return NULL;
2794 	}
2795 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2796 	peer = pdev->local_peer_ids.map[local_id];
2797 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2798 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2799 	return peer;
2800 }
2801 
2802 /**
2803  * dp_peer_state_update() - update peer local state
2804  * @pdev - data path device instance
2805  * @peer_mac - peer mac address
2806  * @state - new peer local state
2807  *
2808  * update peer local state
2809  *
2810  * Return: QDF_STATUS_SUCCESS registration success
2811  */
2812 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2813 		enum ol_txrx_peer_state state)
2814 {
2815 	struct dp_peer *peer;
2816 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2817 
2818 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2819 	if (NULL == peer) {
2820 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2821 		"Failed to find peer for: [%pM]", peer_mac);
2822 		return QDF_STATUS_E_FAILURE;
2823 	}
2824 	peer->state = state;
2825 
2826 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2827 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2828 	 * Decrement it here.
2829 	 */
2830 	dp_peer_unref_delete(peer);
2831 
2832 	return QDF_STATUS_SUCCESS;
2833 }
2834 
2835 /**
2836  * dp_get_vdevid() - Get virtual interface id which peer registered
2837  * @peer - peer instance
2838  * @vdev_id - virtual interface id which peer registered
2839  *
2840  * Get virtual interface id which peer registered
2841  *
2842  * Return: QDF_STATUS_SUCCESS registration success
2843  */
2844 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2845 {
2846 	struct dp_peer *peer = peer_handle;
2847 
2848 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2849 			peer, peer->vdev, peer->vdev->vdev_id);
2850 	*vdev_id = peer->vdev->vdev_id;
2851 	return QDF_STATUS_SUCCESS;
2852 }
2853 
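/**
 * dp_get_vdev_by_sta_id() - Get vdev instance from local peer id
 * @pdev_handle - data path device instance
 * @sta_id - local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */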
2854 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2855 				       uint8_t sta_id)
2856 {
2857 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2858 	struct dp_peer *peer = NULL;
2859 
2860 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2861 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2862 			  "Invalid sta id passed");
2863 		return NULL;
2864 	}
2865 
2866 	if (!pdev) {
2867 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2868 			  "PDEV not found for sta_id [%d]", sta_id);
2869 		return NULL;
2870 	}
2871 
2872 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2873 	if (!peer) {
2874 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2875 			  "PEER [%d] not found", sta_id);
2876 		return NULL;
2877 	}
2878 
2879 	return (struct cdp_vdev *)peer->vdev;
2880 }
2881 
2882 /**
2883  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2884  * @peer - peer instance
2885  *
2886  * Get virtual interface instance which peer belongs
2887  *
2888  * Return: virtual interface instance pointer
2889  *         NULL in case cannot find
2890  */
2891 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2892 {
2893 	struct dp_peer *peer = peer_handle;
2894 
2895 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2896 	return (struct cdp_vdev *)peer->vdev;
2897 }
2898 
2899 /**
2900  * dp_peer_get_peer_mac_addr() - Get peer mac address
2901  * @peer - peer instance
2902  *
2903  * Get peer mac address
2904  *
2905  * Return: peer mac address pointer
2906  *         NULL in case cannot find
2907  */
2908 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2909 {
2910 	struct dp_peer *peer = peer_handle;
2911 	uint8_t *mac;
2912 
2913 	mac = peer->mac_addr.raw;
2914 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2915 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2916 	return peer->mac_addr.raw;
2917 }
2918 
2919 /**
2920  * dp_get_peer_state() - Get local peer state
2921  * @peer - peer instance
2922  *
2923  * Get local peer state
2924  *
2925  * Return: peer status
2926  */
2927 int dp_get_peer_state(void *peer_handle)
2928 {
2929 	struct dp_peer *peer = peer_handle;
2930 
2931 	DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
2932 	return peer->state;
2933 }
2934 
2935 /**
2936  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2937  * @pdev - data path device instance
2938  *
2939  * local peer id pool alloc for physical device
2940  *
2941  * Return: none
2942  */
2943 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2944 {
2945 	int i;
2946 
2947 	/* point the freelist to the first ID */
2948 	pdev->local_peer_ids.freelist = 0;
2949 
2950 	/* link each ID to the next one */
2951 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2952 		pdev->local_peer_ids.pool[i] = i + 1;
2953 		pdev->local_peer_ids.map[i] = NULL;
2954 	}
2955 
2956 	/* link the last ID to itself, to mark the end of the list */
2957 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2958 	pdev->local_peer_ids.pool[i] = i;
2959 
2960 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2961 	DP_TRACE(INFO, "Peer pool init");
2962 }
2963 
2964 /**
2965  * dp_local_peer_id_alloc() - allocate local peer id
2966  * @pdev - data path device instance
2967  * @peer - new peer instance
2968  *
2969  * allocate local peer id
2970  *
2971  * Return: none
2972  */
2973 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2974 {
2975 	int i;
2976 
2977 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2978 	i = pdev->local_peer_ids.freelist;
2979 	if (pdev->local_peer_ids.pool[i] == i) {
2980 		/* the list is empty, except for the list-end marker */
2981 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2982 	} else {
2983 		/* take the head ID and advance the freelist */
2984 		peer->local_id = i;
2985 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2986 		pdev->local_peer_ids.map[i] = peer;
2987 	}
2988 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2989 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2990 }
2991 
2992 /**
2993  * dp_local_peer_id_free() - remove local peer id
2994  * @pdev - data path device instance
2995  * @peer - peer instance should be removed
2996  *
2997  * remove local peer id
2998  *
2999  * Return: none
3000  */
3001 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3002 {
3003 	int i = peer->local_id;
3004 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3005 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3006 		return;
3007 	}
3008 
3009 	/* put this ID on the head of the freelist */
3010 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3011 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3012 	pdev->local_peer_ids.freelist = i;
3013 	pdev->local_peer_ids.map[i] = NULL;
3014 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3015 }
3016 #endif
3017 
3018 /**
3019  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3020  * @soc_handle: DP SOC handle
3021  * @peer_id:peer_id of the peer
3022  *
3023  * return: vdev_id of the vap
3024  */
3025 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3026 		uint16_t peer_id, uint8_t *peer_mac)
3027 {
3028 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3029 	struct dp_peer *peer;
3030 	uint8_t vdev_id;
3031 
3032 	peer = dp_peer_find_by_id(soc, peer_id);
3033 
3034 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3035 			"soc %pK peer_id %d", soc, peer_id);
3036 
3037 	if (!peer) {
3038 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3039 				"peer not found ");
3040 		return CDP_INVALID_VDEV_ID;
3041 	}
3042 
3043 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3044 	vdev_id = peer->vdev->vdev_id;
3045 
3046 	dp_peer_unref_del_find_by_id(peer);
3047 
3048 	return vdev_id;
3049 }
3050 
3051 /**
3052  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
3053  * @peer: DP peer handle
3054  * @dp_stats_cmd_cb: REO command callback function
3055  * @cb_ctxt: Callback context
3056  *
3057  * Return: none
3058  */
3059 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3060 			void *cb_ctxt)
3061 {
3062 	struct dp_soc *soc = peer->vdev->pdev->soc;
3063 	struct hal_reo_cmd_params params;
3064 	int i;
3065 
3066 	if (!dp_stats_cmd_cb)
3067 		return;
3068 
3069 	qdf_mem_zero(&params, sizeof(params));
3070 	for (i = 0; i < DP_MAX_TIDS; i++) {
3071 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3072 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
3073 			params.std.need_status = 1;
3074 			params.std.addr_lo =
3075 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3076 			params.std.addr_hi =
3077 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3078 
3079 			if (cb_ctxt) {
3080 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3081 					&params, dp_stats_cmd_cb, cb_ctxt);
3082 			} else {
3083 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3084 					&params, dp_stats_cmd_cb, rx_tid);
3085 			}
3086 
3087 			/* Flush REO descriptor from HW cache to update stats
3088 			 * in descriptor memory. This is to help debugging */
3089 			qdf_mem_zero(&params, sizeof(params));
3090 			params.std.need_status = 0;
3091 			params.std.addr_lo =
3092 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3093 			params.std.addr_hi =
3094 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3095 			params.u.fl_cache_params.flush_no_inval = 1;
3096 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3097 				NULL);
3098 		}
3099 	}
3100 }
3101 
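/**
 * dp_set_michael_key() - Set michael key for the peer
 * @peer_handle: Datapath peer handle
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: michael key to be copied into the peer security context
 *
 * Return: none
 */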
3102 void dp_set_michael_key(struct cdp_peer *peer_handle,
3103 			bool is_unicast, uint32_t *key)
3104 {
3105 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3106 	uint8_t sec_index = is_unicast ? 1 : 0;
3107 
3108 	if (!peer) {
3109 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3110 			  "peer not found ");
3111 		return;
3112 	}
3113 
3114 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3115 		     key, IEEE80211_WEP_MICLEN);
3116 }
3117 
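/**
 * dp_peer_find_by_id_valid() - Check if a peer exists for the given peer id
 * @soc: DP SOC handle
 * @peer_id: peer id to look up
 *
 * Return: true if a peer is found for the peer id, false otherwise
 */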
3118 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3119 {
3120 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3121 
3122 	if (peer) {
3123 		/*
3124 		 * Decrement the peer ref which is taken as part of
3125 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3126 		 */
3127 		dp_peer_unref_del_find_by_id(peer);
3128 
3129 		return true;
3130 	}
3131 
3132 	return false;
3133 }
3134