xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
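/*
 * dp_set_ssn_valid_flag() - set the SSN valid (svld) field in a REO
 * queue update command (no-op when DP_LFR is not enabled)
 * @params: REO command parameters
 * @valid: value to program into the svld field
 *
 * Return: None
 */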
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
44 		  "%s: Setting SSN valid bit to %d",
45 		  __func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {};
51 #endif
52 
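/*
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the two addresses are equal, non-zero otherwise
 */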
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&.
60 		 * because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
69 
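/*
 * dp_peer_find_map_attach() - allocate the peer id to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */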
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
77 		  "\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
82 			  "%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
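/*
 * dp_log2_ceil() - compute the ceiling of log2(value)
 * @value: input value
 *
 * Return: smallest n such that (1 << n) >= value
 */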
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
110 
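/*
 * dp_peer_find_add_id_to_obj() - record a peer_id in the peer object
 * @peer: peer object
 * @peer_id: peer id assigned by firmware
 *
 * Stores the peer_id in the first free slot of peer->peer_ids.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all slots are in use
 */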
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
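/*
 * dp_peer_find_hash_attach() - allocate the peer MAC address hash table
 * @soc: SoC handle
 *
 * The table is sized to the next power of two of max_peers scaled by the
 * hash load factor, and each bin's TAILQ is initialized.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */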
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
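/*
 * dp_peer_find_hash_index() - compute the peer hash index for a MAC address
 * @soc: SoC handle
 * @mac_addr: aligned MAC address
 *
 * Return: hash bin index within soc->peer_hash
 */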
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
175 
176 
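/*
 * dp_peer_find_hash_add() - add a peer to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * Takes peer_ref_mutex and inserts the peer at the tail of its hash bin.
 *
 * Return: None
 */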
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: None
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 /*
229  * dp_peer_ast_cleanup() - cleanup the references
230  * @soc: SoC handle
231  * @ast: ast entry
232  *
233  * Return: None
234  */
235 static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
236 				       struct dp_ast_entry *ast)
237 {
238 	txrx_ast_free_cb cb = ast->callback;
239 	void *cookie = ast->cookie;
240 
241 	/* Call the callbacks to free up the cookie */
242 	if (cb) {
243 		ast->callback = NULL;
244 		ast->cookie = NULL;
245 		cb(soc->ctrl_psoc,
246 		   soc,
247 		   cookie,
248 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
249 	}
250 }
251 
252 /*
253  * dp_peer_ast_hash_detach() - Free AST Hash table
254  * @soc: SoC handle
255  *
256  * Return: None
257  */
258 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
259 {
260 	unsigned int index;
261 	struct dp_ast_entry *ast, *ast_next;
262 
263 	if (!soc->ast_hash.mask)
264 		return;
265 
266 	qdf_spin_lock_bh(&soc->ast_lock);
267 	for (index = 0; index <= soc->ast_hash.mask; index++) {
268 		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
269 			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
270 					   hash_list_elem, ast_next) {
271 				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
272 					     hash_list_elem);
273 				dp_peer_ast_cleanup(soc, ast);
274 				qdf_mem_free(ast);
275 			}
276 		}
277 	}
278 	qdf_spin_unlock_bh(&soc->ast_lock);
279 
280 	qdf_mem_free(soc->ast_hash.bins);
281 }
282 
283 /*
284  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
285  * @soc: SoC handle
286  *
287  * Return: AST hash
288  */
289 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
290 	union dp_align_mac_addr *mac_addr)
291 {
292 	uint32_t index;
293 
294 	index =
295 		mac_addr->align2.bytes_ab ^
296 		mac_addr->align2.bytes_cd ^
297 		mac_addr->align2.bytes_ef;
298 	index ^= index >> soc->ast_hash.idx_bits;
299 	index &= soc->ast_hash.mask;
300 	return index;
301 }
302 
303 /*
304  * dp_peer_ast_hash_add() - Add AST entry into hash table
305  * @soc: SoC handle
306  *
307  * This function adds the AST entry into SoC AST hash table
308  * It assumes caller has taken the ast lock to protect the access to this table
309  *
310  * Return: None
311  */
312 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
313 		struct dp_ast_entry *ase)
314 {
315 	uint32_t index;
316 
317 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
318 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
319 }
320 
321 /*
322  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
323  * @soc: SoC handle
324  *
325  * This function removes the AST entry from soc AST hash table
326  * It assumes caller has taken the ast lock to protect the access to this table
327  *
328  * Return: None
329  */
330 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
331 		struct dp_ast_entry *ase)
332 {
333 	unsigned index;
334 	struct dp_ast_entry *tmpase;
335 	int found = 0;
336 
337 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
338 	/* Check if tail is not empty before delete */
339 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
340 
341 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
342 		if (tmpase == ase) {
343 			found = 1;
344 			break;
345 		}
346 	}
347 
348 	QDF_ASSERT(found);
349 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
350 }
351 
352 /*
353  * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
354  * @soc: SoC handle
355  * @peer: peer handle
356  * @ast_mac_addr: mac address
357  *
358  * It assumes caller has taken the ast lock to protect the access to ast list
359  *
360  * Return: AST entry
361  */
362 struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
363 					   struct dp_peer *peer,
364 					   uint8_t *ast_mac_addr)
365 {
366 	struct dp_ast_entry *ast_entry = NULL;
367 	union dp_align_mac_addr *mac_addr =
368 		(union dp_align_mac_addr *)ast_mac_addr;
369 
370 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
371 		if (!dp_peer_find_mac_addr_cmp(mac_addr,
372 					       &ast_entry->mac_addr)) {
373 			return ast_entry;
374 		}
375 	}
376 
377 	return NULL;
378 }
379 
380 /*
381  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
382  * @soc: SoC handle
383  *
384  * It assumes caller has taken the ast lock to protect the access to
385  * AST hash table
386  *
387  * Return: AST entry
388  */
389 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
390 						     uint8_t *ast_mac_addr,
391 						     uint8_t pdev_id)
392 {
393 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
394 	uint32_t index;
395 	struct dp_ast_entry *ase;
396 
397 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
398 		     ast_mac_addr, DP_MAC_ADDR_LEN);
399 	mac_addr = &local_mac_addr_aligned;
400 
401 	index = dp_peer_ast_hash_index(soc, mac_addr);
402 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
403 		if ((pdev_id == ase->pdev_id) &&
404 		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
405 			return ase;
406 		}
407 	}
408 
409 	return NULL;
410 }
411 
412 /*
413  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
414  * @soc: SoC handle
415  *
416  * It assumes caller has taken the ast lock to protect the access to
417  * AST hash table
418  *
419  * Return: AST entry
420  */
421 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
422 					       uint8_t *ast_mac_addr)
423 {
424 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
425 	unsigned index;
426 	struct dp_ast_entry *ase;
427 
428 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
429 			ast_mac_addr, DP_MAC_ADDR_LEN);
430 	mac_addr = &local_mac_addr_aligned;
431 
432 	index = dp_peer_ast_hash_index(soc, mac_addr);
433 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
434 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
435 			return ase;
436 		}
437 	}
438 
439 	return NULL;
440 }
441 
442 /*
443  * dp_peer_map_ast() - Map the ast entry with HW AST Index
444  * @soc: SoC handle
445  * @peer: peer to which ast node belongs
446  * @mac_addr: MAC address of ast node
447  * @hw_peer_id: HW AST Index returned by target in peer map event
448  * @vdev_id: vdev id for VAP to which the peer belongs to
449  * @ast_hash: ast hash value in HW
450  *
451  * Return: None
452  */
453 static inline void dp_peer_map_ast(struct dp_soc *soc,
454 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
455 	uint8_t vdev_id, uint16_t ast_hash)
456 {
457 	struct dp_ast_entry *ast_entry = NULL;
458 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
459 
460 	if (!peer) {
461 		return;
462 	}
463 
464 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
465 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
466 		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
467 		  mac_addr[1], mac_addr[2], mac_addr[3],
468 		  mac_addr[4], mac_addr[5]);
469 
470 	qdf_spin_lock_bh(&soc->ast_lock);
471 
472 	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
473 
474 	if (ast_entry) {
475 		ast_entry->ast_idx = hw_peer_id;
476 		soc->ast_table[hw_peer_id] = ast_entry;
477 		ast_entry->is_active = TRUE;
478 		peer_type = ast_entry->type;
479 		ast_entry->ast_hash_value = ast_hash;
480 		ast_entry->is_mapped = TRUE;
481 	}
482 
483 	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
484 		if (soc->cdp_soc.ol_ops->peer_map_event) {
485 			soc->cdp_soc.ol_ops->peer_map_event(
486 			soc->ctrl_psoc, peer->peer_ids[0],
487 			hw_peer_id, vdev_id,
488 			mac_addr, peer_type, ast_hash);
489 		}
490 	} else {
491 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
492 			  "AST entry not found");
493 	}
494 
495 	qdf_spin_unlock_bh(&soc->ast_lock);
496 	return;
497 }
498 
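/*
 * dp_peer_free_hmwds_cb() - AST free callback used to re-add an HMWDS entry
 * @ctrl_psoc: control path SoC handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params saved when the delete was issued
 * @status: AST free status reported for the old entry
 *
 * Once the target confirms deletion of the old AST entry, re-add the
 * HMWDS AST entry using the parameters saved in the cookie.
 *
 * Return: None
 */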
499 void dp_peer_free_hmwds_cb(void *ctrl_psoc,
500 			   void *dp_soc,
501 			   void *cookie,
502 			   enum cdp_ast_free_status status)
503 {
504 	struct dp_ast_free_cb_params *param =
505 		(struct dp_ast_free_cb_params *)cookie;
506 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
507 	struct dp_peer *peer = NULL;
508 
509 	if (status != CDP_TXRX_AST_DELETED) {
510 		qdf_mem_free(cookie);
511 		return;
512 	}
513 
514 	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
515 				      0, param->vdev_id);
516 	if (peer) {
517 		dp_peer_add_ast(soc, peer,
518 				&param->mac_addr.raw[0],
519 				param->type,
520 				param->flags);
521 		dp_peer_unref_delete(peer);
522 	}
523 	qdf_mem_free(cookie);
524 }
525 
526 /*
527  * dp_peer_add_ast() - Allocate and add AST entry into peer list
528  * @soc: SoC handle
529  * @peer: peer to which ast node belongs
530  * @mac_addr: MAC address of ast node
531  * @type: AST entry type
532  * @flags: WDS/HMWDS flags for the new entry
533  * This API is used by WDS source port learning function to
534  * add a new AST entry into peer AST list
535  *
536  * Return: 0 if new entry is allocated,
537  *        -1 if entry add failed
538  */
539 int dp_peer_add_ast(struct dp_soc *soc,
540 			struct dp_peer *peer,
541 			uint8_t *mac_addr,
542 			enum cdp_txrx_ast_entry_type type,
543 			uint32_t flags)
544 {
545 	struct dp_ast_entry *ast_entry = NULL;
546 	struct dp_vdev *vdev = NULL;
547 	struct dp_pdev *pdev = NULL;
548 	uint8_t next_node_mac[6];
549 	int  ret = -1;
550 	txrx_ast_free_cb cb = NULL;
551 	void *cookie = NULL;
552 
553 	qdf_spin_lock_bh(&soc->ast_lock);
554 	if (peer->delete_in_progress) {
555 		qdf_spin_unlock_bh(&soc->ast_lock);
556 		return ret;
557 	}
558 
559 	vdev = peer->vdev;
560 	if (!vdev) {
561 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
562 			  FL("Peers vdev is NULL"));
563 		QDF_ASSERT(0);
564 		qdf_spin_unlock_bh(&soc->ast_lock);
565 		return ret;
566 	}
567 
568 	pdev = vdev->pdev;
569 
570 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
571 		  "%s: pdevid: %u vdev: %u  ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
572 		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
573 		  peer->mac_addr.raw, peer, mac_addr);
574 
575 
576 	/* fw supports only 2 times the max_peers ast entries */
577 	if (soc->num_ast_entries >=
578 	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
579 		qdf_spin_unlock_bh(&soc->ast_lock);
580 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
581 			  FL("Max ast entries reached"));
582 		return ret;
583 	}
584 
585 	/* If the AST entry already exists, just return from here.
586 	 * An AST entry with the same mac address can exist on different
587 	 * radios; if ast_override support is enabled, use search by pdev
588 	 * in this case.
589 	 */
590 	if (soc->ast_override_support) {
591 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
592 							    pdev->pdev_id);
593 		if (ast_entry) {
594 			qdf_spin_unlock_bh(&soc->ast_lock);
595 			return 0;
596 		}
597 	} else {
598 		/* Multiple WDS_HM_SEC entries can be added for the same mac
599 		 * address, so do not check for an existing entry.
600 		 */
601 		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
602 			goto add_ast_entry;
603 
604 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
605 
606 		if (ast_entry) {
607 			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
608 			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
609 				ast_entry->is_active = TRUE;
610 
611 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
612 			    !ast_entry->delete_in_progress) {
613 				qdf_spin_unlock_bh(&soc->ast_lock);
614 				return 0;
615 			}
616 
617 			/* An add for an HMWDS entry cannot be ignored if an
618 			 * AST entry with the same mac address already exists.
619 			 *
620 			 * If an ast entry exists with the requested mac address,
621 			 * send a delete command and register a callback which
622 			 * can take care of adding the HMWDS ast entry on delete
623 			 * confirmation from the target.
624 			 */
625 			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
626 			    soc->is_peer_map_unmap_v2) {
627 				struct dp_ast_free_cb_params *param = NULL;
628 
629 				if (ast_entry->type ==
630 					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
631 					goto add_ast_entry;
632 
633 				/* save existing callback */
634 				if (ast_entry->callback) {
635 					cb = ast_entry->callback;
636 					cookie = ast_entry->cookie;
637 				}
638 
639 				param = qdf_mem_malloc(sizeof(*param));
640 				if (!param) {
641 					QDF_TRACE(QDF_MODULE_ID_TXRX,
642 						  QDF_TRACE_LEVEL_ERROR,
643 						  "Allocation failed");
644 					qdf_spin_unlock_bh(&soc->ast_lock);
645 					return ret;
646 				}
647 
648 				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
649 					     DP_MAC_ADDR_LEN);
650 				qdf_mem_copy(&param->peer_mac_addr.raw[0],
651 					     &peer->mac_addr.raw[0],
652 					     DP_MAC_ADDR_LEN);
653 				param->type = type;
654 				param->flags = flags;
655 				param->vdev_id = vdev->vdev_id;
656 				ast_entry->callback = dp_peer_free_hmwds_cb;
657 				ast_entry->cookie = (void *)param;
658 				if (!ast_entry->delete_in_progress)
659 					dp_peer_del_ast(soc, ast_entry);
660 			}
661 
662 			/* Modify an already existing AST entry from type
663 			 * WDS to MEC on promotion. This serves as a fix when
664 			 * the backbone of interfaces is interchanged, wherein
665 			 * a wds entry becomes its own MEC. The entry should be
666 			 * replaced only when the ast_entry peer matches the
667 			 * peer received in the mec event. This additional check
668 			 * is needed in wds repeater cases, where a multicast
669 			 * packet from a station to the root via the repeater
670 			 * should not remove the wds entry.
671 			 */
672 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
673 			    (type == CDP_TXRX_AST_TYPE_MEC) &&
674 			    (ast_entry->peer == peer)) {
675 				ast_entry->is_active = FALSE;
676 				dp_peer_del_ast(soc, ast_entry);
677 			}
678 			qdf_spin_unlock_bh(&soc->ast_lock);
679 
680 			/* Call the saved callback */
681 			if (cb) {
682 				cb(soc->ctrl_psoc, soc, cookie,
683 				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
684 			}
685 			return 0;
686 		}
687 	}
688 
689 add_ast_entry:
690 	ast_entry = (struct dp_ast_entry *)
691 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
692 
693 	if (!ast_entry) {
694 		qdf_spin_unlock_bh(&soc->ast_lock);
695 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
696 			  FL("fail to allocate ast_entry"));
697 		QDF_ASSERT(0);
698 		return ret;
699 	}
700 
701 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
702 	ast_entry->pdev_id = vdev->pdev->pdev_id;
703 	ast_entry->vdev_id = vdev->vdev_id;
704 	ast_entry->is_mapped = false;
705 	ast_entry->delete_in_progress = false;
706 
707 	switch (type) {
708 	case CDP_TXRX_AST_TYPE_STATIC:
709 		peer->self_ast_entry = ast_entry;
710 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
711 		if (peer->vdev->opmode == wlan_op_mode_sta)
712 			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
713 		break;
714 	case CDP_TXRX_AST_TYPE_SELF:
715 		peer->self_ast_entry = ast_entry;
716 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
717 		break;
718 	case CDP_TXRX_AST_TYPE_WDS:
719 		ast_entry->next_hop = 1;
720 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
721 		break;
722 	case CDP_TXRX_AST_TYPE_WDS_HM:
723 		ast_entry->next_hop = 1;
724 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
725 		break;
726 	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
727 		ast_entry->next_hop = 1;
728 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
729 		break;
730 	case CDP_TXRX_AST_TYPE_MEC:
731 		ast_entry->next_hop = 1;
732 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
733 		break;
734 	case CDP_TXRX_AST_TYPE_DA:
735 		peer = peer->vdev->vap_bss_peer;
736 		ast_entry->next_hop = 1;
737 		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
738 		break;
739 	default:
740 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
741 			FL("Incorrect AST entry type"));
742 	}
743 
744 	ast_entry->is_active = TRUE;
745 	DP_STATS_INC(soc, ast.added, 1);
746 	soc->num_ast_entries++;
747 	dp_peer_ast_hash_add(soc, ast_entry);
748 
749 	ast_entry->peer = peer;
750 
751 	if (type == CDP_TXRX_AST_TYPE_MEC)
752 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
753 	else
754 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
755 
756 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
757 	qdf_spin_unlock_bh(&soc->ast_lock);
758 
759 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
760 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
761 	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
762 	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
763 		if (QDF_STATUS_SUCCESS ==
764 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
765 				peer->vdev->osif_vdev,
766 				(struct cdp_peer *)peer,
767 				mac_addr,
768 				next_node_mac,
769 				flags))
770 			return 0;
771 	}
772 
773 	return ret;
774 }
775 
776 /*
777  * dp_peer_del_ast() - Delete and free AST entry
778  * @soc: SoC handle
779  * @ast_entry: AST entry of the node
780  *
781  * This function removes the AST entry from peer and soc tables
782  * It assumes caller has taken the ast lock to protect the access to these
783  * tables
784  *
785  * Return: None
786  */
787 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
788 {
789 	struct dp_peer *peer = ast_entry->peer;
790 	uint16_t peer_id = peer->peer_ids[0];
791 
792 	dp_peer_ast_send_wds_del(soc, ast_entry);
793 
794 	/*
795 	 * If peer map v2 is enabled, we do not free the ast entry
796 	 * here; it is supposed to be freed in the unmap event (after
797 	 * we receive delete confirmation from the target).
798 	 *
799 	 * If peer_id is invalid, we did not get the peer map event
800 	 * for the peer; free the ast entry from here only in this case.
801 	 */
802 	if (soc->is_peer_map_unmap_v2 && (peer_id != HTT_INVALID_PEER)) {
803 
804 		/*
805 		 * For HM_SEC and SELF types we do not receive an unmap event;
806 		 * free the ast_entry from here itself.
807 		 */
808 		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
809 		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
810 			return;
811 	}
812 
813 	/*
814 	 * release the reference only if it is mapped
815 	 * to ast_table
816 	 */
817 	if (ast_entry->is_mapped)
818 		soc->ast_table[ast_entry->ast_idx] = NULL;
819 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
820 
821 	if (ast_entry == peer->self_ast_entry)
822 		peer->self_ast_entry = NULL;
823 
824 	DP_STATS_INC(soc, ast.deleted, 1);
825 	dp_peer_ast_hash_remove(soc, ast_entry);
826 	dp_peer_ast_cleanup(soc, ast_entry);
827 	qdf_mem_free(ast_entry);
828 	soc->num_ast_entries--;
829 }
830 
831 /*
832  * dp_peer_update_ast() - Update the AST entry to the roamed peer
833  * @soc: SoC handle
834  * @peer: peer to which ast node belongs
835  * @ast_entry: AST entry of the node
836  * @flags: wds or hmwds
837  *
838  * This function updates the AST entry to the roamed peer and soc tables
839  * It assumes caller has taken the ast lock to protect the access to these
840  * tables
841  *
842  * Return: 0 if ast entry is updated successfully
843  *         -1 failure
844  */
845 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
846 		       struct dp_ast_entry *ast_entry, uint32_t flags)
847 {
848 	int ret = -1;
849 	struct dp_peer *old_peer;
850 
851 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
852 		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
853 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
854 		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
855 		  peer->mac_addr.raw);
856 
857 	if (ast_entry->delete_in_progress)
858 		return ret;
859 
860 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
861 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
862 	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
863 	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
864 		return 0;
865 
866 	old_peer = ast_entry->peer;
867 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
868 
869 	ast_entry->peer = peer;
870 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
871 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
872 	ast_entry->vdev_id = peer->vdev->vdev_id;
873 	ast_entry->is_active = TRUE;
874 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
875 
876 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
877 				peer->vdev->osif_vdev,
878 				ast_entry->mac_addr.raw,
879 				peer->mac_addr.raw,
880 				flags);
881 
882 	return ret;
883 }
884 
885 /*
886  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
887  * @soc: SoC handle
888  * @ast_entry: AST entry of the node
889  *
890  * This function gets the pdev_id from the ast entry.
891  *
892  * Return: (uint8_t) pdev_id
893  */
894 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
895 				struct dp_ast_entry *ast_entry)
896 {
897 	return ast_entry->pdev_id;
898 }
899 
900 /*
901  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
902  * @soc: SoC handle
903  * @ast_entry: AST entry of the node
904  *
905  * This function gets the next hop from the ast entry.
906  *
907  * Return: (uint8_t) next_hop
908  */
909 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
910 				struct dp_ast_entry *ast_entry)
911 {
912 	return ast_entry->next_hop;
913 }
914 
915 /*
916  * dp_peer_ast_set_type() - set the type in the ast entry
917  * @soc: SoC handle
918  * @ast_entry: AST entry of the node
919  *
920  * This function sets the type in the ast entry.
921  *
922  * Return: None
923  */
924 void dp_peer_ast_set_type(struct dp_soc *soc,
925 				struct dp_ast_entry *ast_entry,
926 				enum cdp_txrx_ast_entry_type type)
927 {
928 	ast_entry->type = type;
929 }
930 
931 #else
932 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
933 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
934 		uint32_t flags)
935 {
936 	return 1;
937 }
938 
939 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
940 {
941 }
942 
943 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
944 			struct dp_ast_entry *ast_entry, uint32_t flags)
945 {
946 	return 1;
947 }
948 
949 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
950 					       uint8_t *ast_mac_addr)
951 {
952 	return NULL;
953 }
954 
955 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
956 						     uint8_t *ast_mac_addr,
957 						     uint8_t pdev_id)
958 {
959 	return NULL;
960 }
961 
962 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
963 {
964 	return 0;
965 }
966 
967 static inline void dp_peer_map_ast(struct dp_soc *soc,
968 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
969 	uint8_t vdev_id, uint16_t ast_hash)
970 {
971 	return;
972 }
973 
974 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
975 {
976 }
977 
978 void dp_peer_ast_set_type(struct dp_soc *soc,
979 				struct dp_ast_entry *ast_entry,
980 				enum cdp_txrx_ast_entry_type type)
981 {
982 }
983 
984 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
985 				struct dp_ast_entry *ast_entry)
986 {
987 	return 0xff;
988 }
989 
990 
991 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
992 				struct dp_ast_entry *ast_entry)
993 {
994 	return 0xff;
995 }
996 #endif
997 
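/*
 * dp_peer_ast_send_wds_del() - send a WDS AST entry delete to the target
 * @soc: SoC handle
 * @ast_entry: AST entry to be deleted
 *
 * Issues peer_del_wds_entry for next-hop entries (except WDS_HM_SEC) and
 * marks the entry as delete_in_progress.
 *
 * Return: None
 */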
998 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
999 			      struct dp_ast_entry *ast_entry)
1000 {
1001 	struct dp_peer *peer = ast_entry->peer;
1002 	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1003 
1004 	if (ast_entry->delete_in_progress)
1005 		return;
1006 
1007 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1008 		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1009 		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1010 		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1011 		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1012 
1013 	if (ast_entry->next_hop &&
1014 	    ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1015 		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
1016 						    ast_entry->mac_addr.raw);
1017 
1018 	ast_entry->delete_in_progress = true;
1019 }
1020 
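/*
 * dp_peer_ast_free_entry() - free an AST entry and release its references
 * @soc: SoC handle
 * @ast_entry: AST entry to free
 *
 * Removes the entry from the peer AST list, the SoC AST hash table and the
 * ast_table index, invokes any registered free callback with
 * CDP_TXRX_AST_DELETED, and frees the entry memory.
 *
 * Return: None
 */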
1021 static void dp_peer_ast_free_entry(struct dp_soc *soc,
1022 				   struct dp_ast_entry *ast_entry)
1023 {
1024 	struct dp_peer *peer = ast_entry->peer;
1025 	void *cookie = NULL;
1026 	txrx_ast_free_cb cb = NULL;
1027 
1028 	/*
1029 	 * release the reference only if it is mapped
1030 	 * to ast_table
1031 	 */
1032 
1033 	qdf_spin_lock_bh(&soc->ast_lock);
1034 	if (ast_entry->is_mapped)
1035 		soc->ast_table[ast_entry->ast_idx] = NULL;
1036 
1037 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1038 	DP_STATS_INC(soc, ast.deleted, 1);
1039 	dp_peer_ast_hash_remove(soc, ast_entry);
1040 
1041 	cb = ast_entry->callback;
1042 	cookie = ast_entry->cookie;
1043 	ast_entry->callback = NULL;
1044 	ast_entry->cookie = NULL;
1045 
1046 	if (ast_entry == peer->self_ast_entry)
1047 		peer->self_ast_entry = NULL;
1048 
1049 	qdf_spin_unlock_bh(&soc->ast_lock);
1050 
1051 	if (cb) {
1052 		cb(soc->ctrl_psoc,
1053 		   soc,
1054 		   cookie,
1055 		   CDP_TXRX_AST_DELETED);
1056 	}
1057 	qdf_mem_free(ast_entry);
1058 	soc->num_ast_entries--;
1059 }
1060 
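/*
 * dp_peer_find_hash_find() - find a peer by MAC address (and vdev id)
 * @soc: SoC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: set if peer_mac_addr is already suitably aligned
 * @vdev_id: vdev id to match (checked when WRAP support is enabled), or
 *	DP_VDEV_ALL
 *
 * On success the peer's reference count is incremented before returning;
 * the caller must release it with dp_peer_unref_delete.
 *
 * Return: peer object, or NULL if no match is found
 */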
1061 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
1062 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
1063 {
1064 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1065 	unsigned index;
1066 	struct dp_peer *peer;
1067 
1068 	if (mac_addr_is_aligned) {
1069 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1070 	} else {
1071 		qdf_mem_copy(
1072 			&local_mac_addr_aligned.raw[0],
1073 			peer_mac_addr, DP_MAC_ADDR_LEN);
1074 		mac_addr = &local_mac_addr_aligned;
1075 	}
1076 	index = dp_peer_find_hash_index(soc, mac_addr);
1077 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
1078 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1079 #if ATH_SUPPORT_WRAP
1080 		/* ProxySTA may have multiple BSS peers with the same MAC address;
1081 		 * the modified find will take care of finding the correct BSS peer.
1082 		 */
1083 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
1084 			((peer->vdev->vdev_id == vdev_id) ||
1085 			 (vdev_id == DP_VDEV_ALL))) {
1086 #else
1087 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1088 #endif
1089 			/* found it - increment the ref count before releasing
1090 			 * the lock
1091 			 */
1092 			qdf_atomic_inc(&peer->ref_cnt);
1093 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1094 			return peer;
1095 		}
1096 	}
1097 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1098 	return NULL; /* failure */
1099 }
1100 
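/*
 * dp_peer_find_hash_remove() - remove a peer from the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller is expected to hold peer_ref_mutex (see the comment in the
 * function body).
 *
 * Return: None
 */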
1101 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1102 {
1103 	unsigned index;
1104 	struct dp_peer *tmppeer = NULL;
1105 	int found = 0;
1106 
1107 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1108 	/* Check if tail is not empty before delete */
1109 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1110 	/*
1111 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1112 	 * by the caller.
1113 	 * The caller needs to hold the lock from the time the peer object's
1114 	 * reference count is decremented and tested up through the time the
1115 	 * reference to the peer object is removed from the hash table, by
1116 	 * this function.
1117 	 * Holding the lock only while removing the peer object reference
1118 	 * from the hash table keeps the hash table consistent, but does not
1119 	 * protect against a new HL tx context starting to use the peer object
1120 	 * if it looks up the peer object from its MAC address just after the
1121 	 * peer ref count is decremented to zero, but just before the peer
1122 	 * object reference is removed from the hash table.
1123 	 */
1124 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1125 		if (tmppeer == peer) {
1126 			found = 1;
1127 			break;
1128 		}
1129 	}
1130 	QDF_ASSERT(found);
1131 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1132 }
1133 
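/*
 * dp_peer_find_hash_erase() - drop the hash table references to all peers
 * @soc: SoC handle
 *
 * Walks every hash bin and releases each peer via dp_peer_unref_delete.
 * Intended for soc teardown, when the soc is no longer in use.
 *
 * Return: None
 */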
1134 void dp_peer_find_hash_erase(struct dp_soc *soc)
1135 {
1136 	int i;
1137 
1138 	/*
1139 	 * Not really necessary to take peer_ref_mutex lock - by this point,
1140 	 * it's known that the soc is no longer in use.
1141 	 */
1142 	for (i = 0; i <= soc->peer_hash.mask; i++) {
1143 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1144 			struct dp_peer *peer, *peer_next;
1145 
1146 			/*
1147 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
1148 			 * memory access violation after peer is freed
1149 			 */
1150 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1151 				hash_list_elem, peer_next) {
1152 				/*
1153 				 * Don't remove the peer from the hash table -
1154 				 * that would modify the list we are currently
1155 				 * traversing, and it's not necessary anyway.
1156 				 */
1157 				/*
1158 				 * Artificially adjust the peer's ref count to
1159 				 * 1, so it will get deleted by
1160 				 * dp_peer_unref_delete.
1161 				 */
1162 				/* set to zero */
1163 				qdf_atomic_init(&peer->ref_cnt);
1164 				/* incr to one */
1165 				qdf_atomic_inc(&peer->ref_cnt);
1166 				dp_peer_unref_delete(peer);
1167 			}
1168 		}
1169 	}
1170 }
1171 
1172 static void dp_peer_find_map_detach(struct dp_soc *soc)
1173 {
1174 	qdf_mem_free(soc->peer_id_to_obj_map);
1175 }
1176 
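/*
 * dp_peer_find_attach() - allocate the peer lookup data structures
 * @soc: SoC handle
 *
 * Allocates the peer id map, the peer MAC address hash table and the AST
 * hash table, undoing earlier allocations on failure.
 *
 * Return: 0 on success, 1 on failure
 */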
1177 int dp_peer_find_attach(struct dp_soc *soc)
1178 {
1179 	if (dp_peer_find_map_attach(soc))
1180 		return 1;
1181 
1182 	if (dp_peer_find_hash_attach(soc)) {
1183 		dp_peer_find_map_detach(soc);
1184 		return 1;
1185 	}
1186 
1187 	if (dp_peer_ast_hash_attach(soc)) {
1188 		dp_peer_find_hash_detach(soc);
1189 		dp_peer_find_map_detach(soc);
1190 		return 1;
1191 	}
1192 	return 0; /* success */
1193 }
1194 
1195 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
1196 	union hal_reo_status *reo_status)
1197 {
1198 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1199 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1200 
1201 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
1202 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
1203 			queue_status->header.status, rx_tid->tid);
1204 		return;
1205 	}
1206 
1207 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
1208 		"ssn: %d\n"
1209 		"curr_idx  : %d\n"
1210 		"pn_31_0   : %08x\n"
1211 		"pn_63_32  : %08x\n"
1212 		"pn_95_64  : %08x\n"
1213 		"pn_127_96 : %08x\n"
1214 		"last_rx_enq_tstamp : %08x\n"
1215 		"last_rx_deq_tstamp : %08x\n"
1216 		"rx_bitmap_31_0     : %08x\n"
1217 		"rx_bitmap_63_32    : %08x\n"
1218 		"rx_bitmap_95_64    : %08x\n"
1219 		"rx_bitmap_127_96   : %08x\n"
1220 		"rx_bitmap_159_128  : %08x\n"
1221 		"rx_bitmap_191_160  : %08x\n"
1222 		"rx_bitmap_223_192  : %08x\n"
1223 		"rx_bitmap_255_224  : %08x\n",
1224 		rx_tid->tid,
1225 		queue_status->ssn, queue_status->curr_idx,
1226 		queue_status->pn_31_0, queue_status->pn_63_32,
1227 		queue_status->pn_95_64, queue_status->pn_127_96,
1228 		queue_status->last_rx_enq_tstamp,
1229 		queue_status->last_rx_deq_tstamp,
1230 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
1231 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
1232 		queue_status->rx_bitmap_159_128,
1233 		queue_status->rx_bitmap_191_160,
1234 		queue_status->rx_bitmap_223_192,
1235 		queue_status->rx_bitmap_255_224);
1236 
1237 	DP_TRACE_STATS(FATAL,
1238 		"curr_mpdu_cnt      : %d\n"
1239 		"curr_msdu_cnt      : %d\n"
1240 		"fwd_timeout_cnt    : %d\n"
1241 		"fwd_bar_cnt        : %d\n"
1242 		"dup_cnt            : %d\n"
1243 		"frms_in_order_cnt  : %d\n"
1244 		"bar_rcvd_cnt       : %d\n"
1245 		"mpdu_frms_cnt      : %d\n"
1246 		"msdu_frms_cnt      : %d\n"
1247 		"total_byte_cnt     : %d\n"
1248 		"late_recv_mpdu_cnt : %d\n"
1249 		"win_jump_2k 	    : %d\n"
1250 		"hole_cnt 	    : %d\n",
1251 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
1252 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
1253 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
1254 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
1255 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
1256 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
1257 		queue_status->hole_cnt);
1258 
1259 	DP_PRINT_STATS("Addba Req          : %d\n"
1260 			"Addba Resp         : %d\n"
1261 			"Addba Resp success : %d\n"
1262 			"Addba Resp failed  : %d\n"
1263 			"Delba Req received : %d\n"
1264 			"Delba Tx success   : %d\n"
1265 			"Delba Tx Fail      : %d\n"
1266 			"BA window size     : %d\n"
1267 			"Pn size            : %d\n",
1268 			rx_tid->num_of_addba_req,
1269 			rx_tid->num_of_addba_resp,
1270 			rx_tid->num_addba_rsp_success,
1271 			rx_tid->num_addba_rsp_failed,
1272 			rx_tid->num_of_delba_req,
1273 			rx_tid->delba_tx_success_cnt,
1274 			rx_tid->delba_tx_fail_cnt,
1275 			rx_tid->ba_win_size,
1276 			rx_tid->pn_size);
1277 }
1278 
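/*
 * dp_peer_find_add_id() - map a firmware peer_id to an existing peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer id assigned by firmware
 * @hw_peer_id: AST index assigned by firmware
 * @vdev_id: vdev id of the peer
 *
 * Looks up the peer by MAC address, records it in peer_id_to_obj_map and
 * stores the peer_id in the peer object.
 *
 * Return: peer object (with an elevated reference count), or NULL
 */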
1279 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
1280 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1281 	uint8_t vdev_id)
1282 {
1283 	struct dp_peer *peer;
1284 
1285 	QDF_ASSERT(peer_id <= soc->max_peers);
1286 	/* check if there's already a peer object with this MAC address */
1287 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1288 		0 /* is aligned */, vdev_id);
1289 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1290 		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1291 		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1292 		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1293 		  peer_mac_addr[4], peer_mac_addr[5]);
1294 
1295 	if (peer) {
1296 		/* peer's ref count was already incremented by
1297 		 * peer_find_hash_find
1298 		 */
1299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1300 			  "%s: ref_cnt: %d", __func__,
1301 			   qdf_atomic_read(&peer->ref_cnt));
1302 		if (!soc->peer_id_to_obj_map[peer_id])
1303 			soc->peer_id_to_obj_map[peer_id] = peer;
1304 		else {
1305 			/* Peer map event came for peer_id which
1306 			 * is already mapped, this is not expected
1307 			 */
1308 			QDF_ASSERT(0);
1309 		}
1310 
1311 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1312 			/* TBDXXX: assert for now */
1313 			QDF_ASSERT(0);
1314 		}
1315 
1316 		return peer;
1317 	}
1318 
1319 	return NULL;
1320 }
1321 
1322 /**
1323  * dp_rx_peer_map_handler() - handle peer map event from firmware
1324  * @soc_handle - generic soc handle
1325  * @peer_id - peer_id from firmware
1326  * @hw_peer_id - ast index for this peer
1327  * @vdev_id - vdev ID
1328  * @peer_mac_addr - mac address of the peer
1329  * @ast_hash - ast hash value
1330  * @is_wds - flag to indicate peer map event for WDS ast entry
1331  *
1332  * associate the peer_id that firmware provided with peer entry
1333  * and update the ast table in the host with the hw_peer_id.
1334  *
1335  * Return: none
1336  */
1337 
1338 void
1339 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1340 		       uint16_t hw_peer_id, uint8_t vdev_id,
1341 		       uint8_t *peer_mac_addr, uint16_t ast_hash,
1342 		       uint8_t is_wds)
1343 {
1344 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1345 	struct dp_peer *peer = NULL;
1346 	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
1347 
1348 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1349 		  "peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1350 		  soc, peer_id,
1351 		  hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1352 		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1353 		  peer_mac_addr[5], vdev_id);
1354 
1355 	if ((hw_peer_id < 0) ||
1356 	    (hw_peer_id >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1357 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1358 			"invalid hw_peer_id: %d", hw_peer_id);
1359 		qdf_assert_always(0);
1360 	}
1361 
1362 	/* For a peer map event for a WDS ast entry, get the peer from
1363 	 * the obj map
1364 	 */
1365 	if (is_wds) {
1366 		peer = soc->peer_id_to_obj_map[peer_id];
1367 	} else {
1368 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1369 					   hw_peer_id, vdev_id);
1370 
1371 		if (peer) {
1372 			/*
1373 			 * For every peer Map message search and set if bss_peer
1374 			 * For every peer map message, check and set bss_peer if applicable
1375 			if (!(qdf_mem_cmp(peer->mac_addr.raw,
1376 					  peer->vdev->mac_addr.raw,
1377 					  DP_MAC_ADDR_LEN))) {
1378 				QDF_TRACE(QDF_MODULE_ID_DP,
1379 					  QDF_TRACE_LEVEL_INFO_HIGH,
1380 					  "vdev bss_peer!!!!");
1381 				peer->bss_peer = 1;
1382 				peer->vdev->vap_bss_peer = peer;
1383 			}
1384 
1385 			if (peer->vdev->opmode == wlan_op_mode_sta)
1386 				peer->vdev->bss_ast_hash = ast_hash;
1387 
1388 			/* Add the ast entry in case the self ast entry was
1389 			 * deleted due to a DP/CP sync issue.
1390 			 *
1391 			 * self_ast_entry is modified in the peer create
1392 			 * and peer unmap paths, which cannot run in
1393 			 * parallel with peer map, so no lock is needed
1394 			 * before referring to it.
1395 			 */
1396 			if (!peer->self_ast_entry) {
1397 				QDF_TRACE(QDF_MODULE_ID_DP,
1398 					  QDF_TRACE_LEVEL_INFO_HIGH,
1399 					  "Add self ast from map %pM",
1400 					  peer_mac_addr);
1401 				dp_peer_add_ast(soc, peer,
1402 						peer_mac_addr,
1403 						type, 0);
1404 			}
1405 
1406 		}
1407 	}
1408 
1409 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1410 			hw_peer_id, vdev_id, ast_hash);
1411 }
1412 
1413 /**
1414  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1415  * @soc_handle - generic soc handle
1416  * @peer_id - peer_id from firmware
1417  * @vdev_id - vdev ID
1418  * @mac_addr - mac address of the peer or wds entry
1419  * @is_wds - flag to indicate peer map event for WDS ast entry
1420  * @is_wds - flag to indicate peer unmap event for WDS ast entry
1421  * Return: none
1422  */
1423 void
1424 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
1425 			 uint8_t vdev_id, uint8_t *mac_addr,
1426 			 uint8_t is_wds)
1427 {
1428 	struct dp_peer *peer;
1429 	struct dp_ast_entry *ast_entry;
1430 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1431 	uint8_t i;
1432 
1433 	peer = __dp_peer_find_by_id(soc, peer_id);
1434 
1435 	/*
1436 	 * Currently peer IDs are assigned for vdevs as well as peers.
1437 	 * If the peer ID is for a vdev, then the peer pointer stored
1438 	 * in peer_id_to_obj_map will be NULL.
1439 	 */
1440 	if (!peer) {
1441 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1442 			  "%s: Received unmap event for invalid peer_id %u",
1443 			  __func__, peer_id);
1444 		return;
1445 	}
1446 
1447 	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1448 	 */
1449 	if (soc->is_peer_map_unmap_v2) {
1450 
1451 		qdf_spin_lock_bh(&soc->ast_lock);
1452 		ast_entry = dp_peer_ast_list_find(soc, peer,
1453 						  mac_addr);
1454 
1455 		if (!ast_entry) {
1456 			/* In the case of qwrap we have multiple BSS peers
1457 			 * with the same mac address.
1458 			 *
1459 			 * An AST entry for this mac address will be created
1460 			 * only for one peer.
1461 			 */
1462 			if (peer->vdev->proxysta_vdev) {
1463 				qdf_spin_unlock_bh(&soc->ast_lock);
1464 				goto peer_unmap;
1465 			}
1466 
1467 			/* Ideally we should not enter this case, where the
1468 			 * ast_entry is not present in the host table and
1469 			 * we still received an unmap event
1470 			 */
1471 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
1472 				  "%s:%d AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u\n",
1473 				  __func__, __LINE__, peer, peer->peer_ids[0],
1474 				  peer->mac_addr.raw, mac_addr, vdev_id,
1475 				  is_wds);
1476 
1477 			qdf_spin_unlock_bh(&soc->ast_lock);
1478 
1479 			if (!is_wds)
1480 				goto peer_unmap;
1481 
1482 			return;
1483 		}
1484 		qdf_spin_unlock_bh(&soc->ast_lock);
1485 
1486 		/* Reuse the AST entry if delete_in_progress is not set;
1487 		 * free it if a delete was already in progress.
1488 		 */
1489 		if (ast_entry->delete_in_progress)
1490 			dp_peer_ast_free_entry(soc, ast_entry);
1491 
1492 		if (is_wds)
1493 			return;
1494 	}
1495 
1496 peer_unmap:
1497 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1498 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1499 		soc, peer_id, peer);
1500 
1501 	soc->peer_id_to_obj_map[peer_id] = NULL;
1502 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1503 		if (peer->peer_ids[i] == peer_id) {
1504 			peer->peer_ids[i] = HTT_INVALID_PEER;
1505 			break;
1506 		}
1507 	}
1508 
1509 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1510 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1511 				peer_id);
1512 	}
1513 
1514 	/*
1515 	 * Remove a reference to the peer.
1516 	 * If there are no more references, delete the peer object.
1517 	 */
1518 	dp_peer_unref_delete(peer);
1519 }
1520 
1521 void
1522 dp_peer_find_detach(struct dp_soc *soc)
1523 {
1524 	dp_peer_find_map_detach(soc);
1525 	dp_peer_find_hash_detach(soc);
1526 	dp_peer_ast_hash_detach(soc);
1527 }
1528 
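/*
 * dp_rx_tid_update_cb() - REO command status callback for RX TID updates
 * @soc: DP SOC handle
 * @cb_ctxt: rx_tid context passed with the REO command
 * @reo_status: REO command status
 *
 * Return: None
 */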
1529 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1530 	union hal_reo_status *reo_status)
1531 {
1532 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1533 
1534 	if ((reo_status->rx_queue_status.header.status !=
1535 		HAL_REO_CMD_SUCCESS) &&
1536 		(reo_status->rx_queue_status.header.status !=
1537 		HAL_REO_CMD_DRAIN)) {
1538 		/* Should not happen normally. Just print error for now */
1539 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1540 			  "%s: Rx tid HW desc update failed(%d): tid %d",
1541 			  __func__,
1542 			  reo_status->rx_queue_status.header.status,
1543 			  rx_tid->tid);
1544 	}
1545 }
1546 
1547 /*
1548  * dp_find_peer_by_addr - find peer instance by mac address
1549  * @dev: physical device instance
1550  * @peer_mac_addr: peer mac address
1551  * @local_id: local id for the peer
1552  *
1553  * Return: peer instance pointer
1554  */
1555 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1556 		uint8_t *local_id)
1557 {
1558 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1559 	struct dp_peer *peer;
1560 
1561 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1562 
1563 	if (!peer)
1564 		return NULL;
1565 
1566 	/* Multiple peer ids? How can we know the peer id? */
1567 	*local_id = peer->local_id;
1568 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1569 
1570 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1571 	 * Decrement it here.
1572 	 */
1573 	dp_peer_unref_delete(peer);
1574 
1575 	return peer;
1576 }
1577 
1578 /*
1579  * dp_rx_tid_update_wifi3() - Update receive TID state
1580  * @peer: Datapath peer handle
1581  * @tid: TID
1582  * @ba_window_size: BlockAck window size
1583  * @start_seq: Starting sequence number
1584  *
1585  * Return: 0 on success, error code on failure
1586  */
1587 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1588 				  ba_window_size, uint32_t start_seq)
1589 {
1590 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1591 	struct dp_soc *soc = peer->vdev->pdev->soc;
1592 	struct hal_reo_cmd_params params;
1593 
1594 	qdf_mem_zero(&params, sizeof(params));
1595 
1596 	params.std.need_status = 1;
1597 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1598 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1599 	params.u.upd_queue_params.update_ba_window_size = 1;
1600 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1601 
1602 	if (start_seq < IEEE80211_SEQ_MAX) {
1603 		params.u.upd_queue_params.update_ssn = 1;
1604 		params.u.upd_queue_params.ssn = start_seq;
1605 	}
1606 
1607 	dp_set_ssn_valid_flag(&params, 0);
1608 
1609 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1610 
1611 	rx_tid->ba_win_size = ba_window_size;
1612 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1613 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1614 			peer->vdev->pdev->ctrl_pdev,
1615 			peer->vdev->vdev_id, peer->mac_addr.raw,
1616 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1617 
1618 	}
1619 	return 0;
1620 }
1621 
1622 /*
1623  * dp_reo_desc_free() - Callback free reo descriptor memory after
1624  * HW cache flush
1625  *
1626  * @soc: DP SOC handle
1627  * @cb_ctxt: Callback context
1628  * @reo_status: REO command status
1629  */
1630 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1631 	union hal_reo_status *reo_status)
1632 {
1633 	struct reo_desc_list_node *freedesc =
1634 		(struct reo_desc_list_node *)cb_ctxt;
1635 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1636 
1637 	if ((reo_status->fl_cache_status.header.status !=
1638 		HAL_REO_CMD_SUCCESS) &&
1639 		(reo_status->fl_cache_status.header.status !=
1640 		HAL_REO_CMD_DRAIN)) {
1641 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1642 			  "%s: Rx tid HW desc flush failed(%d): tid %d",
1643 			  __func__,
1644 			  reo_status->rx_queue_status.header.status,
1645 			  freedesc->rx_tid.tid);
1646 	}
1647 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1648 		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1649 		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1650 	qdf_mem_unmap_nbytes_single(soc->osdev,
1651 		rx_tid->hw_qdesc_paddr,
1652 		QDF_DMA_BIDIRECTIONAL,
1653 		rx_tid->hw_qdesc_alloc_size);
1654 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1655 	qdf_mem_free(freedesc);
1656 }
1657 
1658 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1659 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1660 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1661 {
1662 	if (dma_addr < 0x50000000)
1663 		return QDF_STATUS_E_FAILURE;
1664 	else
1665 		return QDF_STATUS_SUCCESS;
1666 }
1667 #else
1668 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1669 {
1670 	return QDF_STATUS_SUCCESS;
1671 }
1672 #endif
1673 
1674 
1675 /*
1676  * dp_rx_tid_setup_wifi3() - Setup receive TID state
1677  * @peer: Datapath peer handle
1678  * @tid: TID
1679  * @ba_window_size: BlockAck window size
1680  * @start_seq: Starting sequence number
1681  *
1682  * Return: 0 on success, error code on failure
1683  */
1684 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1685 	uint32_t ba_window_size, uint32_t start_seq)
1686 {
1687 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1688 	struct dp_vdev *vdev = peer->vdev;
1689 	struct dp_soc *soc = vdev->pdev->soc;
1690 	uint32_t hw_qdesc_size;
1691 	uint32_t hw_qdesc_align;
1692 	int hal_pn_type;
1693 	void *hw_qdesc_vaddr;
1694 	uint32_t alloc_tries = 0;
1695 	int err = QDF_STATUS_SUCCESS;
1696 
1697 	if (peer->delete_in_progress ||
1698 	    !qdf_atomic_read(&peer->is_default_route_set))
1699 		return QDF_STATUS_E_FAILURE;
1700 
1701 	rx_tid->ba_win_size = ba_window_size;
1702 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1703 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1704 			start_seq);
1705 	rx_tid->delba_tx_status = 0;
1706 	rx_tid->ppdu_id_2k = 0;
1707 	rx_tid->num_of_addba_req = 0;
1708 	rx_tid->num_of_delba_req = 0;
1709 	rx_tid->num_of_addba_resp = 0;
1710 	rx_tid->num_addba_rsp_failed = 0;
1711 	rx_tid->num_addba_rsp_success = 0;
1712 	rx_tid->delba_tx_success_cnt = 0;
1713 	rx_tid->delba_tx_fail_cnt = 0;
1714 	rx_tid->statuscode = 0;
1715 
1716 	/* TODO: Allocating HW queue descriptors based on max BA window size
1717 	 * for all QOS TIDs so that same descriptor can be used later when
1718 	 * ADDBA request is received. This should be changed to allocate HW
1719 	 * queue descriptors based on BA window size being negotiated (0 for
1720 	 * non BA cases), and reallocate when BA window size changes and also
1721 	 * send WMI message to FW to change the REO queue descriptor in Rx
1722 	 * peer entry as part of dp_rx_tid_update.
1723 	 */
1724 	if (tid != DP_NON_QOS_TID)
1725 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1726 			HAL_RX_MAX_BA_WINDOW, tid);
1727 	else
1728 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1729 			ba_window_size, tid);
1730 
1731 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1732 	/* To avoid unnecessary extra allocation for alignment, try allocating
1733 	 * exact size and see if we already have aligned address.
1734 	 */
1735 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1736 
1737 try_desc_alloc:
1738 	rx_tid->hw_qdesc_vaddr_unaligned =
1739 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1740 
1741 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1742 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1743 			  "%s: Rx tid HW desc alloc failed: tid %d",
1744 			  __func__, tid);
1745 		return QDF_STATUS_E_NOMEM;
1746 	}
1747 
1748 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1749 		hw_qdesc_align) {
1750 		/* Address allocated above is not aligned. Allocate extra
1751 		 * memory for alignment
1752 		 */
1753 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1754 		rx_tid->hw_qdesc_vaddr_unaligned =
1755 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1756 					hw_qdesc_align - 1);
1757 
1758 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1759 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1760 				  "%s: Rx tid HW desc alloc failed: tid %d",
1761 				  __func__, tid);
1762 			return QDF_STATUS_E_NOMEM;
1763 		}
1764 
1765 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1766 			rx_tid->hw_qdesc_vaddr_unaligned,
1767 			hw_qdesc_align);
1768 
1769 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1770 			  "%s: Total Size %d Aligned Addr %pK",
1771 			  __func__, rx_tid->hw_qdesc_alloc_size,
1772 			  hw_qdesc_vaddr);
1773 
1774 	} else {
1775 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1776 	}
1777 
1778 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1779 	 * Currently this is set based on htt indication
1780 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1781 	 */
1782 	switch (peer->security[dp_sec_ucast].sec_type) {
1783 	case cdp_sec_type_tkip_nomic:
1784 	case cdp_sec_type_aes_ccmp:
1785 	case cdp_sec_type_aes_ccmp_256:
1786 	case cdp_sec_type_aes_gcmp:
1787 	case cdp_sec_type_aes_gcmp_256:
1788 		hal_pn_type = HAL_PN_WPA;
1789 		break;
1790 	case cdp_sec_type_wapi:
1791 		if (vdev->opmode == wlan_op_mode_ap)
1792 			hal_pn_type = HAL_PN_WAPI_EVEN;
1793 		else
1794 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1795 		break;
1796 	default:
1797 		hal_pn_type = HAL_PN_NONE;
1798 		break;
1799 	}
1800 
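	/* Initialize the REO queue descriptor contents (BA window size, start
	 * sequence number and PN type) in host memory.
	 */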
1801 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1802 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1803 
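	/* Map the descriptor for bidirectional DMA; the resulting physical
	 * address is what the REO hardware uses to access this queue.
	 */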
1804 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1805 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1806 		&(rx_tid->hw_qdesc_paddr));
1807 
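	/* Make sure the DMA address is within the range addressable by REO;
	 * if not, free the buffer and retry the allocation a limited number
	 * of times before giving up.
	 */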
1808 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1809 			QDF_STATUS_SUCCESS) {
1810 		if (alloc_tries++ < 10) {
1811 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1812 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1813 			goto try_desc_alloc;
1814 		} else {
1815 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1816 				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1817 				  __func__, tid);
1818 			err = QDF_STATUS_E_NOMEM;
1819 			goto error;
1820 		}
1821 	}
1822 
1823 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1824 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1825 		    vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
1826 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1827 		    1, ba_window_size)) {
1828 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1829 				  "%s: Failed to send reo queue setup to FW - tid %d\n",
1830 				  __func__, tid);
1831 			err = QDF_STATUS_E_FAILURE;
1832 			goto error;
1833 		}
1834 	}
1835 	return 0;
1836 error:
1837 	if (NULL != rx_tid->hw_qdesc_vaddr_unaligned) {
1838 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
1839 		    QDF_STATUS_SUCCESS)
1840 			qdf_mem_unmap_nbytes_single(
1841 				soc->osdev,
1842 				rx_tid->hw_qdesc_paddr,
1843 				QDF_DMA_BIDIRECTIONAL,
1844 				rx_tid->hw_qdesc_alloc_size);
1845 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1846 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1847 	}
1848 	return err;
1849 }
1850 
1851 /*
1852  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1853  * after deleting the entries (i.e., setting valid=0)
1854  *
1855  * @soc: DP SOC handle
1856  * @cb_ctxt: Callback context
1857  * @reo_status: REO command status
1858  */
1859 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1860 	union hal_reo_status *reo_status)
1861 {
1862 	struct reo_desc_list_node *freedesc =
1863 		(struct reo_desc_list_node *)cb_ctxt;
1864 	uint32_t list_size;
1865 	struct reo_desc_list_node *desc;
1866 	unsigned long curr_ts = qdf_get_system_timestamp();
1867 	uint32_t desc_size, tot_desc_size;
1868 	struct hal_reo_cmd_params params;
1869 
1870 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1871 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1872 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1873 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1874 		return;
1875 	} else if (reo_status->rx_queue_status.header.status !=
1876 		HAL_REO_CMD_SUCCESS) {
1877 		/* Should not happen normally. Just print error for now */
1878 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1879 			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
1880 			  __func__,
1881 			  reo_status->rx_queue_status.header.status,
1882 			  freedesc->rx_tid.tid);
1883 	}
1884 
1885 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1886 		"%s: rx_tid: %d status: %d", __func__,
1887 		freedesc->rx_tid.tid,
1888 		reo_status->rx_queue_status.header.status);
1889 
1890 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1891 	freedesc->free_ts = curr_ts;
1892 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1893 		(qdf_list_node_t *)freedesc, &list_size);
1894 
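	/* Age out entries from the freelist: flush and free a descriptor once
	 * the list has grown beyond REO_DESC_FREELIST_SIZE or the entry has
	 * been queued for longer than REO_DESC_FREE_DEFER_MS.
	 */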
1895 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1896 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1897 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1898 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1899 		struct dp_rx_tid *rx_tid;
1900 
1901 		qdf_list_remove_front(&soc->reo_desc_freelist,
1902 				(qdf_list_node_t **)&desc);
1903 		list_size--;
1904 		rx_tid = &desc->rx_tid;
1905 
1906 		/* Flush and invalidate REO descriptor from HW cache: Base and
1907 		 * extension descriptors should be flushed separately */
1908 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1909 		/* Get base descriptor size by passing non-qos TID */
1910 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1911 						   DP_NON_QOS_TID);
1912 
1913 		/* Flush reo extension descriptors */
1914 		while ((tot_desc_size -= desc_size) > 0) {
1915 			qdf_mem_zero(&params, sizeof(params));
1916 			params.std.addr_lo =
1917 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1918 				tot_desc_size) & 0xffffffff;
1919 			params.std.addr_hi =
1920 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1921 
1922 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1923 							CMD_FLUSH_CACHE,
1924 							&params,
1925 							NULL,
1926 							NULL)) {
1927 				QDF_TRACE(QDF_MODULE_ID_DP,
1928 					QDF_TRACE_LEVEL_ERROR,
1929 					"%s: failed to send CMD_FLUSH_CACHE:"
1930 					"tid %d desc %pK", __func__,
1931 					rx_tid->tid,
1932 					(void *)(rx_tid->hw_qdesc_paddr));
1933 			}
1934 		}
1935 
1936 		/* Flush base descriptor */
1937 		qdf_mem_zero(&params, sizeof(params));
1938 		params.std.need_status = 1;
1939 		params.std.addr_lo =
1940 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1941 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1942 
1943 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1944 							  CMD_FLUSH_CACHE,
1945 							  &params,
1946 							  dp_reo_desc_free,
1947 							  (void *)desc)) {
1948 			union hal_reo_status reo_status;
1949 			/*
1950 			 * If dp_reo_send_cmd returns failure, the related TID queue
1951 			 * descriptor should be unmapped, and the local reo_desc,
1952 			 * together with the TID queue descriptor, needs to be freed.
1953 			 *
1954 			 * Invoke the desc_free function directly here to do the cleanup.
1955 			 */
1956 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1957 				  "%s: fail to send REO cmd to flush cache: tid %d",
1958 				  __func__, rx_tid->tid);
1959 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1960 			reo_status.fl_cache_status.header.status = 0;
1961 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1962 		}
1963 	}
1964 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1965 }
1966 
1967 /*
1968  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1969  * @peer: Datapath peer handle
1970  * @tid: TID
1971  *
1972  * Return: 0 on success, error code on failure
1973  */
1974 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1975 {
1976 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1977 	struct dp_soc *soc = peer->vdev->pdev->soc;
1978 	struct hal_reo_cmd_params params;
1979 	struct reo_desc_list_node *freedesc =
1980 		qdf_mem_malloc(sizeof(*freedesc));
1981 
1982 	if (!freedesc) {
1983 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1984 			  "%s: malloc failed for freedesc: tid %d",
1985 			  __func__, tid);
1986 		return -ENOMEM;
1987 	}
1988 
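	/* Stash a copy of the rx_tid state in the freelist node;
	 * dp_rx_tid_delete_cb uses it to flush the HW cache and free the
	 * queue memory once the VLD bit has been cleared.
	 */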
1989 	freedesc->rx_tid = *rx_tid;
1990 
1991 	qdf_mem_zero(&params, sizeof(params));
1992 
1993 	params.std.need_status = 1;
1994 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1995 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1996 	params.u.upd_queue_params.update_vld = 1;
1997 	params.u.upd_queue_params.vld = 0;
1998 
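	/* Clear the VLD bit via an UPDATE_RX_REO_QUEUE command so HW stops
	 * using this queue; the rest of the teardown is done in
	 * dp_rx_tid_delete_cb.
	 */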
1999 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2000 		dp_rx_tid_delete_cb, (void *)freedesc);
2001 
2002 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2003 	rx_tid->hw_qdesc_alloc_size = 0;
2004 	rx_tid->hw_qdesc_paddr = 0;
2005 
2006 	return 0;
2007 }
2008 
2009 #ifdef DP_LFR
2010 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2011 {
2012 	int tid;
2013 
2014 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2015 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
2016 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2017 			  "Setting up TID %d for peer %pK peer->local_id %d",
2018 			  tid, peer, peer->local_id);
2019 	}
2020 }
2021 #else
2022 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
2023 #endif
2024 /*
2025  * dp_peer_rx_init() – Initialize receive TID state
2026  * @pdev: Datapath pdev
2027  * @peer: Datapath peer
2028  *
2029  */
2030 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2031 {
2032 	int tid;
2033 	struct dp_rx_tid *rx_tid;
2034 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2035 		rx_tid = &peer->rx_tid[tid];
2036 		rx_tid->array = &rx_tid->base;
2037 		rx_tid->base.head = rx_tid->base.tail = NULL;
2038 		rx_tid->tid = tid;
2039 		rx_tid->defrag_timeout_ms = 0;
2040 		rx_tid->ba_win_size = 0;
2041 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2042 
2043 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2044 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
2045 	}
2046 
2047 	peer->active_ba_session_cnt = 0;
2048 	peer->hw_buffer_size = 0;
2049 	peer->kill_256_sessions = 0;
2050 
2051 	/* Setup default (non-qos) rx tid queue */
2052 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
2053 
2054 	/* Set up the rx tid queue for TID 0 here. The remaining queues are
2055 	 * set up only when the first packet arrives on that TID; deferring
2056 	 * TID 0 the same way would cause a NULL REO queue error.
2057 	 */
2058 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2059 
2060 	/*
2061 	 * Setup the rest of TID's to handle LFR
2062 	 */
2063 	dp_peer_setup_remaining_tids(peer);
2064 
2065 	/*
2066 	 * Set security defaults: no PN check, no security. The target may
2067 	 * send a HTT SEC_IND message to overwrite these defaults.
2068 	 */
2069 	peer->security[dp_sec_ucast].sec_type =
2070 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
2071 }
2072 
2073 /*
2074  * dp_peer_rx_cleanup() – Cleanup receive TID state
2075  * @vdev: Datapath vdev
2076  * @peer: Datapath peer
2077  *
2078  */
2079 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2080 {
2081 	int tid;
2082 	uint32_t tid_delete_mask = 0;
2083 
2084 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
2085 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2086 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2087 
2088 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2089 		if (!peer->bss_peer) {
2090 			/* Cleanup defrag related resource */
2091 			dp_rx_defrag_waitlist_remove(peer, tid);
2092 			dp_rx_reorder_flush_frag(peer, tid);
2093 		}
2094 
2095 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2096 			dp_rx_tid_delete_wifi3(peer, tid);
2097 
2098 			tid_delete_mask |= (1 << tid);
2099 		}
2100 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2101 	}
2102 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2103 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
2104 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
2105 			peer->vdev->vdev_id, peer->mac_addr.raw,
2106 			tid_delete_mask);
2107 	}
2108 #endif
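	/* Rx TID queues have been deleted; destroy the per-TID locks */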
2109 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
2110 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
2111 }
2112 
2113 /*
2114  * dp_peer_cleanup() – Cleanup peer information
2115  * @vdev: Datapath vdev
2116  * @peer: Datapath peer
2117  *
2118  */
2119 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2120 {
2121 	peer->last_assoc_rcvd = 0;
2122 	peer->last_disassoc_rcvd = 0;
2123 	peer->last_deauth_rcvd = 0;
2124 
2125 	/* cleanup the Rx reorder queues for this peer */
2126 	dp_peer_rx_cleanup(vdev, peer);
2127 }
2128 
2129 /* dp_teardown_256_ba_sessions() - Teardown sessions using a 256
2130  *                                 window size when a request with
2131  *                                 a 64 window size is received.
2132  *                                 This is done as a WAR since HW can
2133  *                                 have only one setting per peer (64 or 256).
2134  *                                 For HKv2, a per-TID buffer size setting is
2135  *                                 used for TIDs 0 to per_tid_basize_max_tid;
2136  *                                 for TIDs above per_tid_basize_max_tid the
2137  *                                 HKv1 method is used.
2138  * @peer: Datapath peer
2139  *
2140  * Return: void
2141  */
2142 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2143 {
2144 	uint8_t delba_rcode = 0;
2145 	int tid;
2146 	struct dp_rx_tid *rx_tid = NULL;
2147 
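	/* TIDs below per_tid_basize_max_tid have per-TID buffer size settings;
	 * only TIDs from per_tid_basize_max_tid onwards share the per-peer
	 * setting and may need their 256-sized sessions torn down.
	 */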
2148 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2149 	for (; tid < DP_MAX_TIDS; tid++) {
2150 		rx_tid = &peer->rx_tid[tid];
2151 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2152 
2153 		if (rx_tid->ba_win_size <= 64) {
2154 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2155 			continue;
2156 		} else {
2157 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2158 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2159 				/* send delba */
2160 				if (!rx_tid->delba_tx_status) {
2161 					rx_tid->delba_tx_retry++;
2162 					rx_tid->delba_tx_status = 1;
2163 					rx_tid->delba_rcode =
2164 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
2165 					delba_rcode = rx_tid->delba_rcode;
2166 
2167 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2168 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2169 							peer->vdev->pdev->ctrl_pdev,
2170 							peer->ctrl_peer,
2171 							peer->mac_addr.raw,
2172 							tid, peer->vdev->ctrl_vdev,
2173 							delba_rcode);
2174 				} else {
2175 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
2176 				}
2177 			} else {
2178 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2179 			}
2180 		}
2181 	}
2182 }
2183 
2184 /*
2185 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA response tx completion
2186 *
2187 * @peer: Datapath peer handle
2188 * @tid: TID number
2189 * @status: tx completion status
2190 * Return: 0 on success, error code on failure
2191 */
2192 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
2193 				      uint8_t tid, int status)
2194 {
2195 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2196 	struct dp_rx_tid *rx_tid = NULL;
2197 
2198 	if (!peer || peer->delete_in_progress) {
2199 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2200 			  "%s: Peer is NULL or delete in progress", __func__);
2201 		return QDF_STATUS_E_FAILURE;
2202 	}
2203 	rx_tid = &peer->rx_tid[tid];
2204 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2205 	if (status) {
2206 		rx_tid->num_addba_rsp_failed++;
2207 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2208 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2209 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2210 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2211 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
2212 			 __func__, tid);
2213 		return QDF_STATUS_SUCCESS;
2214 	}
2215 
2216 	rx_tid->num_addba_rsp_success++;
2217 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
2218 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2220 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
2221 			__func__, tid);
2222 		return QDF_STATUS_E_FAILURE;
2223 	}
2224 
2225 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
2226 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2227 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2228 			  "%s: default route is not set for peer: %pM",
2229 			  __func__, peer->mac_addr.raw);
2230 		return QDF_STATUS_E_FAILURE;
2231 	}
2232 
2233 	/* First Session */
2234 	if (peer->active_ba_session_cnt == 0) {
2235 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
2236 			peer->hw_buffer_size = 256;
2237 		else
2238 			peer->hw_buffer_size = 64;
2239 	}
2240 
2241 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
2242 
2243 	peer->active_ba_session_cnt++;
2244 
2245 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2246 
2247 	/* Kill any session having 256 buffer size
2248 	 * when 64 buffer size request is received.
2249 	 * Also, latch on to 64 as new buffer size.
2250 	 */
2251 	if (peer->kill_256_sessions) {
2252 		dp_teardown_256_ba_sessions(peer);
2253 		peer->kill_256_sessions = 0;
2254 	}
2255 	return QDF_STATUS_SUCCESS;
2256 }
2257 
2258 /*
2259 * dp_addba_responsesetup_wifi3() - Populate ADDBA response parameters for a TID
2260 *
2261 * @peer: Datapath peer handle
2262 * @tid: TID number
2263 * @dialogtoken: output dialogtoken
2264 * @statuscode: output statuscode
2265 * @buffersize: Output BA window size
2266 * @batimeout: Output BA timeout
2267 */
2268 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2269 	uint8_t *dialogtoken, uint16_t *statuscode,
2270 	uint16_t *buffersize, uint16_t *batimeout)
2271 {
2272 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2273 	struct dp_rx_tid *rx_tid = NULL;
2274 
2275 	if (!peer || peer->delete_in_progress) {
2276 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2277 			  "%s: Peer is NULL or delete in progress", __func__);
2278 		return;
2279 	}
2280 	rx_tid = &peer->rx_tid[tid];
2281 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2282 	rx_tid->num_of_addba_resp++;
2283 	/* setup ADDBA response parameters */
2284 	*dialogtoken = rx_tid->dialogtoken;
2285 	*statuscode = rx_tid->statuscode;
2286 	*buffersize = rx_tid->ba_win_size;
2287 	*batimeout  = 0;
2288 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2289 }
2290 
2291 /* dp_check_ba_buffersize() - Check the buffer size in the ADDBA request
2292  *                            and latch onto the size used by the first
2293  *                            active session.
2294  * @peer: Datapath peer
2295  * @tid: Tid
2296  * @buffersize: Block ack window size
2297  *
2298  * Return: void
2299  */
2300 static void dp_check_ba_buffersize(struct dp_peer *peer,
2301 				   uint16_t tid,
2302 				   uint16_t buffersize)
2303 {
2304 	struct dp_rx_tid *rx_tid = NULL;
2305 
2306 	rx_tid = &peer->rx_tid[tid];
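	/* TIDs below per_tid_basize_max_tid support a per-TID buffer size
	 * (HKv2). For the remaining TIDs the first active session latches the
	 * per-peer HW buffer size (64 or 256); a later request for 64 against
	 * a latched 256 triggers teardown of the 256-sized sessions.
	 */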
2307 	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2308 	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
2309 		rx_tid->ba_win_size = buffersize;
2310 		return;
2311 	} else {
2312 		if (peer->active_ba_session_cnt == 0) {
2313 			rx_tid->ba_win_size = buffersize;
2314 		} else {
2315 			if (peer->hw_buffer_size == 64) {
2316 				if (buffersize <= 64)
2317 					rx_tid->ba_win_size = buffersize;
2318 				else
2319 					rx_tid->ba_win_size = peer->hw_buffer_size;
2320 			} else if (peer->hw_buffer_size == 256) {
2321 				if (buffersize > 64) {
2322 					rx_tid->ba_win_size = buffersize;
2323 				} else {
2324 					rx_tid->ba_win_size = buffersize;
2325 					peer->hw_buffer_size = 64;
2326 					peer->kill_256_sessions = 1;
2327 				}
2328 			}
2329 		}
2330 	}
2331 }
2332 
2333 /*
2334  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2335  *
2336  * @peer: Datapath peer handle
2337  * @dialogtoken: dialogtoken from ADDBA frame
2338  * @tid: TID number
2339  * @batimeout: BA timeout
2340  * @buffersize: BA window size
2341  * @startseqnum: Start seq. number received in BA sequence control
2342  *
2343  * Return: 0 on success, error code on failure
2344  */
2345 int dp_addba_requestprocess_wifi3(void *peer_handle,
2346 				  uint8_t dialogtoken,
2347 				  uint16_t tid, uint16_t batimeout,
2348 				  uint16_t buffersize,
2349 				  uint16_t startseqnum)
2350 {
2351 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2352 	struct dp_rx_tid *rx_tid = NULL;
2353 
2354 	if (!peer || peer->delete_in_progress) {
2355 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2356 			  "%s: Peer is NULL or delete in progress", __func__);
2357 		return QDF_STATUS_E_FAILURE;
2358 	}
2359 	rx_tid = &peer->rx_tid[tid];
2360 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2361 	rx_tid->num_of_addba_req++;
2362 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
2363 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
2364 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
2365 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2366 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2367 		peer->active_ba_session_cnt--;
2368 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2369 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2370 			  "%s: Rx Tid- %d hw qdesc is already setup",
2371 			__func__, tid);
2372 		return QDF_STATUS_E_FAILURE;
2373 	}
2374 
2375 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2376 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2377 		return QDF_STATUS_E_FAILURE;
2378 	}
2379 	dp_check_ba_buffersize(peer, tid, buffersize);
2380 
2381 	if (dp_rx_tid_setup_wifi3(peer, tid,
2382 	    rx_tid->ba_win_size, startseqnum)) {
2383 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2384 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2385 		return QDF_STATUS_E_FAILURE;
2386 	}
2387 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
2388 
2389 	rx_tid->dialogtoken = dialogtoken;
2390 	rx_tid->startseqnum = startseqnum;
2391 
2392 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
2393 		rx_tid->statuscode = rx_tid->userstatuscode;
2394 	else
2395 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
2396 
2397 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2398 
2399 	return QDF_STATUS_SUCCESS;
2400 }
2401 
2402 /*
2403 * dp_set_addba_response() – Set a user defined ADDBA response status code
2404 *
2405 * @peer: Datapath peer handle
2406 * @tid: TID number
2407 * @statuscode: response status code to be set
2408 */
2409 void dp_set_addba_response(void *peer_handle, uint8_t tid,
2410 	uint16_t statuscode)
2411 {
2412 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2413 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2414 
2415 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2416 	rx_tid->userstatuscode = statuscode;
2417 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2418 }
2419 
2420 /*
2421 * dp_delba_process_wifi3() - Process DELBA from peer
2422 * @peer: Datapath peer handle
2423 * @tid: TID number
2424 * @reasoncode: Reason code received in DELBA frame
2425 *
2426 * Return: 0 on success, error code on failure
2427 */
2428 int dp_delba_process_wifi3(void *peer_handle,
2429 	int tid, uint16_t reasoncode)
2430 {
2431 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2432 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2433 
2434 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2435 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2436 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2437 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2438 		return QDF_STATUS_E_FAILURE;
2439 	}
2440 	/* TODO: See if we can delete the existing REO queue descriptor and
2441 	 * replace it with a new one without queue extension descriptors to save
2442 	 * memory
2443 	 */
2444 	rx_tid->delba_rcode = reasoncode;
2445 	rx_tid->num_of_delba_req++;
2446 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2447 
2448 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
2449 	peer->active_ba_session_cnt--;
2450 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2451 	return 0;
2452 }
2453 
2454 /*
2455  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion (retry DELBA on failure)
2456  *
2457  * @peer: Datapath peer handle
2458  * @tid: TID number
2459  * @status: tx completion status
2460  * Return: 0 on success, error code on failure
2461  */
2462 
2463 int dp_delba_tx_completion_wifi3(void *peer_handle,
2464 				 uint8_t tid, int status)
2465 {
2466 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2467 	struct dp_rx_tid *rx_tid = NULL;
2468 
2469 	if (!peer || peer->delete_in_progress) {
2470 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2471 			  "%s: Peer is NULL or delete in progress", __func__);
2472 		return QDF_STATUS_E_FAILURE;
2473 	}
2474 	rx_tid = &peer->rx_tid[tid];
2475 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2476 	if (status) {
2477 		rx_tid->delba_tx_fail_cnt++;
2478 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
2479 			rx_tid->delba_tx_retry = 0;
2480 			rx_tid->delba_tx_status = 0;
2481 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2482 		} else {
2483 			rx_tid->delba_tx_retry++;
2484 			rx_tid->delba_tx_status = 1;
2485 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2486 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2487 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2488 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2489 				rx_tid->delba_rcode);
2490 		}
2491 		return QDF_STATUS_SUCCESS;
2492 	} else {
2493 		rx_tid->delba_tx_success_cnt++;
2494 		rx_tid->delba_tx_retry = 0;
2495 		rx_tid->delba_tx_status = 0;
2496 	}
2497 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2498 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2499 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2500 		peer->active_ba_session_cnt--;
2501 	}
2502 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2503 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2504 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2505 	}
2506 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2507 
2508 	return QDF_STATUS_SUCCESS;
2509 }
2510 
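/**
 * dp_rx_discard() - Discard MSDUs received for a partly-deleted peer
 * @vdev: Datapath vdev handle
 * @peer: Datapath peer handle
 * @tid: TID number
 * @msdu_list: list of MSDU nbufs to be freed
 *
 * Return: none
 */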
2511 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2512 	qdf_nbuf_t msdu_list)
2513 {
2514 	while (msdu_list) {
2515 		qdf_nbuf_t msdu = msdu_list;
2516 
2517 		msdu_list = qdf_nbuf_next(msdu_list);
2518 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2519 			  "discard rx %pK from partly-deleted peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
2520 			  msdu, peer,
2521 			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2522 			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2523 			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2524 		qdf_nbuf_free(msdu);
2525 	}
2526 }
2527 
2528 
2529 /**
2530  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2531  * @vdev_handle: Datapath vdev handle
2532  * @peer_handle: Datapath peer handle
2533  * @sec_type: security type
2534  * @rx_pn: Receive PN starting number
2535  *
2536  * Return: none
2537  */
2538 
2539 void
2540 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
2541 {
2542 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2543 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2544 	struct dp_pdev *pdev;
2545 	struct dp_soc *soc;
2546 	int i;
2547 	uint8_t pn_size;
2548 	struct hal_reo_cmd_params params;
2549 
2550 	/* preconditions */
2551 	qdf_assert(vdev);
2552 
2553 	pdev = vdev->pdev;
2554 	soc = pdev->soc;
2555 
2556 
2557 	qdf_mem_zero(&params, sizeof(params));
2558 
2559 	params.std.need_status = 1;
2560 	params.u.upd_queue_params.update_pn_valid = 1;
2561 	params.u.upd_queue_params.update_pn_size = 1;
2562 	params.u.upd_queue_params.update_pn = 1;
2563 	params.u.upd_queue_params.update_pn_check_needed = 1;
2564 	params.u.upd_queue_params.update_svld = 1;
2565 	params.u.upd_queue_params.svld = 0;
2566 
2567 	peer->security[dp_sec_ucast].sec_type = sec_type;
2568 
2569 	switch (sec_type) {
2570 	case cdp_sec_type_tkip_nomic:
2571 	case cdp_sec_type_aes_ccmp:
2572 	case cdp_sec_type_aes_ccmp_256:
2573 	case cdp_sec_type_aes_gcmp:
2574 	case cdp_sec_type_aes_gcmp_256:
2575 		params.u.upd_queue_params.pn_check_needed = 1;
2576 		params.u.upd_queue_params.pn_size = 48;
2577 		pn_size = 48;
2578 		break;
2579 	case cdp_sec_type_wapi:
2580 		params.u.upd_queue_params.pn_check_needed = 1;
2581 		params.u.upd_queue_params.pn_size = 128;
2582 		pn_size = 128;
2583 		if (vdev->opmode == wlan_op_mode_ap) {
2584 			params.u.upd_queue_params.pn_even = 1;
2585 			params.u.upd_queue_params.update_pn_even = 1;
2586 		} else {
2587 			params.u.upd_queue_params.pn_uneven = 1;
2588 			params.u.upd_queue_params.update_pn_uneven = 1;
2589 		}
2590 		break;
2591 	default:
2592 		params.u.upd_queue_params.pn_check_needed = 0;
2593 		pn_size = 0;
2594 		break;
2595 	}
2596 
2597 
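	/* Program the PN configuration into each TID's REO queue descriptor
	 * via UPDATE_RX_REO_QUEUE commands.
	 */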
2598 	for (i = 0; i < DP_MAX_TIDS; i++) {
2599 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2600 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2601 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2602 			params.std.addr_lo =
2603 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2604 			params.std.addr_hi =
2605 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2606 
2607 			if (pn_size) {
2608 				QDF_TRACE(QDF_MODULE_ID_DP,
2609 					  QDF_TRACE_LEVEL_INFO_HIGH,
2610 					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
2611 					  __func__, i, rx_pn[3], rx_pn[2],
2612 					  rx_pn[1], rx_pn[0]);
2613 				params.u.upd_queue_params.update_pn_valid = 1;
2614 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2615 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2616 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2617 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2618 			}
2619 			rx_tid->pn_size = pn_size;
2620 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2621 				dp_rx_tid_update_cb, rx_tid);
2622 		} else {
2623 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2624 				  "PN check not set up for TID %d", i);
2625 		}
2626 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2627 	}
2628 }
2629 
2630 
2631 void
2632 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2633 	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2634 	u_int32_t *rx_pn)
2635 {
2636 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2637 	struct dp_peer *peer;
2638 	int sec_index;
2639 
2640 	peer = dp_peer_find_by_id(soc, peer_id);
2641 	if (!peer) {
2642 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2643 			  "Couldn't find peer from ID %d - skipping security inits",
2644 			  peer_id);
2645 		return;
2646 	}
2647 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2648 		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
2649 		  peer,
2650 		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2651 		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2652 		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2653 		  is_unicast ? "ucast" : "mcast",
2654 		  sec_type);
2655 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2656 	peer->security[sec_index].sec_type = sec_type;
2657 #ifdef notyet /* TODO: See if this is required for defrag support */
2658 	/* michael key only valid for TKIP, but for simplicity,
2659 	 * copy it anyway
2660 	 */
2661 	qdf_mem_copy(
2662 		&peer->security[sec_index].michael_key[0],
2663 		michael_key,
2664 		sizeof(peer->security[sec_index].michael_key));
2665 #ifdef BIG_ENDIAN_HOST
2666 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2667 				 sizeof(peer->security[sec_index].michael_key));
2668 #endif /* BIG_ENDIAN_HOST */
2669 #endif
2670 
2671 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2672 	if (sec_type != cdp_sec_type_wapi) {
2673 		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
2674 	} else {
2675 		for (i = 0; i < DP_MAX_TIDS; i++) {
2676 			/*
2677 			 * Setting PN valid bit for WAPI sec_type,
2678 			 * since WAPI PN has to be started with predefined value
2679 			 */
2680 			peer->tids_last_pn_valid[i] = 1;
2681 			qdf_mem_copy(
2682 				(u_int8_t *) &peer->tids_last_pn[i],
2683 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2684 			peer->tids_last_pn[i].pn128[1] =
2685 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2686 			peer->tids_last_pn[i].pn128[0] =
2687 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2688 		}
2689 	}
2690 #endif
2691 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2692 	 * all security types and last pn for WAPI) once REO command API
2693 	 * is available
2694 	 */
2695 
2696 	dp_peer_unref_del_find_by_id(peer);
2697 }
2698 
2699 #ifndef CONFIG_WIN
2700 /**
2701  * dp_register_peer() - Register peer into physical device
2702  * @pdev - data path device instance
2703  * @sta_desc - peer description
2704  *
2705  * Register peer into physical device
2706  *
2707  * Return: QDF_STATUS_SUCCESS registration success
2708  *         QDF_STATUS_E_FAULT peer not found
2709  */
2710 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2711 		struct ol_txrx_desc_type *sta_desc)
2712 {
2713 	struct dp_peer *peer;
2714 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2715 
2716 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2717 			sta_desc->sta_id);
2718 	if (!peer)
2719 		return QDF_STATUS_E_FAULT;
2720 
2721 	qdf_spin_lock_bh(&peer->peer_info_lock);
2722 	peer->state = OL_TXRX_PEER_STATE_CONN;
2723 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2724 
2725 	return QDF_STATUS_SUCCESS;
2726 }
2727 
2728 /**
2729  * dp_clear_peer() - remove peer from physical device
2730  * @pdev - data path device instance
2731  * @sta_id - local peer id
2732  *
2733  * remove peer from physical device
2734  *
2735  * Return: QDF_STATUS_SUCCESS registration success
2736  *         QDF_STATUS_E_FAULT peer not found
2737  */
2738 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2739 {
2740 	struct dp_peer *peer;
2741 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2742 
2743 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2744 	if (!peer)
2745 		return QDF_STATUS_E_FAULT;
2746 
2747 	qdf_spin_lock_bh(&peer->peer_info_lock);
2748 	peer->state = OL_TXRX_PEER_STATE_DISC;
2749 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2750 
2751 	return QDF_STATUS_SUCCESS;
2752 }
2753 
2754 /**
2755  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2756  * @pdev - data path device instance
2757  * @vdev - virtual interface instance
2758  * @peer_addr - peer mac address
2759  * @peer_id - local peer id with target mac address
2760  *
2761  * Find peer by peer mac address within vdev
2762  *
2763  * Return: peer instance void pointer
2764  *         NULL cannot find target peer
2765  */
2766 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2767 		struct cdp_vdev *vdev_handle,
2768 		uint8_t *peer_addr, uint8_t *local_id)
2769 {
2770 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2771 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2772 	struct dp_peer *peer;
2773 
2774 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2775 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2776 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2777 
2778 	if (!peer)
2779 		return NULL;
2780 
2781 	if (peer->vdev != vdev) {
2782 		dp_peer_unref_delete(peer);
2783 		return NULL;
2784 	}
2785 
2786 	*local_id = peer->local_id;
2787 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2788 
2789 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2790 	 * Decrement it here.
2791 	 */
2792 	dp_peer_unref_delete(peer);
2793 
2794 	return peer;
2795 }
2796 
2797 /**
2798  * dp_local_peer_id() - Find local peer id within peer instance
2799  * @peer - peer instance
2800  *
2801  * Find local peer id within peer instance
2802  *
2803  * Return: local peer id
2804  */
2805 uint16_t dp_local_peer_id(void *peer)
2806 {
2807 	return ((struct dp_peer *)peer)->local_id;
2808 }
2809 
2810 /**
2811  * dp_peer_find_by_local_id() - Find peer by local peer id
2812  * @pdev - data path device instance
2813  * @local_peer_id - local peer id want to find
2814  *
2815  * Find peer by local peer id within physical device
2816  *
2817  * Return: peer instance void pointer
2818  *         NULL cannot find target peer
2819  */
2820 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2821 {
2822 	struct dp_peer *peer;
2823 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2824 
2825 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2826 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2827 				   "Incorrect local id %u", local_id);
2828 		return NULL;
2829 	}
2830 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2831 	peer = pdev->local_peer_ids.map[local_id];
2832 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2833 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2834 	return peer;
2835 }
2836 
2837 /**
2838  * dp_peer_state_update() - update peer local state
2839  * @pdev - data path device instance
2840  * @peer_addr - peer mac address
2841  * @state - new peer local state
2842  *
2843  * update peer local state
2844  *
2845  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if peer is not found
2846  */
2847 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2848 		enum ol_txrx_peer_state state)
2849 {
2850 	struct dp_peer *peer;
2851 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2852 
2853 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2854 	if (NULL == peer) {
2855 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2856 			  "Failed to find peer for: [%pM]", peer_mac);
2857 		return QDF_STATUS_E_FAILURE;
2858 	}
2859 	peer->state = state;
2860 
2861 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2862 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2863 	 * Decrement it here.
2864 	 */
2865 	dp_peer_unref_delete(peer);
2866 
2867 	return QDF_STATUS_SUCCESS;
2868 }
2869 
2870 /**
2871  * dp_get_vdevid() - Get virtual interface id which peer registered
2872  * @peer - peer instance
2873  * @vdev_id - virtual interface id which peer registered
2874  *
2875  * Get virtual interface id which peer registered
2876  *
2877  * Return: QDF_STATUS_SUCCESS
2878  */
2879 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2880 {
2881 	struct dp_peer *peer = peer_handle;
2882 
2883 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2884 			peer, peer->vdev, peer->vdev->vdev_id);
2885 	*vdev_id = peer->vdev->vdev_id;
2886 	return QDF_STATUS_SUCCESS;
2887 }
2888 
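/**
 * dp_get_vdev_by_sta_id() - Get vdev handle from the local peer (sta) id
 * @pdev_handle: Datapath pdev handle
 * @sta_id: local peer id
 *
 * Return: vdev handle on success, NULL if the peer is not found
 */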
2889 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2890 				       uint8_t sta_id)
2891 {
2892 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2893 	struct dp_peer *peer = NULL;
2894 
2895 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2896 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2897 			  "Invalid sta id passed");
2898 		return NULL;
2899 	}
2900 
2901 	if (!pdev) {
2902 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2903 			  "PDEV not found for sta_id [%d]", sta_id);
2904 		return NULL;
2905 	}
2906 
2907 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2908 	if (!peer) {
2909 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2910 			  "PEER [%d] not found", sta_id);
2911 		return NULL;
2912 	}
2913 
2914 	return (struct cdp_vdev *)peer->vdev;
2915 }
2916 
2917 /**
2918  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2919  * @peer - peer instance
2920  *
2921  * Get virtual interface instance which peer belongs
2922  *
2923  * Return: virtual interface instance pointer
2924  *         NULL in case cannot find
2925  */
2926 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2927 {
2928 	struct dp_peer *peer = peer_handle;
2929 
2930 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2931 	return (struct cdp_vdev *)peer->vdev;
2932 }
2933 
2934 /**
2935  * dp_peer_get_peer_mac_addr() - Get peer mac address
2936  * @peer - peer instance
2937  *
2938  * Get peer mac address
2939  *
2940  * Return: peer mac address pointer
2941  *         NULL in case cannot find
2942  */
2943 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2944 {
2945 	struct dp_peer *peer = peer_handle;
2946 	uint8_t *mac;
2947 
2948 	mac = peer->mac_addr.raw;
2949 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2950 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2951 	return peer->mac_addr.raw;
2952 }
2953 
2954 /**
2955  * dp_get_peer_state() - Get local peer state
2956  * @peer - peer instance
2957  *
2958  * Get local peer state
2959  *
2960  * Return: peer status
2961  */
2962 int dp_get_peer_state(void *peer_handle)
2963 {
2964 	struct dp_peer *peer = peer_handle;
2965 
2966 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2967 	return peer->state;
2968 }
2969 
2970 /**
2971  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2972  * @pdev - data path device instance
2973  *
2974  * local peer id pool alloc for physical device
2975  *
2976  * Return: none
2977  */
2978 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2979 {
2980 	int i;
2981 
2982 	/* point the freelist to the first ID */
2983 	pdev->local_peer_ids.freelist = 0;
2984 
2985 	/* link each ID to the next one */
2986 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2987 		pdev->local_peer_ids.pool[i] = i + 1;
2988 		pdev->local_peer_ids.map[i] = NULL;
2989 	}
2990 
2991 	/* link the last ID to itself, to mark the end of the list */
2992 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2993 	pdev->local_peer_ids.pool[i] = i;
2994 
2995 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2996 	DP_TRACE(INFO, "Peer pool init");
2997 }
2998 
2999 /**
3000  * dp_local_peer_id_alloc() - allocate local peer id
3001  * @pdev - data path device instance
3002  * @peer - new peer instance
3003  *
3004  * allocate local peer id
3005  *
3006  * Return: none
3007  */
3008 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3009 {
3010 	int i;
3011 
3012 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3013 	i = pdev->local_peer_ids.freelist;
3014 	if (pdev->local_peer_ids.pool[i] == i) {
3015 		/* the list is empty, except for the list-end marker */
3016 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3017 	} else {
3018 		/* take the head ID and advance the freelist */
3019 		peer->local_id = i;
3020 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3021 		pdev->local_peer_ids.map[i] = peer;
3022 	}
3023 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3024 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
3025 }
3026 
3027 /**
3028  * dp_local_peer_id_free() - remove local peer id
3029  * @pdev - data path device instance
3030  * @peer - peer instance should be removed
3031  *
3032  * remove local peer id
3033  *
3034  * Return: none
3035  */
3036 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3037 {
3038 	int i = peer->local_id;
3039 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3040 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3041 		return;
3042 	}
3043 
3044 	/* put this ID on the head of the freelist */
3045 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3046 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3047 	pdev->local_peer_ids.freelist = i;
3048 	pdev->local_peer_ids.map[i] = NULL;
3049 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3050 }
3051 #endif
3052 
3053 /**
3054  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3055  * @soc_handle: DP SOC handle
3056  * @peer_id: peer_id of the peer
3057  * @peer_mac: buffer to be filled with the peer's MAC address
3058  * Return: vdev_id of the vap
3059  */
3060 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3061 		uint16_t peer_id, uint8_t *peer_mac)
3062 {
3063 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3064 	struct dp_peer *peer;
3065 	uint8_t vdev_id;
3066 
3067 	peer = dp_peer_find_by_id(soc, peer_id);
3068 
3069 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3070 		  "soc %pK peer_id %d", soc, peer_id);
3071 
3072 	if (!peer) {
3073 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3074 			  "peer not found");
3075 		return CDP_INVALID_VDEV_ID;
3076 	}
3077 
3078 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
3079 	vdev_id = peer->vdev->vdev_id;
3080 
3081 	dp_peer_unref_del_find_by_id(peer);
3082 
3083 	return vdev_id;
3084 }
3085 
3086 /**
3087  * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
3088  * @peer: DP peer handle
3089  * @dp_stats_cmd_cb: REO command callback function
3090  * @cb_ctxt: Callback context
3091  *
3092  * Return: none
3093  */
3094 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3095 			void *cb_ctxt)
3096 {
3097 	struct dp_soc *soc = peer->vdev->pdev->soc;
3098 	struct hal_reo_cmd_params params;
3099 	int i;
3100 
3101 	if (!dp_stats_cmd_cb)
3102 		return;
3103 
3104 	qdf_mem_zero(&params, sizeof(params));
3105 	for (i = 0; i < DP_MAX_TIDS; i++) {
3106 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3107 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
3108 			params.std.need_status = 1;
3109 			params.std.addr_lo =
3110 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3111 			params.std.addr_hi =
3112 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3113 
3114 			if (cb_ctxt) {
3115 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3116 					&params, dp_stats_cmd_cb, cb_ctxt);
3117 			} else {
3118 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3119 					&params, dp_stats_cmd_cb, rx_tid);
3120 			}
3121 
3122 			/* Flush REO descriptor from HW cache to update stats
3123 			 * in descriptor memory. This is to help debugging */
3124 			qdf_mem_zero(&params, sizeof(params));
3125 			params.std.need_status = 0;
3126 			params.std.addr_lo =
3127 				rx_tid->hw_qdesc_paddr & 0xffffffff;
3128 			params.std.addr_hi =
3129 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3130 			params.u.fl_cache_params.flush_no_inval = 1;
3131 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3132 				NULL);
3133 		}
3134 	}
3135 }
3136 
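/**
 * dp_set_michael_key() - Set the TKIP Michael key for a peer
 * @peer_handle: Datapath peer handle
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: Michael key to be copied
 *
 * Return: none
 */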
3137 void dp_set_michael_key(struct cdp_peer *peer_handle,
3138 			bool is_unicast, uint32_t *key)
3139 {
3140 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
3141 	uint8_t sec_index = is_unicast ? 1 : 0;
3142 
3143 	if (!peer) {
3144 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3145 			  "peer not found");
3146 		return;
3147 	}
3148 
3149 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3150 		     key, IEEE80211_WEP_MICLEN);
3151 }
3152 
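/**
 * dp_peer_find_by_id_valid() - Check whether a peer exists for a peer id
 * @soc: DP SOC handle
 * @peer_id: peer id to look up
 *
 * Return: true if a peer is found for the given peer_id, false otherwise
 */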
3153 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3154 {
3155 	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3156 
3157 	if (peer) {
3158 		/*
3159 		 * Decrement the peer ref which is taken as part of
3160 		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3161 		 */
3162 		dp_peer_unref_del_find_by_id(peer);
3163 
3164 		return true;
3165 	}
3166 
3167 	return false;
3168 }
3169