xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include "dp_htt.h"
22 #include "dp_types.h"
23 #include "dp_internal.h"
24 #include "dp_peer.h"
25 #include <hal_api.h>
26 #include <hal_reo.h>
27 #ifdef CONFIG_MCL
28 #include <cds_ieee80211_common.h>
29 #include <cds_api.h>
30 #endif
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 
34 #ifdef DP_LFR
35 static inline void
36 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
37 					uint8_t valid)
38 {
39 	params->u.upd_queue_params.update_svld = 1;
40 	params->u.upd_queue_params.svld = valid;
41 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
42 		"%s: Setting SSN valid bit to %d\n",
43 				__func__, valid);
44 }
45 #else
46 static inline void
47 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
48 					uint8_t valid) {}
49 #endif
50 
51 static inline int dp_peer_find_mac_addr_cmp(
52 	union dp_align_mac_addr *mac_addr1,
53 	union dp_align_mac_addr *mac_addr2)
54 {
55 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
56 		/*
57 		 * Intentionally use & rather than &&;
58 		 * because the operands are binary rather than generic boolean,
59 		 * the functionality is equivalent.
60 		 * Using && has the advantage of short-circuited evaluation,
61 		 * but using & has the advantage of no conditional branching,
62 		 * which is a more significant benefit here.
63 		 */
64 		&
65 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
66 }
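/*
 * Illustrative note: like memcmp(), dp_peer_find_mac_addr_cmp() returns
 * 0 on a match, so callers test with "== 0".  A minimal usage sketch,
 * assuming two aligned addresses a and b:
 *
 *	if (dp_peer_find_mac_addr_cmp(&a, &b) == 0)
 *		found = 1;
 */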
67 
68 static int dp_peer_find_map_attach(struct dp_soc *soc)
69 {
70 	uint32_t max_peers, peer_map_size;
71 
72 	max_peers = soc->max_peers;
73 	/* allocate the peer ID -> peer object map */
74 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
75 		"\n<=== cfg max peer id %d ===>\n", max_peers);
76 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
77 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
78 	if (!soc->peer_id_to_obj_map) {
79 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
80 			"%s: peer map memory allocation failed\n", __func__);
81 		return QDF_STATUS_E_NOMEM;
82 	}
83 
84 	/*
85 	 * The peer_id_to_obj_map doesn't really need to be initialized,
86 	 * since elements are only used after they have been individually
87 	 * initialized.
88 	 * However, it is convenient for debugging to have all elements
89 	 * that are not in use set to 0.
90 	 */
91 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
92 	return 0; /* success */
93 }
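/*
 * Note: peer_id_to_obj_map is a flat array indexed directly by the
 * firmware-assigned peer ID, so lookups elsewhere in this file (e.g. in
 * dp_rx_peer_map_handler) are O(1) array reads rather than hash searches.
 */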
94 
95 static int dp_log2_ceil(unsigned value)
96 {
97 	unsigned tmp = value;
98 	int log2 = -1;
99 
100 	while (tmp) {
101 		log2++;
102 		tmp >>= 1;
103 	}
104 	if (1 << log2 != value)
105 		log2++;
106 	return log2;
107 }
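/*
 * Worked examples: dp_log2_ceil(1) = 0, dp_log2_ceil(4) = 2,
 * dp_log2_ceil(5) = 3, dp_log2_ceil(33) = 6.  The loop finds the index
 * of the highest set bit; the final test rounds up when the input is
 * not an exact power of two.  A value of 0 would leave log2 at -1, so
 * callers are expected to pass a non-zero count.
 */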
108 
109 static int dp_peer_find_add_id_to_obj(
110 	struct dp_peer *peer,
111 	uint16_t peer_id)
112 {
113 	int i;
114 
115 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
116 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
117 			peer->peer_ids[i] = peer_id;
118 			return 0; /* success */
119 		}
120 	}
121 	return QDF_STATUS_E_FAILURE; /* failure */
122 }
123 
124 #define DP_PEER_HASH_LOAD_MULT  2
125 #define DP_PEER_HASH_LOAD_SHIFT 0
126 
127 #define DP_AST_HASH_LOAD_MULT  2
128 #define DP_AST_HASH_LOAD_SHIFT 0
129 
130 static int dp_peer_find_hash_attach(struct dp_soc *soc)
131 {
132 	int i, hash_elems, log2;
133 
134 	/* allocate the peer MAC address -> peer object hash table */
135 	hash_elems = soc->max_peers;
136 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
137 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
138 	log2 = dp_log2_ceil(hash_elems);
139 	hash_elems = 1 << log2;
140 
141 	soc->peer_hash.mask = hash_elems - 1;
142 	soc->peer_hash.idx_bits = log2;
143 	/* allocate an array of TAILQ peer object lists */
144 	soc->peer_hash.bins = qdf_mem_malloc(
145 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
146 	if (!soc->peer_hash.bins)
147 		return QDF_STATUS_E_NOMEM;
148 
149 	for (i = 0; i < hash_elems; i++)
150 		TAILQ_INIT(&soc->peer_hash.bins[i]);
151 
152 	return 0;
153 }
154 
155 static void dp_peer_find_hash_detach(struct dp_soc *soc)
156 {
157 	qdf_mem_free(soc->peer_hash.bins);
158 }
159 
160 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
161 	union dp_align_mac_addr *mac_addr)
162 {
163 	unsigned index;
164 
165 	index =
166 		mac_addr->align2.bytes_ab ^
167 		mac_addr->align2.bytes_cd ^
168 		mac_addr->align2.bytes_ef;
169 	index ^= index >> soc->peer_hash.idx_bits;
170 	index &= soc->peer_hash.mask;
171 	return index;
172 }
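/*
 * Concrete example: for MAC address 00:11:22:33:44:55 the three 16-bit
 * words are 0x1100, 0x3322 and 0x5544 (assuming a little-endian layout
 * of dp_align_mac_addr), giving 0x1100 ^ 0x3322 ^ 0x5544 = 0x7766
 * before folding; the shift-XOR mixes the high bits into the low ones,
 * and the final mask keeps only idx_bits bits, so the result is always
 * a valid index into peer_hash.bins[].
 */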
173 
175 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
176 {
177 	unsigned index;
178 
179 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
180 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
181 	/*
182 	 * It is important to add the new peer at the tail of the peer list
183 	 * with the bin index.  Together with having the hash_find function
184 	 * search from head to tail, this ensures that if two entries with
185 	 * the same MAC address are stored, the one added first will be
186 	 * found first.
187 	 */
188 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
189 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
190 }
191 
192 #ifdef FEATURE_AST
193 /*
194  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
195  * @soc: SoC handle
196  *
197  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
198  */
199 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
200 {
201 	int i, hash_elems, log2;
202 
203 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
204 		DP_AST_HASH_LOAD_SHIFT);
205 
206 	log2 = dp_log2_ceil(hash_elems);
207 	hash_elems = 1 << log2;
208 
209 	soc->ast_hash.mask = hash_elems - 1;
210 	soc->ast_hash.idx_bits = log2;
211 
212 	/* allocate an array of TAILQ AST entry lists */
213 	soc->ast_hash.bins = qdf_mem_malloc(
214 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
215 				dp_ast_entry)));
216 
217 	if (!soc->ast_hash.bins)
218 		return QDF_STATUS_E_NOMEM;
219 
220 	for (i = 0; i < hash_elems; i++)
221 		TAILQ_INIT(&soc->ast_hash.bins[i]);
222 
223 	return 0;
224 }
225 
226 /*
227  * dp_peer_ast_hash_detach() - Free AST Hash table
228  * @soc: SoC handle
229  *
230  * Return: None
231  */
232 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
233 {
234 	qdf_mem_free(soc->ast_hash.bins);
235 }
236 
237 /*
238  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
239  * @soc: SoC handle
240  *
241  * Return: AST hash
242  */
243 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
244 	union dp_align_mac_addr *mac_addr)
245 {
246 	uint32_t index;
247 
248 	index =
249 		mac_addr->align2.bytes_ab ^
250 		mac_addr->align2.bytes_cd ^
251 		mac_addr->align2.bytes_ef;
252 	index ^= index >> soc->ast_hash.idx_bits;
253 	index &= soc->ast_hash.mask;
254 	return index;
255 }
256 
257 /*
258  * dp_peer_ast_hash_add() - Add AST entry into hash table
259  * @soc: SoC handle
260  *
261  * This function adds the AST entry into SoC AST hash table
262  * It assumes caller has taken the ast lock to protect the access to this table
263  *
264  * Return: None
265  */
266 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
267 		struct dp_ast_entry *ase)
268 {
269 	uint32_t index;
270 
271 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
272 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
273 }
274 
275 /*
276  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
277  * @soc: SoC handle
278  *
279  * This function removes the AST entry from soc AST hash table
280  * It assumes caller has taken the ast lock to protect the access to this table
281  *
282  * Return: None
283  */
284 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
285 		struct dp_ast_entry *ase)
286 {
287 	unsigned index;
288 	struct dp_ast_entry *tmpase;
289 	int found = 0;
290 
291 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
292 	/* Check that the list is not empty before delete */
293 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
294 
295 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
296 		if (tmpase == ase) {
297 			found = 1;
298 			break;
299 		}
300 	}
301 
302 	QDF_ASSERT(found);
303 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
304 }
305 
306 /*
307  * dp_peer_ast_hash_find() - Find AST entry by MAC address
308  * @soc: SoC handle
309  *
310  * It assumes caller has taken the ast lock to protect the access to
311  * AST hash table
312  *
313  * Return: AST entry
314  */
315 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
316 						uint8_t *ast_mac_addr)
317 {
318 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
319 	unsigned index;
320 	struct dp_ast_entry *ase;
321 
322 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
323 			ast_mac_addr, DP_MAC_ADDR_LEN);
324 	mac_addr = &local_mac_addr_aligned;
325 
326 	index = dp_peer_ast_hash_index(soc, mac_addr);
327 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
328 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
329 			return ase;
330 		}
331 	}
332 
333 	return NULL;
334 }
335 
336 /*
337  * dp_peer_map_ast() - Map the ast entry with HW AST Index
338  * @soc: SoC handle
339  * @peer: peer to which ast node belongs
340  * @mac_addr: MAC address of ast node
341  * @hw_peer_id: HW AST Index returned by target in peer map event
342  * @vdev_id: vdev id for VAP to which the peer belongs to
343  * @vdev_id: vdev id of the VAP to which the peer belongs
344  * Return: None
345  */
346 static inline void dp_peer_map_ast(struct dp_soc *soc,
347 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
348 	uint8_t vdev_id)
349 {
350 	struct dp_ast_entry *ast_entry;
351 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
352 	bool ast_entry_found = FALSE;
353 
354 	if (!peer) {
355 		return;
356 	}
357 
358 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
359 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
360 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
361 		mac_addr[1], mac_addr[2], mac_addr[3],
362 		mac_addr[4], mac_addr[5]);
363 
364 	qdf_spin_lock_bh(&soc->ast_lock);
365 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
366 		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
367 				DP_MAC_ADDR_LEN))) {
368 			ast_entry->ast_idx = hw_peer_id;
369 			soc->ast_table[hw_peer_id] = ast_entry;
370 			ast_entry->is_active = TRUE;
371 			peer_type = ast_entry->type;
372 			ast_entry_found = TRUE;
373 		}
374 	}
375 
376 	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
377 		if (soc->cdp_soc.ol_ops->peer_map_event) {
378 			soc->cdp_soc.ol_ops->peer_map_event(
379 			soc->ctrl_psoc, peer->peer_ids[0],
380 			hw_peer_id, vdev_id,
381 			mac_addr, peer_type);
382 		}
383 	} else {
384 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
385 			"AST entry not found\n");
386 	}
387 
388 	qdf_spin_unlock_bh(&soc->ast_lock);
389 	return;
390 }
391 
392 /*
393  * dp_peer_add_ast() - Allocate and add AST entry into peer list
394  * @soc: SoC handle
395  * @peer: peer to which ast node belongs
396  * @mac_addr: MAC address of ast node
397  * @type: AST entry type
 * @flags: wds or hmwds
398  *
399  * This API is used by WDS source port learning function to
400  * add a new AST entry into peer AST list
401  *
402  * Return: 0 if new entry is allocated,
403  *        -1 if entry add failed
404  */
405 int dp_peer_add_ast(struct dp_soc *soc,
406 			struct dp_peer *peer,
407 			uint8_t *mac_addr,
408 			enum cdp_txrx_ast_entry_type type,
409 			uint32_t flags)
410 {
411 	struct dp_ast_entry *ast_entry;
412 	struct dp_vdev *vdev = peer->vdev;
413 	uint8_t next_node_mac[6];
414 	int  ret = -1;
415 
416 	if (!vdev) {
417 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
418 			FL("Peer's vdev is NULL"));
419 		QDF_ASSERT(0);
420 		return ret;
421 	}
422 
423 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
424 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x\n",
425 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
426 		mac_addr[3], mac_addr[4], mac_addr[5]);
427 
428 	qdf_spin_lock_bh(&soc->ast_lock);
429 
430 	/* If the AST entry already exists, just return from here */
431 	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
432 
433 	if (ast_entry) {
434 		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
435 			ast_entry->is_active = TRUE;
436 
437 		qdf_spin_unlock_bh(&soc->ast_lock);
438 		return 0;
439 	}
440 
441 	ast_entry = (struct dp_ast_entry *)
442 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
443 
444 	if (!ast_entry) {
445 		qdf_spin_unlock_bh(&soc->ast_lock);
446 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
447 			FL("fail to allocate ast_entry"));
448 		QDF_ASSERT(0);
449 		return ret;
450 	}
451 
452 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
453 	ast_entry->peer = peer;
454 	ast_entry->pdev_id = vdev->pdev->pdev_id;
455 	ast_entry->vdev_id = vdev->vdev_id;
456 
457 	switch (type) {
458 	case CDP_TXRX_AST_TYPE_STATIC:
459 		peer->self_ast_entry = ast_entry;
460 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
461 		break;
462 	case CDP_TXRX_AST_TYPE_WDS:
463 		ast_entry->next_hop = 1;
464 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
465 		break;
466 	case CDP_TXRX_AST_TYPE_WDS_HM:
467 		ast_entry->next_hop = 1;
468 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
469 		break;
470 	case CDP_TXRX_AST_TYPE_MEC:
471 		ast_entry->next_hop = 1;
472 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
473 		break;
474 	default:
475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
476 			FL("Incorrect AST entry type"));
477 	}
478 
479 	ast_entry->is_active = TRUE;
480 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
481 	DP_STATS_INC(soc, ast.added, 1);
482 	dp_peer_ast_hash_add(soc, ast_entry);
483 	qdf_spin_unlock_bh(&soc->ast_lock);
484 
485 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
486 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw,
			     DP_MAC_ADDR_LEN);
487 	else
488 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw,
			     DP_MAC_ADDR_LEN);
489 
490 	if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) {
491 		if (QDF_STATUS_SUCCESS ==
492 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
493 				peer->vdev->osif_vdev,
494 				mac_addr,
495 				next_node_mac,
496 				flags))
497 			return 0;
498 	}
499 
500 	return ret;
501 }
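/*
 * Usage sketch (hypothetical caller; sa_mac and wds_flags are
 * illustrative names): the WDS source-port learning path would invoke
 * this roughly as
 *
 *	ret = dp_peer_add_ast(soc, ta_peer, sa_mac,
 *			      CDP_TXRX_AST_TYPE_WDS, wds_flags);
 *
 * where ret is 0 when the entry already exists or was added, and -1
 * when allocation or the WDS add callback failed.
 */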
502 
503 /*
504  * dp_peer_del_ast() - Delete and free AST entry
505  * @soc: SoC handle
506  * @ast_entry: AST entry of the node
507  *
508  * This function removes the AST entry from peer and soc tables
509  * It assumes caller has taken the ast lock to protect the access to these
510  * tables
511  *
512  * Return: None
513  */
514 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
515 {
516 	struct dp_peer *peer = ast_entry->peer;
517 
518 	if (ast_entry->next_hop)
519 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
520 						ast_entry->mac_addr.raw);
521 
522 	soc->ast_table[ast_entry->ast_idx] = NULL;
523 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
524 	DP_STATS_INC(soc, ast.deleted, 1);
525 	dp_peer_ast_hash_remove(soc, ast_entry);
526 	qdf_mem_free(ast_entry);
527 }
528 
529 /*
530  * dp_peer_update_ast() - Update AST entry to the roamed peer
531  * @soc: SoC handle
532  * @peer: peer to which ast node belongs
533  * @ast_entry: AST entry of the node
534  * @flags: wds or hmwds
535  *
536  * This function updates the AST entry to the roamed peer and soc tables.
537  * It assumes caller has taken the ast lock to protect the access to these
538  * tables
539  *
540  * Return: 0 if ast entry is updated successfully
541  *         -1 failure
542  */
543 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
544 		       struct dp_ast_entry *ast_entry, uint32_t flags)
545 {
546 	int ret = -1;
547 	struct dp_peer *old_peer;
548 	struct dp_peer *sa_peer;
549 
550 	if (ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) {
551 		sa_peer = ast_entry->peer;
552 
553 		/*
554 		 * Kick out the directly associated peer (SA) when it roams
555 		 * to another AP and becomes reachable via the TA peer
556 		 */
557 		if (!sa_peer->delete_in_progress) {
558 			sa_peer->delete_in_progress = true;
559 			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
560 				soc->cdp_soc.ol_ops->peer_sta_kickout(
561 						sa_peer->vdev->pdev->osif_pdev,
562 						ast_entry->mac_addr.raw);
563 			}
564 			return 0;
565 		}
566 	}
567 
568 	old_peer = ast_entry->peer;
569 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
570 
571 	ast_entry->peer = peer;
572 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
573 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
574 	ast_entry->vdev_id = peer->vdev->vdev_id;
575 	ast_entry->is_active = TRUE;
576 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
577 
578 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
579 			peer->vdev->osif_vdev,
580 				ast_entry->mac_addr.raw,
581 				peer->mac_addr.raw,
582 				flags);
583 
584 	return ret;
585 }
586 
587 /*
588  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
589  * @soc: SoC handle
590  * @ast_entry: AST entry of the node
591  *
592  * This function gets the pdev_id from the ast entry.
593  *
594  * Return: (uint8_t) pdev_id
595  */
596 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
597 				struct dp_ast_entry *ast_entry)
598 {
599 	return ast_entry->pdev_id;
600 }
601 
602 /*
603  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
604  * @soc: SoC handle
605  * @ast_entry: AST entry of the node
606  *
607  * This function gets the next hop from the ast entry.
608  *
609  * Return: (uint8_t) next_hop
610  */
611 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
612 				struct dp_ast_entry *ast_entry)
613 {
614 	return ast_entry->next_hop;
615 }
616 
617 /*
618  * dp_peer_ast_set_type() - set type in the ast entry
619  * @soc: SoC handle
620  * @ast_entry: AST entry of the node
 * @type: AST entry type to set
621  *
622  * This function sets the type in the ast entry.
623  *
624  * Return: None
625  */
626 void dp_peer_ast_set_type(struct dp_soc *soc,
627 				struct dp_ast_entry *ast_entry,
628 				enum cdp_txrx_ast_entry_type type)
629 {
630 	ast_entry->type = type;
631 }
632 
633 #else
634 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
635 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
636 		uint32_t flags)
637 {
638 	return 1;
639 }
640 
641 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
642 {
643 }
644 
645 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
646 			struct dp_ast_entry *ast_entry, uint32_t flags)
647 {
648 	return 1;
649 }
650 
651 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
652 						uint8_t *ast_mac_addr)
653 {
654 	return NULL;
655 }
656 
657 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
658 {
659 	return 0;
660 }
661 
662 static inline void dp_peer_map_ast(struct dp_soc *soc,
663 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
664 	uint8_t vdev_id)
665 {
666 	return;
667 }
668 
669 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
670 {
671 }
672 
673 void dp_peer_ast_set_type(struct dp_soc *soc,
674 				struct dp_ast_entry *ast_entry,
675 				enum cdp_txrx_ast_entry_type type)
676 {
677 }
678 
679 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
680 				struct dp_ast_entry *ast_entry)
681 {
682 	return 0xff;
683 }
684 
686 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
687 				struct dp_ast_entry *ast_entry)
688 {
689 	return 0xff;
690 }
691 #endif
692 
693 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
694 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
695 {
696 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
697 	unsigned index;
698 	struct dp_peer *peer;
699 
700 	if (mac_addr_is_aligned) {
701 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
702 	} else {
703 		qdf_mem_copy(
704 			&local_mac_addr_aligned.raw[0],
705 			peer_mac_addr, DP_MAC_ADDR_LEN);
706 		mac_addr = &local_mac_addr_aligned;
707 	}
708 	index = dp_peer_find_hash_index(soc, mac_addr);
709 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
710 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
711 #if ATH_SUPPORT_WRAP
712 		/* ProxySTA may have multiple BSS peers with the same MAC address;
713 		 * the modified find takes care of finding the correct BSS peer.
714 		 */
715 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
716 			((peer->vdev->vdev_id == vdev_id) ||
717 			 (vdev_id == DP_VDEV_ALL))) {
718 #else
719 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
720 #endif
721 			/* found it - increment the ref count before releasing
722 			 * the lock
723 			 */
724 			qdf_atomic_inc(&peer->ref_cnt);
725 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
726 			return peer;
727 		}
728 	}
729 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
730 	return NULL; /* failure */
731 }
732 
733 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
734 {
735 	unsigned index;
736 	struct dp_peer *tmppeer = NULL;
737 	int found = 0;
738 
739 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
740 	/* Check that the list is not empty before delete */
741 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
742 	/*
743 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
744 	 * by the caller.
745 	 * The caller needs to hold the lock from the time the peer object's
746 	 * reference count is decremented and tested up through the time the
747 	 * reference to the peer object is removed from the hash table, by
748 	 * this function.
749 	 * Holding the lock only while removing the peer object reference
750 	 * from the hash table keeps the hash table consistent, but does not
751 	 * protect against a new HL tx context starting to use the peer object
752 	 * if it looks up the peer object from its MAC address just after the
753 	 * peer ref count is decremented to zero, but just before the peer
754 	 * object reference is removed from the hash table.
755 	 */
756 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
757 		if (tmppeer == peer) {
758 			found = 1;
759 			break;
760 		}
761 	}
762 	QDF_ASSERT(found);
763 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
764 }
765 
766 void dp_peer_find_hash_erase(struct dp_soc *soc)
767 {
768 	int i;
769 
770 	/*
771 	 * Not really necessary to take peer_ref_mutex lock - by this point,
772 	 * it's known that the soc is no longer in use.
773 	 */
774 	for (i = 0; i <= soc->peer_hash.mask; i++) {
775 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
776 			struct dp_peer *peer, *peer_next;
777 
778 			/*
779 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
780 			 * memory access violation after peer is freed
781 			 */
782 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
783 				hash_list_elem, peer_next) {
784 				/*
785 				 * Don't remove the peer from the hash table -
786 				 * that would modify the list we are currently
787 				 * traversing, and it's not necessary anyway.
788 				 */
789 				/*
790 				 * Artificially adjust the peer's ref count to
791 				 * 1, so it will get deleted by
792 				 * dp_peer_unref_delete.
793 				 */
794 				/* set to zero */
795 				qdf_atomic_init(&peer->ref_cnt);
796 				/* incr to one */
797 				qdf_atomic_inc(&peer->ref_cnt);
798 				dp_peer_unref_delete(peer);
799 			}
800 		}
801 	}
802 }
803 
804 static void dp_peer_find_map_detach(struct dp_soc *soc)
805 {
806 	qdf_mem_free(soc->peer_id_to_obj_map);
807 }
808 
809 int dp_peer_find_attach(struct dp_soc *soc)
810 {
811 	if (dp_peer_find_map_attach(soc))
812 		return 1;
813 
814 	if (dp_peer_find_hash_attach(soc)) {
815 		dp_peer_find_map_detach(soc);
816 		return 1;
817 	}
818 
819 	if (dp_peer_ast_hash_attach(soc)) {
820 		dp_peer_find_hash_detach(soc);
821 		dp_peer_find_map_detach(soc);
822 		return 1;
823 	}
824 	return 0; /* success */
825 }
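/*
 * Note: each failure path above releases the allocations made by the
 * earlier attach steps, so dp_peer_find_attach() either fully succeeds
 * or leaves no partially initialized state behind.
 */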
826 
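/*
 * dp_rx_tid_stats_cb() - REO command status callback that dumps the Rx
 * TID queue statistics returned in a REO queue stats response
 * @soc: DP SOC handle
 * @cb_ctxt: callback context (the struct dp_rx_tid that was queried)
 * @reo_status: REO command status
 */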
827 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
828 	union hal_reo_status *reo_status)
829 {
830 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
831 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
832 
833 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
834 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
835 			queue_status->header.status, rx_tid->tid);
836 		return;
837 	}
838 
839 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
840 		"ssn: %d\n"
841 		"curr_idx  : %d\n"
842 		"pn_31_0   : %08x\n"
843 		"pn_63_32  : %08x\n"
844 		"pn_95_64  : %08x\n"
845 		"pn_127_96 : %08x\n"
846 		"last_rx_enq_tstamp : %08x\n"
847 		"last_rx_deq_tstamp : %08x\n"
848 		"rx_bitmap_31_0     : %08x\n"
849 		"rx_bitmap_63_32    : %08x\n"
850 		"rx_bitmap_95_64    : %08x\n"
851 		"rx_bitmap_127_96   : %08x\n"
852 		"rx_bitmap_159_128  : %08x\n"
853 		"rx_bitmap_191_160  : %08x\n"
854 		"rx_bitmap_223_192  : %08x\n"
855 		"rx_bitmap_255_224  : %08x\n",
856 		rx_tid->tid,
857 		queue_status->ssn, queue_status->curr_idx,
858 		queue_status->pn_31_0, queue_status->pn_63_32,
859 		queue_status->pn_95_64, queue_status->pn_127_96,
860 		queue_status->last_rx_enq_tstamp,
861 		queue_status->last_rx_deq_tstamp,
862 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
863 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
864 		queue_status->rx_bitmap_159_128,
865 		queue_status->rx_bitmap_191_160,
866 		queue_status->rx_bitmap_223_192,
867 		queue_status->rx_bitmap_255_224);
868 
869 	DP_TRACE_STATS(FATAL,
870 		"curr_mpdu_cnt      : %d\n"
871 		"curr_msdu_cnt      : %d\n"
872 		"fwd_timeout_cnt    : %d\n"
873 		"fwd_bar_cnt        : %d\n"
874 		"dup_cnt            : %d\n"
875 		"frms_in_order_cnt  : %d\n"
876 		"bar_rcvd_cnt       : %d\n"
877 		"mpdu_frms_cnt      : %d\n"
878 		"msdu_frms_cnt      : %d\n"
879 		"total_byte_cnt     : %d\n"
880 		"late_recv_mpdu_cnt : %d\n"
881 		"win_jump_2k        : %d\n"
882 		"hole_cnt           : %d\n",
883 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
884 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
885 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
886 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
887 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
888 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
889 		queue_status->hole_cnt);
890 
891 	DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req);
892 	DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp);
893 	DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req);
894 	DP_PRINT_STATS("BA window size   = %d\n", rx_tid->ba_win_size);
895 	DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size);
896 }
897 
898 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
899 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
900 	uint8_t vdev_id)
901 {
902 	struct dp_peer *peer;
903 
904 	QDF_ASSERT(peer_id < soc->max_peers);
905 	/* check if there's already a peer object with this MAC address */
906 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
907 		0 /* is aligned */, vdev_id);
908 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
909 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
910 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
911 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
912 		peer_mac_addr[4], peer_mac_addr[5]);
913 
914 	if (peer) {
915 		/* peer's ref count was already incremented by
916 		 * peer_find_hash_find
917 		 */
918 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
919 			  "%s: ref_cnt: %d", __func__,
920 			   qdf_atomic_read(&peer->ref_cnt));
921 		soc->peer_id_to_obj_map[peer_id] = peer;
922 
923 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
924 			/* TBDXXX: assert for now */
925 			QDF_ASSERT(0);
926 		}
927 
928 		return peer;
929 	}
930 
931 	return NULL;
932 }
933 
934 /**
935  * dp_rx_peer_map_handler() - handle peer map event from firmware
936  * @soc_handle - generic soc handle
937  * @peer_id - peer_id from firmware
938  * @hw_peer_id - ast index for this peer
939  * @vdev_id - vdev ID
940  * @peer_mac_addr - mac address of the peer
941  *
942  * associate the peer_id that firmware provided with peer entry
943  * and update the ast table in the host with the hw_peer_id.
944  *
945  * Return: none
946  */
948 void
949 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
950 			uint8_t vdev_id, uint8_t *peer_mac_addr)
951 {
952 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
953 	struct dp_peer *peer = NULL;
954 
955 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
956 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
957 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
958 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
959 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
960 		peer_mac_addr[5], vdev_id);
961 
962 	peer = soc->peer_id_to_obj_map[peer_id];
963 
964 	/* hw_peer_id is unsigned, so only the upper bound needs checking */
	if (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
965 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
966 			"invalid hw_peer_id: %d", hw_peer_id);
967 		qdf_assert_always(0);
968 	}
969 
970 	/*
971 	 * check if peer already exists for this peer_id, if so
972 	 * this peer map event is in response for a wds peer add
973 	 * wmi command sent during wds source port learning.
974 	 * in this case just add the ast entry to the existing
975 	 * peer ast_list.
976 	 */
977 	if (!peer)
978 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
979 					hw_peer_id, vdev_id);
980 
981 	if (peer) {
982 		qdf_assert_always(peer->vdev);
983 		/*
984 		 * For every peer map message, check whether the peer matches
		 * the vdev MAC and, if so, mark it as the bss peer
985 		 */
986 		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
987 				 DP_MAC_ADDR_LEN))) {
988 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
989 				"vdev bss_peer!!!!");
990 			peer->bss_peer = 1;
991 			peer->vdev->vap_bss_peer = peer;
992 		}
993 	}
994 
995 	dp_peer_map_ast(soc, peer, peer_mac_addr,
996 			hw_peer_id, vdev_id);
997 }
998 
999 void
1000 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
1001 {
1002 	struct dp_peer *peer;
1003 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1004 	uint8_t i;
1005 
1006 	peer = __dp_peer_find_by_id(soc, peer_id);
1007 
1008 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1009 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK\n",
1010 		soc, peer_id, peer);
1011 
1012 	/*
1013 	 * Currently peer IDs are assigned for vdevs as well as peers.
1014 	 * If the peer ID is for a vdev, then the peer pointer stored
1015 	 * in peer_id_to_obj_map will be NULL.
1016 	 */
1017 	if (!peer) {
1018 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1019 			"%s: Received unmap event for invalid peer_id"
1020 			" %u\n", __func__, peer_id);
1021 		return;
1022 	}
1023 
1024 	soc->peer_id_to_obj_map[peer_id] = NULL;
1025 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1026 		if (peer->peer_ids[i] == peer_id) {
1027 			peer->peer_ids[i] = HTT_INVALID_PEER;
1028 			break;
1029 		}
1030 	}
1031 
1032 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1033 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1034 				peer_id);
1035 	}
1036 
1037 	/*
1038 	 * Remove a reference to the peer.
1039 	 * If there are no more references, delete the peer object.
1040 	 */
1041 	dp_peer_unref_delete(peer);
1042 }
1043 
1044 void
1045 dp_peer_find_detach(struct dp_soc *soc)
1046 {
1047 	dp_peer_find_map_detach(soc);
1048 	dp_peer_find_hash_detach(soc);
1049 	dp_peer_ast_hash_detach(soc);
1050 }
1051 
1052 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1053 	union hal_reo_status *reo_status)
1054 {
1055 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1056 
1057 	if ((reo_status->rx_queue_status.header.status !=
1058 		HAL_REO_CMD_SUCCESS) &&
1059 		(reo_status->rx_queue_status.header.status !=
1060 		HAL_REO_CMD_DRAIN)) {
1061 		/* Should not happen normally. Just print error for now */
1062 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1063 			"%s: Rx tid HW desc update failed(%d): tid %d\n",
1064 			__func__,
1065 			reo_status->rx_queue_status.header.status,
1066 			rx_tid->tid);
1067 	}
1068 }
1069 
1070 /*
1071  * dp_find_peer_by_addr - find peer instance by mac address
1072  * @dev: physical device instance
1073  * @peer_mac_addr: peer mac address
1074  * @local_id: local id for the peer
1075  *
1076  * Return: peer instance pointer
1077  */
1078 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1079 		uint8_t *local_id)
1080 {
1081 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1082 	struct dp_peer *peer;
1083 
1084 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1085 
1086 	if (!peer)
1087 		return NULL;
1088 
1089 	/* Multiple peer ids? How can we know which peer id to use? */
1090 	*local_id = peer->local_id;
1091 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1092 
1093 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1094 	 * Decrement it here.
1095 	 */
1096 	qdf_atomic_dec(&peer->ref_cnt);
1097 
1098 	return peer;
1099 }
1100 
1101 /*
1102  * dp_rx_tid_update_wifi3() – Update receive TID state
1103  * @peer: Datapath peer handle
1104  * @tid: TID
1105  * @ba_window_size: BlockAck window size
1106  * @start_seq: Starting sequence number
1107  *
1108  * Return: 0 on success, error code on failure
1109  */
1110 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1111 				  ba_window_size, uint32_t start_seq)
1112 {
1113 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1114 	struct dp_soc *soc = peer->vdev->pdev->soc;
1115 	struct hal_reo_cmd_params params;
1116 
1117 	qdf_mem_zero(&params, sizeof(params));
1118 
1119 	params.std.need_status = 1;
1120 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1121 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1122 	params.u.upd_queue_params.update_ba_window_size = 1;
1123 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1124 
1125 	if (start_seq < IEEE80211_SEQ_MAX) {
1126 		params.u.upd_queue_params.update_ssn = 1;
1127 		params.u.upd_queue_params.ssn = start_seq;
1128 	}
1129 
1130 	dp_set_ssn_valid_flag(&params, 0);
1131 
1132 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_update_cb, rx_tid);
1133 	return 0;
1134 }
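/*
 * Note: REO commands carry the 64-bit queue descriptor physical address
 * as two 32-bit halves.  For example, a hw_qdesc_paddr of
 * 0x0000000150001000 splits into addr_lo = 0x50001000 and addr_hi = 0x1
 * via the mask/shift pattern used above and in the other REO command
 * setups in this file.
 */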
1135 
1136 /*
1137  * dp_reo_desc_free() - Callback free reo descriptor memory after
1138  * HW cache flush
1139  *
1140  * @soc: DP SOC handle
1141  * @cb_ctxt: Callback context
1142  * @reo_status: REO command status
1143  */
1144 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1145 	union hal_reo_status *reo_status)
1146 {
1147 	struct reo_desc_list_node *freedesc =
1148 		(struct reo_desc_list_node *)cb_ctxt;
1149 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1150 
1151 	if ((reo_status->fl_cache_status.header.status !=
1152 		HAL_REO_CMD_SUCCESS) &&
1153 		(reo_status->fl_cache_status.header.status !=
1154 		HAL_REO_CMD_DRAIN)) {
1155 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1156 			"%s: Rx tid HW desc flush failed(%d): tid %d\n",
1157 			__func__,
1158 			reo_status->fl_cache_status.header.status,
1159 			freedesc->rx_tid.tid);
1160 	}
1161 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1162 		"%s: hw_qdesc_paddr: %pK, tid:%d\n", __func__,
1163 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1164 	qdf_mem_unmap_nbytes_single(soc->osdev,
1165 		rx_tid->hw_qdesc_paddr,
1166 		QDF_DMA_BIDIRECTIONAL,
1167 		rx_tid->hw_qdesc_alloc_size);
1168 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1169 	qdf_mem_free(freedesc);
1170 }
1171 
1172 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1173 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1174 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1175 {
1176 	if (dma_addr < 0x50000000)
1177 		return QDF_STATUS_E_FAILURE;
1178 	else
1179 		return QDF_STATUS_SUCCESS;
1180 }
1181 #else
1182 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1183 {
1184 	return QDF_STATUS_SUCCESS;
1185 }
1186 #endif
1187 
1189 /*
1190  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1191  * @peer: Datapath peer handle
1192  * @tid: TID
1193  * @ba_window_size: BlockAck window size
1194  * @start_seq: Starting sequence number
1195  *
1196  * Return: 0 on success, error code on failure
1197  */
1198 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1199 	uint32_t ba_window_size, uint32_t start_seq)
1200 {
1201 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1202 	struct dp_vdev *vdev = peer->vdev;
1203 	struct dp_soc *soc = vdev->pdev->soc;
1204 	uint32_t hw_qdesc_size;
1205 	uint32_t hw_qdesc_align;
1206 	int hal_pn_type;
1207 	void *hw_qdesc_vaddr;
1208 	uint32_t alloc_tries = 0;
1209 
1210 	if (peer->delete_in_progress)
1211 		return QDF_STATUS_E_FAILURE;
1212 
1213 	rx_tid->ba_win_size = ba_window_size;
1214 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1215 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1216 			start_seq);
1217 	rx_tid->num_of_addba_req = 0;
1218 	rx_tid->num_of_delba_req = 0;
1219 	rx_tid->num_of_addba_resp = 0;
1220 #ifdef notyet
1221 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1222 #else
1223 	/* TODO: Allocating HW queue descriptors based on max BA window size
1224 	 * for all QOS TIDs so that same descriptor can be used later when
1225 	 * ADDBA request is received. This should be changed to allocate HW
1226 	 * queue descriptors based on BA window size being negotiated (0 for
1227 	 * non BA cases), and reallocate when BA window size changes and also
1228 	 * send WMI message to FW to change the REO queue descriptor in Rx
1229 	 * peer entry as part of dp_rx_tid_update.
1230 	 */
1231 	if (tid != DP_NON_QOS_TID)
1232 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1233 			HAL_RX_MAX_BA_WINDOW);
1234 	else
1235 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1236 			ba_window_size);
1237 #endif
1238 
1239 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1240 	/* To avoid unnecessary extra allocation for alignment, try allocating
1241 	 * exact size and see if we already have aligned address.
1242 	 */
1243 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1244 
1245 try_desc_alloc:
1246 	rx_tid->hw_qdesc_vaddr_unaligned =
1247 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1248 
1249 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1250 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1251 			"%s: Rx tid HW desc alloc failed: tid %d\n",
1252 			__func__, tid);
1253 		return QDF_STATUS_E_NOMEM;
1254 	}
1255 
1256 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1257 		hw_qdesc_align) {
1258 		/* Address allocated above is not aligned. Allocate extra
1259 		 * memory for alignment
1260 		 */
1261 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1262 		rx_tid->hw_qdesc_vaddr_unaligned =
1263 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1264 					hw_qdesc_align - 1);
1265 
1266 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1267 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1268 				"%s: Rx tid HW desc alloc failed: tid %d\n",
1269 				__func__, tid);
1270 			return QDF_STATUS_E_NOMEM;
1271 		}
1272 
1273 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1274 			rx_tid->hw_qdesc_vaddr_unaligned,
1275 			hw_qdesc_align);
1276 
1277 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1278 			"%s: Total Size %d Aligned Addr %pK\n",
1279 			__func__, rx_tid->hw_qdesc_alloc_size,
1280 			hw_qdesc_vaddr);
1281 
1282 	} else {
1283 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1284 	}
1285 
1286 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1287 	 * Currently this is set based on htt indication
1288 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1289 	 */
1290 	switch (peer->security[dp_sec_ucast].sec_type) {
1291 	case cdp_sec_type_tkip_nomic:
1292 	case cdp_sec_type_aes_ccmp:
1293 	case cdp_sec_type_aes_ccmp_256:
1294 	case cdp_sec_type_aes_gcmp:
1295 	case cdp_sec_type_aes_gcmp_256:
1296 		hal_pn_type = HAL_PN_WPA;
1297 		break;
1298 	case cdp_sec_type_wapi:
1299 		if (vdev->opmode == wlan_op_mode_ap)
1300 			hal_pn_type = HAL_PN_WAPI_EVEN;
1301 		else
1302 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1303 		break;
1304 	default:
1305 		hal_pn_type = HAL_PN_NONE;
1306 		break;
1307 	}
1308 
1309 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1310 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1311 
1312 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1313 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1314 		&(rx_tid->hw_qdesc_paddr));
1315 
1316 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1317 			QDF_STATUS_SUCCESS) {
1318 		if (alloc_tries++ < 10)
1319 			goto try_desc_alloc;
1320 		else {
1321 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1322 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d\n",
1323 			__func__, tid);
1324 			return QDF_STATUS_E_NOMEM;
1325 		}
1326 	}
1327 
1328 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1329 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1330 			vdev->pdev->osif_pdev,
1331 			peer->vdev->vdev_id, peer->mac_addr.raw,
1332 			rx_tid->hw_qdesc_paddr, tid, tid);
1333 
1334 	}
1335 	return 0;
1336 }
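/*
 * Note on the alignment strategy above: allocating (size + align - 1)
 * bytes guarantees that some address inside the buffer is aligned.  A
 * minimal sketch of the same technique (illustrative only):
 *
 *	void *raw = qdf_mem_malloc(size + align - 1);
 *	void *aligned = (void *)qdf_align((unsigned long)raw, align);
 *
 * qdf_align() rounds the address up to the next multiple of align,
 * which is why the unaligned pointer is kept separately in
 * hw_qdesc_vaddr_unaligned for the eventual qdf_mem_free().
 */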
1337 
1338 /*
1339  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1340  * after deleting the entries (ie., setting valid=0)
1341  *
1342  * @soc: DP SOC handle
1343  * @cb_ctxt: Callback context
1344  * @reo_status: REO command status
1345  */
1346 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1347 	union hal_reo_status *reo_status)
1348 {
1349 	struct reo_desc_list_node *freedesc =
1350 		(struct reo_desc_list_node *)cb_ctxt;
1351 	uint32_t list_size;
1352 	struct reo_desc_list_node *desc;
1353 	unsigned long curr_ts = qdf_get_system_timestamp();
1354 	uint32_t desc_size, tot_desc_size;
1355 	struct hal_reo_cmd_params params;
1356 
1357 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1358 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1359 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1360 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1361 		return;
1362 	} else if (reo_status->rx_queue_status.header.status !=
1363 		HAL_REO_CMD_SUCCESS) {
1364 		/* Should not happen normally. Just print error for now */
1365 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1366 			"%s: Rx tid HW desc deletion failed(%d): tid %d\n",
1367 			__func__,
1368 			reo_status->rx_queue_status.header.status,
1369 			freedesc->rx_tid.tid);
1370 	}
1371 
1372 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1373 		"%s: rx_tid: %d status: %d\n", __func__,
1374 		freedesc->rx_tid.tid,
1375 		reo_status->rx_queue_status.header.status);
1376 
1377 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1378 	freedesc->free_ts = curr_ts;
1379 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1380 		(qdf_list_node_t *)freedesc, &list_size);
1381 
1382 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1383 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1384 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1385 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1386 		struct dp_rx_tid *rx_tid;
1387 
1388 		qdf_list_remove_front(&soc->reo_desc_freelist,
1389 				(qdf_list_node_t **)&desc);
1390 		list_size--;
1391 		rx_tid = &desc->rx_tid;
1392 
1393 		/* Flush and invalidate REO descriptor from HW cache: Base and
1394 		 * extension descriptors should be flushed separately */
1395 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1396 			rx_tid->ba_win_size);
1397 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1398 
1399 		/* Flush reo extension descriptors */
1400 		while ((tot_desc_size -= desc_size) > 0) {
1401 			qdf_mem_zero(&params, sizeof(params));
1402 			params.std.addr_lo =
1403 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1404 				tot_desc_size) & 0xffffffff;
1405 			params.std.addr_hi =
1406 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1407 
1408 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1409 							CMD_FLUSH_CACHE,
1410 							&params,
1411 							NULL,
1412 							NULL)) {
1413 				QDF_TRACE(QDF_MODULE_ID_DP,
1414 					QDF_TRACE_LEVEL_ERROR,
1415 					"%s: fail to send CMD_FLUSH_CACHE:"
1416 					"tid %d desc %pK\n", __func__,
1417 					rx_tid->tid,
1418 					(void *)(rx_tid->hw_qdesc_paddr));
1419 			}
1420 		}
1421 
1422 		/* Flush base descriptor */
1423 		qdf_mem_zero(&params, sizeof(params));
1424 		params.std.need_status = 1;
1425 		params.std.addr_lo =
1426 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1427 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1428 
1429 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1430 							  CMD_FLUSH_CACHE,
1431 							  &params,
1432 							  dp_reo_desc_free,
1433 							  (void *)desc)) {
1434 			union hal_reo_status reo_status;
1435 			/*
1436 			 * If dp_reo_send_cmd returns failure, the related TID queue
1437 			 * desc should be unmapped, and the local reo_desc together
1438 			 * with the TID queue desc also needs to be freed.
1439 			 *
1440 			 * Here invoke desc_free function directly to do clean up.
1441 			 */
1442 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1443 				"%s: fail to send REO cmd to flush cache: tid %d\n",
1444 				__func__, rx_tid->tid);
1445 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1446 			reo_status.fl_cache_status.header.status = 0;
1447 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1448 		}
1449 	}
1450 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1451 }
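/*
 * Note: freed REO queue descriptors are not flushed immediately; they
 * sit on reo_desc_freelist until the list exceeds REO_DESC_FREELIST_SIZE
 * entries or an entry has aged past REO_DESC_FREE_DEFER_MS.  Only then
 * is CMD_FLUSH_CACHE issued for each extension descriptor and finally
 * for the base descriptor, with dp_reo_desc_free() as the completion
 * callback that unmaps and frees the memory.
 */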
1452 
1453 /*
1454  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1455  * @peer: Datapath peer handle
1456  * @tid: TID
1457  *
1458  * Return: 0 on success, error code on failure
1459  */
1460 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1461 {
1462 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1463 	struct dp_soc *soc = peer->vdev->pdev->soc;
1464 	struct hal_reo_cmd_params params;
1465 	struct reo_desc_list_node *freedesc =
1466 		qdf_mem_malloc(sizeof(*freedesc));
1467 
1468 	if (!freedesc) {
1469 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1470 			"%s: malloc failed for freedesc: tid %d\n",
1471 			__func__, tid);
1472 		return -ENOMEM;
1473 	}
1474 
1475 	freedesc->rx_tid = *rx_tid;
1476 
1477 	qdf_mem_zero(&params, sizeof(params));
1478 
1479 	params.std.need_status = 0;
1480 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1481 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1482 	params.u.upd_queue_params.update_vld = 1;
1483 	params.u.upd_queue_params.vld = 0;
1484 
1485 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1486 		dp_rx_tid_delete_cb, (void *)freedesc);
1487 
1488 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1489 	rx_tid->hw_qdesc_alloc_size = 0;
1490 	rx_tid->hw_qdesc_paddr = 0;
1491 
1492 	return 0;
1493 }
1494 
1495 #ifdef DP_LFR
1496 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1497 {
1498 	int tid;
1499 
1500 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1501 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1502 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1503 			"Setting up TID %d for peer %pK peer->local_id %d\n",
1504 			tid, peer, peer->local_id);
1505 	}
1506 }
1507 #else
1508 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1509 #endif
1510 /*
1511  * dp_peer_rx_init() – Initialize receive TID state
1512  * @pdev: Datapath pdev
1513  * @peer: Datapath peer
1514  *
1515  */
1516 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1517 {
1518 	int tid;
1519 	struct dp_rx_tid *rx_tid;
1520 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1521 		rx_tid = &peer->rx_tid[tid];
1522 		rx_tid->array = &rx_tid->base;
1523 		rx_tid->base.head = rx_tid->base.tail = NULL;
1524 		rx_tid->tid = tid;
1525 		rx_tid->defrag_timeout_ms = 0;
1526 		rx_tid->ba_win_size = 0;
1527 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1528 
1529 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1530 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1531 
1532 #ifdef notyet /* TODO: See if this is required for exception handling */
1533 		/* invalid sequence number */
1534 		peer->tids_last_seq[tid] = 0xffff;
1535 #endif
1536 	}
1537 
1538 	/* Setup default (non-qos) rx tid queue */
1539 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1540 
1541 	/* Set up the rx tid queue for TID 0.
1542 	 * Other queues will be set up on receiving the first packet; without
1543 	 * this, the first packet would trigger a NULL REO queue error.
1544 	 */
1545 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1546 
1547 	/*
1548 	 * Setup the rest of TID's to handle LFR
1549 	 */
1550 	dp_peer_setup_remaining_tids(peer);
1551 
1552 	/*
1553 	 * Set security defaults: no PN check, no security. The target may
1554 	 * send a HTT SEC_IND message to overwrite these defaults.
1555 	 */
1556 	peer->security[dp_sec_ucast].sec_type =
1557 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1558 }
1559 
1560 /*
1561  * dp_peer_rx_cleanup() – Cleanup receive TID state
1562  * @vdev: Datapath vdev
1563  * @peer: Datapath peer
1564  *
1565  */
1566 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1567 {
1568 	int tid;
1569 	uint32_t tid_delete_mask = 0;
1570 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1571 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1572 			dp_rx_tid_delete_wifi3(peer, tid);
1573 			tid_delete_mask |= (1 << tid);
1574 		}
1575 	}
1576 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1577 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1578 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->osif_pdev,
1579 			peer->vdev->vdev_id, peer->mac_addr.raw,
1580 			tid_delete_mask);
1581 	}
1582 #endif
1583 }
1584 
1585 /*
1586  * dp_peer_cleanup() – Cleanup peer information
1587  * @vdev: Datapath vdev
1588  * @peer: Datapath peer
1589  *
1590  */
1591 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1592 {
1593 	peer->last_assoc_rcvd = 0;
1594 	peer->last_disassoc_rcvd = 0;
1595 	peer->last_deauth_rcvd = 0;
1596 
1597 	/* cleanup the Rx reorder queues for this peer */
1598 	dp_peer_rx_cleanup(vdev, peer);
1599 }
1600 
1601 /*
1602 * dp_addba_requestprocess_wifi3() – Process ADDBA request from peer
1603 *
1604 * @peer: Datapath peer handle
1605 * @dialogtoken: dialogtoken from ADDBA frame
1606 * @tid: TID number
 * @batimeout: BA timeout as received in the ADDBA frame
 * @buffersize: BA window size as received in the ADDBA frame
1607 * @startseqnum: Start seq. number received in BA sequence control
1608 * in ADDBA frame
1609 *
1610 * Return: 0 on success, error code on failure
1611 */
1612 int dp_addba_requestprocess_wifi3(void *peer_handle,
1613 	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
1614 	uint16_t buffersize, uint16_t startseqnum)
1615 {
1616 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1617 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1618 
1619 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
1620 			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
1621 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1622 
1623 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize,
1624 		startseqnum)) {
1625 		/* TODO: Should we send addba reject in this case */
1626 		return QDF_STATUS_E_FAILURE;
1627 	}
1628 
1629 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1630 		rx_tid->statuscode = rx_tid->userstatuscode;
1631 	else
1632 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1633 
1634 	rx_tid->dialogtoken = dialogtoken;
1635 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1636 	rx_tid->num_of_addba_req++;
1637 
1638 	return 0;
1639 }
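/*
 * Usage sketch: a typical ADDBA exchange drives these APIs in pairs
 * (hypothetical caller; variable names are illustrative):
 *
 *	dp_addba_requestprocess_wifi3(peer, dialogtoken, tid,
 *				      batimeout, buffersize, startseqnum);
 *	...
 *	dp_addba_responsesetup_wifi3(peer, tid, &dialogtoken,
 *				     &statuscode, &buffersize, &batimeout);
 *
 * The parameters cached in rx_tid by the request are read back when
 * building the ADDBA response frame.
 */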
1640 
1641 /*
1642 * dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
1643 *
1644 * @peer: Datapath peer handle
1645 * @tid: TID number
1646 * @dialogtoken: output dialogtoken
1647 * @statuscode: output status code
1648 * @buffersize: output BA window size
1649 * @batimeout: output BA timeout
1650 */
1651 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1652 	uint8_t *dialogtoken, uint16_t *statuscode,
1653 	uint16_t *buffersize, uint16_t *batimeout)
1654 {
1655 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1656 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1657 
1658 	rx_tid->num_of_addba_resp++;
1659 	/* setup ADDBA response parameters */
1660 	*dialogtoken = rx_tid->dialogtoken;
1661 	*statuscode = rx_tid->statuscode;
1662 	*buffersize = rx_tid->ba_win_size;
1663 	*batimeout  = 0;
1664 }
1665 
1666 /*
1667 * dp_set_addba_response() – Set a user defined ADDBA response status code
1668 *
1669 * @peer: Datapath peer handle
1670 * @tid: TID number
1671 * @statuscode: response status code to be set
1672 */
1673 void dp_set_addba_response(void *peer_handle, uint8_t tid,
1674 	uint16_t statuscode)
1675 {
1676 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1677 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1678 
1679 	rx_tid->userstatuscode = statuscode;
1680 }
1681 
1682 /*
1683 * dp_delba_process_wifi3() – Process DELBA from peer
1684 * @peer: Datapath peer handle
1685 * @tid: TID number
1686 * @reasoncode: Reason code received in DELBA frame
1687 *
1688 * Return: 0 on success, error code on failure
1689 */
1690 int dp_delba_process_wifi3(void *peer_handle,
1691 	int tid, uint16_t reasoncode)
1692 {
1693 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1694 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1695 
1696 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
1697 		return QDF_STATUS_E_FAILURE;
1698 
1699 	/* TODO: See if we can delete the existing REO queue descriptor and
1700 	 * replace it with a new one without the queue extension descriptor
1701 	 * to save memory
1702 	 */
1703 	rx_tid->num_of_delba_req++;
1704 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1705 
1706 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1707 
1708 	return 0;
1709 }
1710 
1711 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
1712 	qdf_nbuf_t msdu_list)
1713 {
1714 	while (msdu_list) {
1715 		qdf_nbuf_t msdu = msdu_list;
1716 
1717 		msdu_list = qdf_nbuf_next(msdu_list);
1718 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1719 			"discard rx %pK from partly-deleted peer %pK "
1720 			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
1721 			msdu, peer,
1722 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1723 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1724 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
1725 		qdf_nbuf_free(msdu);
1726 	}
1727 }
1728 
1730 /**
1731  * dp_set_pn_check_wifi3() - enable PN check in REO for security
1732  * @vdev_handle: Datapath vdev handle
1733  * @peer_handle: Datapath peer handle
1734  * @sec_type: security type
1735  * @rx_pn: Receive pn starting number
1737  *
1738  */
1740 void
1741 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
		      struct cdp_peer *peer_handle,
		      enum cdp_sec_type sec_type, uint32_t *rx_pn)
1742 {
1743 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1744 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1745 	struct dp_pdev *pdev;
1746 	struct dp_soc *soc;
1747 	int i;
1748 	uint8_t pn_size;
1749 	struct hal_reo_cmd_params params;
1750 
1751 	/* preconditions */
1752 	qdf_assert(vdev);
1753 
1754 	pdev = vdev->pdev;
1755 	soc = pdev->soc;
1756 
1758 	qdf_mem_zero(&params, sizeof(params));
1759 
1760 	params.std.need_status = 1;
1761 	params.u.upd_queue_params.update_pn_valid = 1;
1762 	params.u.upd_queue_params.update_pn_size = 1;
1763 	params.u.upd_queue_params.update_pn = 1;
1764 	params.u.upd_queue_params.update_pn_check_needed = 1;
1765 
1766 	peer->security[dp_sec_ucast].sec_type = sec_type;
1767 
1768 	switch (sec_type) {
1769 	case cdp_sec_type_tkip_nomic:
1770 	case cdp_sec_type_aes_ccmp:
1771 	case cdp_sec_type_aes_ccmp_256:
1772 	case cdp_sec_type_aes_gcmp:
1773 	case cdp_sec_type_aes_gcmp_256:
1774 		params.u.upd_queue_params.pn_check_needed = 1;
1775 		params.u.upd_queue_params.pn_size = 48;
1776 		pn_size = 48;
1777 		break;
1778 	case cdp_sec_type_wapi:
1779 		params.u.upd_queue_params.pn_check_needed = 1;
1780 		params.u.upd_queue_params.pn_size = 128;
1781 		pn_size = 128;
1782 		if (vdev->opmode == wlan_op_mode_ap) {
1783 			params.u.upd_queue_params.pn_even = 1;
1784 			params.u.upd_queue_params.update_pn_even = 1;
1785 		} else {
1786 			params.u.upd_queue_params.pn_uneven = 1;
1787 			params.u.upd_queue_params.update_pn_uneven = 1;
1788 		}
1789 		break;
1790 	default:
1791 		params.u.upd_queue_params.pn_check_needed = 0;
1792 		pn_size = 0;
1793 		break;
1794 	}
1795 
1797 	for (i = 0; i < DP_MAX_TIDS; i++) {
1798 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1799 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
1800 			params.std.addr_lo =
1801 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1802 			params.std.addr_hi =
1803 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1804 
1805 			if (sec_type != cdp_sec_type_wapi) {
1806 				params.u.upd_queue_params.update_pn_valid = 0;
1807 			} else {
1808 				/*
1809 				 * Setting PN valid bit for WAPI sec_type,
1810 				 * since WAPI PN has to be started with
1811 				 * predefined value
1812 				 */
1813 				params.u.upd_queue_params.update_pn_valid = 1;
1814 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1815 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1816 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1817 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1818 			}
1819 			rx_tid->pn_size = pn_size;
1820 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1821 				dp_rx_tid_update_cb, rx_tid);
1822 		} else {
1823 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1824 				"PN Check not setup for TID :%d \n", i);
1825 		}
1826 	}
1827 }
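
/*
 * Illustrative sketch, not part of the original driver code: after
 * unicast keys are plumbed, a caller could enable REO PN checking for
 * AES-CCMP. The start-PN array is only consumed for WAPI (see the
 * update_pn_valid handling above), so zeros are fine here. The helper
 * name is hypothetical.
 */
static inline void example_enable_ccmp_pn_check(struct cdp_vdev *vdev,
	struct cdp_peer *peer)
{
	uint32_t rx_pn[4] = {0};	/* unused for CCMP */

	dp_set_pn_check_wifi3(vdev, peer, cdp_sec_type_aes_ccmp, rx_pn);
}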
1828 
1830 void
1831 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
1832 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
1833 	u_int32_t *rx_pn)
1834 {
1835 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1836 	struct dp_peer *peer;
1837 	int sec_index;
1838 
1839 	peer = dp_peer_find_by_id(soc, peer_id);
1840 	if (!peer) {
1841 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1842 			"Couldn't find peer from ID %d - skipping security inits\n",
1843 			peer_id);
1844 		return;
1845 	}
1846 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1847 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
1848 		"%s key of type %d\n",
1849 		peer,
1850 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1851 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1852 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
1853 		is_unicast ? "ucast" : "mcast",
1854 		sec_type);
1855 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
1856 	peer->security[sec_index].sec_type = sec_type;
1857 #ifdef notyet /* TODO: See if this is required for defrag support */
1858 	/* michael key only valid for TKIP, but for simplicity,
1859 	 * copy it anyway
1860 	 */
1861 	qdf_mem_copy(
1862 		&peer->security[sec_index].michael_key[0],
1863 		michael_key,
1864 		sizeof(peer->security[sec_index].michael_key));
1865 #ifdef BIG_ENDIAN_HOST
1866 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
1867 				 sizeof(peer->security[sec_index].michael_key));
1868 #endif /* BIG_ENDIAN_HOST */
1869 #endif
1870 
1871 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
1872 	if (sec_type != htt_sec_type_wapi) {
1873 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
1874 	} else {
1875 		for (i = 0; i < DP_MAX_TIDS; i++) {
1876 			/*
1877 			 * Setting PN valid bit for WAPI sec_type,
1878 			 * since WAPI PN has to be started with predefined value
1879 			 */
1880 			peer->tids_last_pn_valid[i] = 1;
1881 			qdf_mem_copy(
1882 				(u_int8_t *) &peer->tids_last_pn[i],
1883 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
1884 			peer->tids_last_pn[i].pn128[1] =
1885 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
1886 			peer->tids_last_pn[i].pn128[0] =
1887 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
1888 		}
1889 	}
1890 #endif
1891 	/* TODO: Update HW TID queue with PN check parameters (pn type for
1892 	 * all security types and last pn for WAPI) once REO command API
1893 	 * is available
1894 	 */
1895 }
1896 
1897 #ifndef CONFIG_WIN
/**
 * dp_register_peer() - Register peer into physical device
 * @pdev_handle - data path device instance
 * @sta_desc - peer descriptor
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
1908 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
1909 		struct ol_txrx_desc_type *sta_desc)
1910 {
1911 	struct dp_peer *peer;
1912 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1913 
1914 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
1915 			sta_desc->sta_id);
1916 	if (!peer)
1917 		return QDF_STATUS_E_FAULT;
1918 
1919 	qdf_spin_lock_bh(&peer->peer_info_lock);
1920 	peer->state = OL_TXRX_PEER_STATE_CONN;
1921 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1922 
1923 	return QDF_STATUS_SUCCESS;
1924 }
1925 
/**
 * dp_clear_peer() - remove peer from physical device
 * @pdev_handle - data path device instance
 * @local_id - local peer id
 *
 * Remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS removal success
 *         QDF_STATUS_E_FAULT peer not found
 */
1936 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
1937 {
1938 	struct dp_peer *peer;
1939 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1940 
1941 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
1942 	if (!peer)
1943 		return QDF_STATUS_E_FAULT;
1944 
1945 	qdf_spin_lock_bh(&peer->peer_info_lock);
1946 	peer->state = OL_TXRX_PEER_STATE_DISC;
1947 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1948 
1949 	return QDF_STATUS_SUCCESS;
1950 }
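
/*
 * Illustrative sketch, not part of the original driver code: typical MCL
 * control-path usage pairs dp_register_peer() at association with
 * dp_clear_peer() at disconnect, moving the peer CONN -> DISC. This
 * assumes struct ol_txrx_desc_type carries at least the station id; the
 * helper name is hypothetical.
 */
static inline void example_peer_conn_disc(struct cdp_pdev *pdev,
	uint8_t sta_id)
{
	struct ol_txrx_desc_type desc = {0};

	desc.sta_id = sta_id;
	if (dp_register_peer(pdev, &desc) != QDF_STATUS_SUCCESS)
		return;
	/* ... peer carries traffic in the connected state ... */
	dp_clear_peer(pdev, sta_id);
}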
1951 
/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle - data path device instance
 * @vdev_handle - virtual interface instance
 * @peer_addr - peer mac address
 * @local_id - pointer filled with the local id of the matching peer
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
1964 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
1965 		struct cdp_vdev *vdev_handle,
1966 		uint8_t *peer_addr, uint8_t *local_id)
1967 {
1968 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1969 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1970 	struct dp_peer *peer;
1971 
1972 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
1973 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
1974 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
1975 
1976 	if (!peer)
1977 		return NULL;
1978 
1979 	if (peer->vdev != vdev)
1980 		return NULL;
1981 
1982 	*local_id = peer->local_id;
1983 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
1984 
1985 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1986 	 * Decrement it here.
1987 	 */
1988 	qdf_atomic_dec(&peer->ref_cnt);
1989 
1990 	return peer;
1991 }
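
/*
 * Illustrative sketch, not part of the original driver code: looking up
 * a peer within a specific vdev and retrieving its local id. The MAC
 * address and the helper name are placeholders.
 */
static inline void example_lookup_peer_in_vdev(struct cdp_pdev *pdev,
	struct cdp_vdev *vdev)
{
	uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0x00, 0x00, 0x01};
	uint8_t local_id;

	if (dp_find_peer_by_addr_and_vdev(pdev, vdev, mac, &local_id))
		DP_TRACE(INFO, "peer found with local id %d", local_id);
}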
1992 
1993 /**
1994  * dp_local_peer_id() - Find local peer id within peer instance
1995  * @peer - peer instance
1996  *
1997  * Find local peer id within peer instance
1998  *
1999  * Return: local peer id
2000  */
2001 uint16_t dp_local_peer_id(void *peer)
2002 {
2003 	return ((struct dp_peer *)peer)->local_id;
2004 }
2005 
/**
 * dp_peer_find_by_local_id() - Find peer by local peer id
 * @pdev_handle - data path device instance
 * @local_id - local peer id to find
 *
 * Find peer by local peer id within physical device
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
2016 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2017 {
2018 	struct dp_peer *peer;
2019 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2020 
2021 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2022 	peer = pdev->local_peer_ids.map[local_id];
2023 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2024 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2025 	return peer;
2026 }
2027 
/**
 * dp_peer_state_update() - update peer local state
 * @pdev_handle - data path device instance
 * @peer_mac - peer mac address
 * @state - new peer local state
 *
 * Update peer local state
 *
 * Return: QDF_STATUS_SUCCESS state update success
 *         QDF_STATUS_E_FAILURE peer not found
 */
2038 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2039 		enum ol_txrx_peer_state state)
2040 {
2041 	struct dp_peer *peer;
2042 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2043 
2044 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2045 	if (NULL == peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Failed to find peer for: [%pM]", peer_mac);
2048 		return QDF_STATUS_E_FAILURE;
2049 	}
2050 	peer->state = state;
2051 
2052 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2053 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2054 	 * Decrement it here.
2055 	 */
2056 	qdf_atomic_dec(&peer->ref_cnt);
2057 
2058 	return QDF_STATUS_SUCCESS;
2059 }
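
/*
 * Illustrative sketch, not part of the original driver code: moving a
 * peer to the authorized state once key exchange completes. The helper
 * name is hypothetical.
 */
static inline void example_authorize_peer(struct cdp_pdev *pdev,
	uint8_t *peer_mac)
{
	if (dp_peer_state_update(pdev, peer_mac, OL_TXRX_PEER_STATE_AUTH) !=
	    QDF_STATUS_SUCCESS)
		DP_TRACE(INFO, "authorize failed - peer not found");
}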
2060 
/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @peer_handle - peer instance
 * @vdev_id - virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
2070 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2071 {
2072 	struct dp_peer *peer = peer_handle;
2073 
2074 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2075 			peer, peer->vdev, peer->vdev->vdev_id);
2076 	*vdev_id = peer->vdev->vdev_id;
2077 	return QDF_STATUS_SUCCESS;
2078 }
2079 
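/**
 * dp_get_vdev_by_sta_id() - Get vdev instance from local peer id
 * @pdev_handle - data path device instance
 * @sta_id - local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */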
2080 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2081 				       uint8_t sta_id)
2082 {
2083 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2084 	struct dp_peer *peer = NULL;
2085 
2086 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2087 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2088 			  "Invalid sta id passed");
2089 		return NULL;
2090 	}
2091 
2092 	if (!pdev) {
2093 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2094 			  "PDEV not found for sta_id [%d]", sta_id);
2095 		return NULL;
2096 	}
2097 
2098 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2099 	if (!peer) {
2100 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2101 			  "PEER [%d] not found", sta_id);
2102 		return NULL;
2103 	}
2104 
2105 	return (struct cdp_vdev *)peer->vdev;
2106 }
2107 
/**
 * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
 * @peer_handle - peer instance
 *
 * Get virtual interface instance to which peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
2117 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2118 {
2119 	struct dp_peer *peer = peer_handle;
2120 
2121 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
2122 	return (struct cdp_vdev *)peer->vdev;
2123 }
2124 
/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle - peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
2134 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2135 {
2136 	struct dp_peer *peer = peer_handle;
2137 	uint8_t *mac;
2138 
2139 	mac = peer->mac_addr.raw;
2140 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2141 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2142 	return peer->mac_addr.raw;
2143 }
2144 
/**
 * dp_get_peer_state() - Get local peer state
 * @peer_handle - peer instance
 *
 * Get local peer state
 *
 * Return: peer state
 */
2153 int dp_get_peer_state(void *peer_handle)
2154 {
2155 	struct dp_peer *peer = peer_handle;
2156 
	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2158 	return peer->state;
2159 }
2160 
2161 /**
2162  * dp_get_last_assoc_received() - get time of last assoc received
2163  * @peer_handle: peer handle
2164  *
2165  * Return: pointer for the time of last assoc received
2166  */
2167 qdf_time_t *dp_get_last_assoc_received(void *peer_handle)
2168 {
2169 	struct dp_peer *peer = peer_handle;
2170 
2171 	DP_TRACE(INFO, "peer %pK last_assoc_rcvd: %lu", peer,
2172 		peer->last_assoc_rcvd);
2173 	return &peer->last_assoc_rcvd;
2174 }
2175 
2176 /**
2177  * dp_get_last_disassoc_received() - get time of last disassoc received
2178  * @peer_handle: peer handle
2179  *
2180  * Return: pointer for the time of last disassoc received
2181  */
2182 qdf_time_t *dp_get_last_disassoc_received(void *peer_handle)
2183 {
2184 	struct dp_peer *peer = peer_handle;
2185 
2186 	DP_TRACE(INFO, "peer %pK last_disassoc_rcvd: %lu", peer,
2187 		peer->last_disassoc_rcvd);
2188 	return &peer->last_disassoc_rcvd;
2189 }
2190 
2191 /**
2192  * dp_get_last_deauth_received() - get time of last deauth received
2193  * @peer_handle: peer handle
2194  *
2195  * Return: pointer for the time of last deauth received
2196  */
2197 qdf_time_t *dp_get_last_deauth_received(void *peer_handle)
2198 {
2199 	struct dp_peer *peer = peer_handle;
2200 
2201 	DP_TRACE(INFO, "peer %pK last_deauth_rcvd: %lu", peer,
2202 		peer->last_deauth_rcvd);
2203 	return &peer->last_deauth_rcvd;
2204 }
2205 
2206 /**
2207  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2208  * @pdev - data path device instance
2209  *
2210  * local peer id pool alloc for physical device
2211  *
2212  * Return: none
2213  */
2214 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2215 {
2216 	int i;
2217 
2218 	/* point the freelist to the first ID */
2219 	pdev->local_peer_ids.freelist = 0;
2220 
2221 	/* link each ID to the next one */
2222 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2223 		pdev->local_peer_ids.pool[i] = i + 1;
2224 		pdev->local_peer_ids.map[i] = NULL;
2225 	}
2226 
2227 	/* link the last ID to itself, to mark the end of the list */
2228 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2229 	pdev->local_peer_ids.pool[i] = i;
2230 
2231 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2232 	DP_TRACE(INFO, "Peer pool init");
2233 }
2234 
2235 /**
2236  * dp_local_peer_id_alloc() - allocate local peer id
2237  * @pdev - data path device instance
2238  * @peer - new peer instance
2239  *
2240  * allocate local peer id
2241  *
2242  * Return: none
2243  */
2244 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2245 {
2246 	int i;
2247 
2248 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2249 	i = pdev->local_peer_ids.freelist;
2250 	if (pdev->local_peer_ids.pool[i] == i) {
2251 		/* the list is empty, except for the list-end marker */
2252 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2253 	} else {
2254 		/* take the head ID and advance the freelist */
2255 		peer->local_id = i;
2256 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2257 		pdev->local_peer_ids.map[i] = peer;
2258 	}
2259 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2260 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2261 }
2262 
2263 /**
2264  * dp_local_peer_id_free() - remove local peer id
2265  * @pdev - data path device instance
2266  * @peer - peer instance should be removed
2267  *
2268  * remove local peer id
2269  *
2270  * Return: none
2271  */
2272 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2273 {
2274 	int i = peer->local_id;
2275 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2276 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2277 		return;
2278 	}
2279 
2280 	/* put this ID on the head of the freelist */
2281 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2282 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2283 	pdev->local_peer_ids.freelist = i;
2284 	pdev->local_peer_ids.map[i] = NULL;
2285 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2286 }
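
/*
 * Illustrative sketch, not part of the original driver code: the local
 * peer id pool is a singly linked freelist threaded through the pool[]
 * array, with an entry that points to itself marking the end of the
 * list. Allocation and release are paired around the peer's lifetime;
 * the helper name is hypothetical.
 */
static inline void example_local_id_lifetime(struct dp_pdev *pdev,
	struct dp_peer *peer)
{
	dp_local_peer_id_alloc(pdev, peer);
	if (peer->local_id == OL_TXRX_INVALID_LOCAL_PEER_ID)
		return;	/* pool exhausted */

	/* peer is now addressable via pdev->local_peer_ids.map[] */
	dp_local_peer_id_free(pdev, peer);
}
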
2287 #endif
2288 
/**
 * dp_get_peer_mac_addr_frm_id() - get mac address of the peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer id of the peer
 * @peer_mac: buffer (6 bytes) filled with the peer mac address
 *
 * Return: vdev_id of the vap
 */
2296 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2297 		uint16_t peer_id, uint8_t *peer_mac)
2298 {
2299 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2300 	struct dp_peer *peer;
2301 
2302 	peer = dp_peer_find_by_id(soc, peer_id);
2303 
2304 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2305 			"soc %pK peer_id %d", soc, peer_id);
2306 
2307 	if (!peer) {
2308 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2309 				"peer not found ");
2310 		return CDP_INVALID_VDEV_ID;
2311 	}
2312 
	qdf_mem_copy(peer_mac, peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2314 	return peer->vdev->vdev_id;
2315 }
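
/*
 * Illustrative sketch, not part of the original driver code: resolving a
 * firmware peer id back to a MAC address/vdev id pair. The caller must
 * supply a 6-byte buffer; the helper name is hypothetical.
 */
static inline void example_resolve_peer_id(struct cdp_soc_t *soc,
	uint16_t peer_id)
{
	uint8_t mac[QDF_MAC_ADDR_SIZE];
	uint8_t vdev_id;

	vdev_id = dp_get_peer_mac_addr_frm_id(soc, peer_id, mac);
	if (vdev_id != CDP_INVALID_VDEV_ID)
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"peer %d is %pM on vdev %d", peer_id, mac, vdev_id);
}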
2316 
/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: none
 */
2325 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2326 			void *cb_ctxt)
2327 {
2328 	struct dp_soc *soc = peer->vdev->pdev->soc;
2329 	struct hal_reo_cmd_params params;
2330 	int i;
2331 
2332 	if (!dp_stats_cmd_cb)
2333 		return;
2334 
2335 	qdf_mem_zero(&params, sizeof(params));
2336 	for (i = 0; i < DP_MAX_TIDS; i++) {
2337 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2338 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2339 			params.std.need_status = 1;
2340 			params.std.addr_lo =
2341 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2342 			params.std.addr_hi =
2343 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2344 
2345 			if (cb_ctxt) {
2346 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2347 					&params, dp_stats_cmd_cb, cb_ctxt);
2348 			} else {
2349 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2350 					&params, dp_stats_cmd_cb, rx_tid);
2351 			}
2352 
			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
2355 			qdf_mem_zero(&params, sizeof(params));
2356 			params.std.need_status = 0;
2357 			params.std.addr_lo =
2358 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2359 			params.std.addr_hi =
2360 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2361 			params.u.fl_cache_params.flush_no_inval = 1;
2362 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2363 				NULL);
2364 		}
2365 	}
2366 }
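
/*
 * Illustrative sketch, not part of the original driver code: a stats
 * callback handed to dp_peer_rxtid_stats() is invoked once per active
 * TID when the REO status ring returns the CMD_GET_QUEUE_STATS result.
 * The signature below assumes the REO command callback shape used with
 * dp_reo_send_cmd() elsewhere in this file; treat all names here as
 * placeholders.
 */
static inline void example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	if (!reo_status)
		return;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"rx tid stats ready: soc %pK ctxt %pK", soc, cb_ctxt);
}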
2367 
2368