xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include "dp_htt.h"
22 #include "dp_types.h"
23 #include "dp_internal.h"
24 #include "dp_peer.h"
25 #include "dp_rx_defrag.h"
26 #include <hal_api.h>
27 #include <hal_reo.h>
28 #ifdef CONFIG_MCL
29 #include <cds_ieee80211_common.h>
30 #include <cds_api.h>
31 #endif
32 #include <cdp_txrx_handle.h>
33 #include <wlan_cfg.h>
34 
35 #ifdef DP_LFR
36 static inline void
37 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
38 					uint8_t valid)
39 {
40 	params->u.upd_queue_params.update_svld = 1;
41 	params->u.upd_queue_params.svld = valid;
42 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
43 		"%s: Setting SSN valid bit to %d\n",
44 				__func__, valid);
45 }
46 #else
47 static inline void
48 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
49 					uint8_t valid) {}
50 #endif
51 
52 static inline int dp_peer_find_mac_addr_cmp(
53 	union dp_align_mac_addr *mac_addr1,
54 	union dp_align_mac_addr *mac_addr2)
55 {
56 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
57 		/*
58 		 * Intentionally use & rather than &&;
59 		 * because the operands are binary rather than generic boolean,
60 		 * the functionality is equivalent.
61 		 * Using && has the advantage of short-circuited evaluation,
62 		 * but using & has the advantage of no conditional branching,
63 		 * which is a more significant benefit.
64 		 */
65 		&
66 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
67 }
68 
69 static int dp_peer_find_map_attach(struct dp_soc *soc)
70 {
71 	uint32_t max_peers, peer_map_size;
72 
73 	max_peers = soc->max_peers;
74 	/* allocate the peer ID -> peer object map */
75 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
76 		"\n<=== cfg max peer id %d ====>\n", max_peers);
77 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
78 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
79 	if (!soc->peer_id_to_obj_map) {
80 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
81 			"%s: peer map memory allocation failed\n", __func__);
82 		return QDF_STATUS_E_NOMEM;
83 	}
84 
85 	/*
86 	 * The peer_id_to_obj_map doesn't really need to be initialized,
87 	 * since elements are only used after they have been individually
88 	 * initialized.
89 	 * However, it is convenient for debugging to have all elements
90 	 * that are not in use set to 0.
91 	 */
92 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
93 	return 0; /* success */
94 }
95 
96 static int dp_log2_ceil(unsigned value)
97 {
98 	unsigned tmp = value;
99 	int log2 = -1;
100 
101 	while (tmp) {
102 		log2++;
103 		tmp >>= 1;
104 	}
105 	if (1 << log2 != value)
106 		log2++;
107 	return log2;
108 }
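/*
 * Worked example (illustrative only; the callers below always pass a
 * non-zero value): dp_log2_ceil(33) returns 6, since 1 << 6 = 64 is the
 * smallest power of two >= 33, while dp_log2_ceil(32) returns 5 because 32
 * is already a power of two. The result is used to round the hash tables
 * up to a power-of-two bin count so the index can be masked instead of
 * computed with a modulo.
 */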
109 
110 static int dp_peer_find_add_id_to_obj(
111 	struct dp_peer *peer,
112 	uint16_t peer_id)
113 {
114 	int i;
115 
116 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
117 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
118 			peer->peer_ids[i] = peer_id;
119 			return 0; /* success */
120 		}
121 	}
122 	return QDF_STATUS_E_FAILURE; /* failure */
123 }
124 
125 #define DP_PEER_HASH_LOAD_MULT  2
126 #define DP_PEER_HASH_LOAD_SHIFT 0
127 
128 #define DP_AST_HASH_LOAD_MULT  2
129 #define DP_AST_HASH_LOAD_SHIFT 0
130 
131 static int dp_peer_find_hash_attach(struct dp_soc *soc)
132 {
133 	int i, hash_elems, log2;
134 
135 	/* allocate the peer MAC address -> peer object hash table */
136 	hash_elems = soc->max_peers;
137 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
138 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
139 	log2 = dp_log2_ceil(hash_elems);
140 	hash_elems = 1 << log2;
141 
142 	soc->peer_hash.mask = hash_elems - 1;
143 	soc->peer_hash.idx_bits = log2;
144 	/* allocate an array of TAILQ peer object lists */
145 	soc->peer_hash.bins = qdf_mem_malloc(
146 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
147 	if (!soc->peer_hash.bins)
148 		return QDF_STATUS_E_NOMEM;
149 
150 	for (i = 0; i < hash_elems; i++)
151 		TAILQ_INIT(&soc->peer_hash.bins[i]);
152 
153 	return 0;
154 }
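/*
 * Sizing sketch for the table above (numbers are hypothetical): with
 * max_peers = 1024, DP_PEER_HASH_LOAD_MULT = 2 and
 * DP_PEER_HASH_LOAD_SHIFT = 0, hash_elems starts at 2048,
 * dp_log2_ceil(2048) = 11, so the table keeps 2048 bins with
 * peer_hash.mask = 0x7ff and peer_hash.idx_bits = 11.
 */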
155 
156 static void dp_peer_find_hash_detach(struct dp_soc *soc)
157 {
158 	qdf_mem_free(soc->peer_hash.bins);
159 }
160 
161 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
162 	union dp_align_mac_addr *mac_addr)
163 {
164 	unsigned index;
165 
166 	index =
167 		mac_addr->align2.bytes_ab ^
168 		mac_addr->align2.bytes_cd ^
169 		mac_addr->align2.bytes_ef;
170 	index ^= index >> soc->peer_hash.idx_bits;
171 	index &= soc->peer_hash.mask;
172 	return index;
173 }
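/*
 * Index computation sketch (assumes a little-endian host and the 2048-bin
 * sizing from the example above): for MAC 00:11:22:33:44:55 the three
 * 16-bit halves are 0x1100, 0x3322 and 0x5544, so
 *
 *	index  = 0x1100 ^ 0x3322 ^ 0x5544;	// 0x7766
 *	index ^= index >> 11;			// 0x7768
 *	index &= 0x7ff;				// 0x768
 *
 * i.e. this peer lands in bin 0x768.
 */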
174 
175 
176 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
177 {
178 	unsigned index;
179 
180 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
181 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
182 	/*
183 	 * It is important to add the new peer at the tail of the peer list
184 	 * with the bin index.  Together with having the hash_find function
185 	 * search from head to tail, this ensures that if two entries with
186 	 * the same MAC address are stored, the one added first will be
187 	 * found first.
188 	 */
189 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
190 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
191 }
192 
193 #ifdef FEATURE_AST
194 /*
195  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
196  * @soc: SoC handle
197  *
198  * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
199  */
200 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
201 {
202 	int i, hash_elems, log2;
203 
204 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
205 		DP_AST_HASH_LOAD_SHIFT);
206 
207 	log2 = dp_log2_ceil(hash_elems);
208 	hash_elems = 1 << log2;
209 
210 	soc->ast_hash.mask = hash_elems - 1;
211 	soc->ast_hash.idx_bits = log2;
212 
213 	/* allocate an array of TAILQ peer object lists */
214 	soc->ast_hash.bins = qdf_mem_malloc(
215 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
216 				dp_ast_entry)));
217 
218 	if (!soc->ast_hash.bins)
219 		return QDF_STATUS_E_NOMEM;
220 
221 	for (i = 0; i < hash_elems; i++)
222 		TAILQ_INIT(&soc->ast_hash.bins[i]);
223 
224 	return 0;
225 }
226 
227 /*
228  * dp_peer_ast_hash_detach() - Free AST Hash table
229  * @soc: SoC handle
230  *
231  * Return: None
232  */
233 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
234 {
235 	qdf_mem_free(soc->ast_hash.bins);
236 }
237 
238 /*
239  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
240  * @soc: SoC handle
241  *
242  * Return: AST hash
243  */
244 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
245 	union dp_align_mac_addr *mac_addr)
246 {
247 	uint32_t index;
248 
249 	index =
250 		mac_addr->align2.bytes_ab ^
251 		mac_addr->align2.bytes_cd ^
252 		mac_addr->align2.bytes_ef;
253 	index ^= index >> soc->ast_hash.idx_bits;
254 	index &= soc->ast_hash.mask;
255 	return index;
256 }
257 
258 /*
259  * dp_peer_ast_hash_add() - Add AST entry into hash table
260  * @soc: SoC handle
261  *
262  * This function adds the AST entry into SoC AST hash table
263  * It assumes caller has taken the ast lock to protect the access to this table
264  *
265  * Return: None
266  */
267 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
268 		struct dp_ast_entry *ase)
269 {
270 	uint32_t index;
271 
272 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
273 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
274 }
275 
276 /*
277  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
278  * @soc: SoC handle
279  *
280  * This function removes the AST entry from soc AST hash table
281  * It assumes caller has taken the ast lock to protect the access to this table
282  *
283  * Return: None
284  */
285 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
286 		struct dp_ast_entry *ase)
287 {
288 	unsigned index;
289 	struct dp_ast_entry *tmpase;
290 	int found = 0;
291 
292 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
293 	/* Check that the bin is not empty before deleting */
294 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
295 
296 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
297 		if (tmpase == ase) {
298 			found = 1;
299 			break;
300 		}
301 	}
302 
303 	QDF_ASSERT(found);
304 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
305 }
306 
307 /*
308  * dp_peer_ast_hash_find() - Find AST entry by MAC address
309  * @soc: SoC handle
310  *
311  * It assumes caller has taken the ast lock to protect the access to
312  * AST hash table
313  *
314  * Return: AST entry
315  */
316 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
317 						uint8_t *ast_mac_addr)
318 {
319 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
320 	unsigned index;
321 	struct dp_ast_entry *ase;
322 
323 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
324 			ast_mac_addr, DP_MAC_ADDR_LEN);
325 	mac_addr = &local_mac_addr_aligned;
326 
327 	index = dp_peer_ast_hash_index(soc, mac_addr);
328 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
329 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
330 			return ase;
331 		}
332 	}
333 
334 	return NULL;
335 }
336 
337 /*
338  * dp_peer_map_ast() - Map the ast entry with HW AST Index
339  * @soc: SoC handle
340  * @peer: peer to which ast node belongs
341  * @mac_addr: MAC address of ast node
342  * @hw_peer_id: HW AST Index returned by target in peer map event
343  * @vdev_id: vdev id for VAP to which the peer belongs to
344  *
345  * Return: None
346  */
347 static inline void dp_peer_map_ast(struct dp_soc *soc,
348 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
349 	uint8_t vdev_id)
350 {
351 	struct dp_ast_entry *ast_entry;
352 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
353 	bool ast_entry_found = FALSE;
354 
355 	if (!peer) {
356 		return;
357 	}
358 
359 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
360 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
361 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
362 		mac_addr[1], mac_addr[2], mac_addr[3],
363 		mac_addr[4], mac_addr[5]);
364 
365 	qdf_spin_lock_bh(&soc->ast_lock);
366 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
367 		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
368 				DP_MAC_ADDR_LEN))) {
369 			ast_entry->ast_idx = hw_peer_id;
370 			soc->ast_table[hw_peer_id] = ast_entry;
371 			ast_entry->is_active = TRUE;
372 			peer_type = ast_entry->type;
373 			ast_entry_found = TRUE;
374 		}
375 	}
376 
377 	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
378 		if (soc->cdp_soc.ol_ops->peer_map_event) {
379 			soc->cdp_soc.ol_ops->peer_map_event(
380 			soc->ctrl_psoc, peer->peer_ids[0],
381 			hw_peer_id, vdev_id,
382 			mac_addr, peer_type);
383 		}
384 	} else {
385 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
386 			"AST entry not found\n");
387 	}
388 
389 	qdf_spin_unlock_bh(&soc->ast_lock);
390 	return;
391 }
392 
393 /*
394  * dp_peer_add_ast() - Allocate and add AST entry into peer list
395  * @soc: SoC handle
396  * @peer: peer to which ast node belongs
397  * @mac_addr: MAC address of ast node
398  * @type: Type of AST entry
 * @flags: flags passed through to the control-path WDS entry add
399  *
400  * This API is used by WDS source port learning function to
401  * add a new AST entry into peer AST list
402  *
403  * Return: 0 if new entry is allocated,
404  *        -1 if entry add failed
405  */
406 int dp_peer_add_ast(struct dp_soc *soc,
407 			struct dp_peer *peer,
408 			uint8_t *mac_addr,
409 			enum cdp_txrx_ast_entry_type type,
410 			uint32_t flags)
411 {
412 	struct dp_ast_entry *ast_entry;
413 	struct dp_vdev *vdev = peer->vdev;
414 	uint8_t next_node_mac[6];
415 	int  ret = -1;
416 
417 	if (!vdev) {
418 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
419 			FL("Peers vdev is NULL"));
420 		QDF_ASSERT(0);
421 		return ret;
422 	}
423 
424 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
425 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x\n",
426 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
427 		mac_addr[3], mac_addr[4], mac_addr[5]);
428 
429 	qdf_spin_lock_bh(&soc->ast_lock);
430 
431 	/* If AST entry already exists, just return from here */
432 	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
433 
434 	if (ast_entry) {
435 		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
436 			ast_entry->is_active = TRUE;
437 
438 		qdf_spin_unlock_bh(&soc->ast_lock);
439 		return 0;
440 	}
441 
442 	ast_entry = (struct dp_ast_entry *)
443 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
444 
445 	if (!ast_entry) {
446 		qdf_spin_unlock_bh(&soc->ast_lock);
447 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
448 			FL("fail to allocate ast_entry"));
449 		QDF_ASSERT(0);
450 		return ret;
451 	}
452 
453 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
454 	ast_entry->peer = peer;
455 	ast_entry->pdev_id = vdev->pdev->pdev_id;
456 	ast_entry->vdev_id = vdev->vdev_id;
457 
458 	switch (type) {
459 	case CDP_TXRX_AST_TYPE_STATIC:
460 		peer->self_ast_entry = ast_entry;
461 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
462 		break;
463 	case CDP_TXRX_AST_TYPE_WDS:
464 		ast_entry->next_hop = 1;
465 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
466 		break;
467 	case CDP_TXRX_AST_TYPE_WDS_HM:
468 		ast_entry->next_hop = 1;
469 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
470 		break;
471 	case CDP_TXRX_AST_TYPE_MEC:
472 		ast_entry->next_hop = 1;
473 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
474 		break;
475 	default:
476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
477 			FL("Incorrect AST entry type"));
478 	}
479 
480 	ast_entry->is_active = TRUE;
481 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
482 	DP_STATS_INC(soc, ast.added, 1);
483 	dp_peer_ast_hash_add(soc, ast_entry);
484 	qdf_spin_unlock_bh(&soc->ast_lock);
485 
486 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
487 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
488 	else
489 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
490 
491 	if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) {
492 		if (QDF_STATUS_SUCCESS ==
493 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
494 				peer->vdev->osif_vdev,
495 				mac_addr,
496 				next_node_mac,
497 				flags))
498 			return 0;
499 	}
500 
501 	return ret;
502 }
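/*
 * Usage sketch for dp_peer_add_ast() from a WDS source-port-learning path
 * (a minimal illustration; the sa extraction and the wds_flags value are
 * assumptions, not APIs of this file). Note that dp_peer_add_ast() takes
 * soc->ast_lock internally, so the caller must not already hold it:
 *
 *	uint8_t sa[DP_MAC_ADDR_LEN];	// source MAC learned from an MSDU
 *	uint32_t wds_flags = 0;		// hypothetical flags for the entry
 *
 *	if (dp_peer_add_ast(soc, ta_peer, sa,
 *			    CDP_TXRX_AST_TYPE_WDS, wds_flags))
 *		;	// entry was neither found nor added; handle failure
 */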
503 
504 /*
505  * dp_peer_del_ast() - Delete and free AST entry
506  * @soc: SoC handle
507  * @ast_entry: AST entry of the node
508  *
509  * This function removes the AST entry from peer and soc tables
510  * It assumes caller has taken the ast lock to protect the access to these
511  * tables
512  *
513  * Return: None
514  */
515 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
516 {
517 	struct dp_peer *peer = ast_entry->peer;
518 
519 	if (ast_entry->next_hop)
520 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
521 						ast_entry->mac_addr.raw);
522 
523 	soc->ast_table[ast_entry->ast_idx] = NULL;
524 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
525 	DP_STATS_INC(soc, ast.deleted, 1);
526 	dp_peer_ast_hash_remove(soc, ast_entry);
527 	qdf_mem_free(ast_entry);
528 }
529 
530 /*
531  * dp_peer_update_ast() - Update AST entry for a roamed peer
532  * @soc: SoC handle
533  * @peer: peer to which ast node belongs
534  * @ast_entry: AST entry of the node
535  * @flags: wds or hmwds
536  *
537  * This function updates the AST entry to the roamed peer and soc tables.
538  * It assumes caller has taken the ast lock to protect the access to these
539  * tables
540  *
541  * Return: 0 if ast entry is updated successfully
542  *         -1 failure
543  */
544 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
545 		       struct dp_ast_entry *ast_entry, uint32_t flags)
546 {
547 	int ret = -1;
548 	struct dp_peer *old_peer;
549 	struct dp_peer *sa_peer;
550 
551 	if (ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) {
552 		sa_peer = ast_entry->peer;
553 
554 		/*
555 		 * Kickout, when direct associated peer(SA) roams
556 		 * to another AP and reachable via TA peer
557 		 */
558 		if (!sa_peer->delete_in_progress) {
559 			sa_peer->delete_in_progress = true;
560 			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
561 				soc->cdp_soc.ol_ops->peer_sta_kickout(
562 						sa_peer->vdev->pdev->ctrl_pdev,
563 						ast_entry->mac_addr.raw);
564 			}
565 			return 0;
566 		}
567 	}
568 
569 	old_peer = ast_entry->peer;
570 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
571 
572 	ast_entry->peer = peer;
573 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
574 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
575 	ast_entry->vdev_id = peer->vdev->vdev_id;
576 	ast_entry->is_active = TRUE;
577 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
578 
579 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
580 			peer->vdev->osif_vdev,
581 				ast_entry->mac_addr.raw,
582 				peer->mac_addr.raw,
583 				flags);
584 
585 	return ret;
586 }
587 
588 /*
589  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
590  * @soc: SoC handle
591  * @ast_entry: AST entry of the node
592  *
593  * This function gets the pdev_id from the ast entry.
594  *
595  * Return: (uint8_t) pdev_id
596  */
597 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
598 				struct dp_ast_entry *ast_entry)
599 {
600 	return ast_entry->pdev_id;
601 }
602 
603 /*
604  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
605  * @soc: SoC handle
606  * @ast_entry: AST entry of the node
607  *
608  * This function gets the next hop from the ast entry.
609  *
610  * Return: (uint8_t) next_hop
611  */
612 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
613 				struct dp_ast_entry *ast_entry)
614 {
615 	return ast_entry->next_hop;
616 }
617 
618 /*
619  * dp_peer_ast_set_type() - set type in the ast entry
620  * @soc: SoC handle
621  * @ast_entry: AST entry of the node
622  *
623  * This function sets the type in the ast entry.
624  *
625  * Return: None
626  */
627 void dp_peer_ast_set_type(struct dp_soc *soc,
628 				struct dp_ast_entry *ast_entry,
629 				enum cdp_txrx_ast_entry_type type)
630 {
631 	ast_entry->type = type;
632 }
633 
634 #else
635 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
636 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
637 		uint32_t flags)
638 {
639 	return 1;
640 }
641 
642 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
643 {
644 }
645 
646 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
647 			struct dp_ast_entry *ast_entry, uint32_t flags)
648 {
649 	return 1;
650 }
651 
652 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
653 						uint8_t *ast_mac_addr)
654 {
655 	return NULL;
656 }
657 
658 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
659 {
660 	return 0;
661 }
662 
663 static inline void dp_peer_map_ast(struct dp_soc *soc,
664 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
665 	uint8_t vdev_id)
666 {
667 	return;
668 }
669 
670 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
671 {
672 }
673 
674 void dp_peer_ast_set_type(struct dp_soc *soc,
675 				struct dp_ast_entry *ast_entry,
676 				enum cdp_txrx_ast_entry_type type)
677 {
678 }
679 
680 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
681 				struct dp_ast_entry *ast_entry)
682 {
683 	return 0xff;
684 }
685 
686 
687 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
688 				struct dp_ast_entry *ast_entry)
689 {
690 	return 0xff;
691 }
692 #endif
693 
694 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
695 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
696 {
697 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
698 	unsigned index;
699 	struct dp_peer *peer;
700 
701 	if (mac_addr_is_aligned) {
702 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
703 	} else {
704 		qdf_mem_copy(
705 			&local_mac_addr_aligned.raw[0],
706 			peer_mac_addr, DP_MAC_ADDR_LEN);
707 		mac_addr = &local_mac_addr_aligned;
708 	}
709 	index = dp_peer_find_hash_index(soc, mac_addr);
710 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
711 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
712 #if ATH_SUPPORT_WRAP
713 		/* ProxySTA may have multiple BSS peers with the same MAC address;
714 		 * this modified find uses the vdev_id to locate the correct BSS peer.
715 		 */
716 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
717 			((peer->vdev->vdev_id == vdev_id) ||
718 			 (vdev_id == DP_VDEV_ALL))) {
719 #else
720 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
721 #endif
722 			/* found it - increment the ref count before releasing
723 			 * the lock
724 			 */
725 			qdf_atomic_inc(&peer->ref_cnt);
726 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
727 			return peer;
728 		}
729 	}
730 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
731 	return NULL; /* failure */
732 }
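/*
 * Reference-count sketch for dp_peer_find_hash_find(): the function returns
 * with peer->ref_cnt already incremented, so every successful lookup must
 * eventually be balanced with dp_peer_unref_delete() (variable names below
 * are illustrative):
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *	if (peer) {
 *		// ... use peer fields here ...
 *		dp_peer_unref_delete(peer);	// drop the lookup reference
 *	}
 */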
733 
734 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
735 {
736 	unsigned index;
737 	struct dp_peer *tmppeer = NULL;
738 	int found = 0;
739 
740 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
741 	/* Check that the bin is not empty before deleting */
742 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
743 	/*
744 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
745 	 * by the caller.
746 	 * The caller needs to hold the lock from the time the peer object's
747 	 * reference count is decremented and tested up through the time the
748 	 * reference to the peer object is removed from the hash table, by
749 	 * this function.
750 	 * Holding the lock only while removing the peer object reference
751 	 * from the hash table keeps the hash table consistent, but does not
752 	 * protect against a new HL tx context starting to use the peer object
753 	 * if it looks up the peer object from its MAC address just after the
754 	 * peer ref count is decremented to zero, but just before the peer
755 	 * object reference is removed from the hash table.
756 	 */
757 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
758 		if (tmppeer == peer) {
759 			found = 1;
760 			break;
761 		}
762 	}
763 	QDF_ASSERT(found);
764 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
765 }
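/*
 * Locking sketch for dp_peer_find_hash_remove(), a minimal illustration of
 * the protocol described in the comment above (the caller holds
 * peer_ref_mutex across the final ref-count decrement and the removal):
 *
 *	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 *	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 *		dp_peer_find_hash_remove(soc, peer);
 *		// ... rest of the peer teardown ...
 *	}
 *	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 */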
766 
767 void dp_peer_find_hash_erase(struct dp_soc *soc)
768 {
769 	int i;
770 
771 	/*
772 	 * Not really necessary to take peer_ref_mutex lock - by this point,
773 	 * it's known that the soc is no longer in use.
774 	 */
775 	for (i = 0; i <= soc->peer_hash.mask; i++) {
776 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
777 			struct dp_peer *peer, *peer_next;
778 
779 			/*
780 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
781 			 * memory access violation after peer is freed
782 			 */
783 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
784 				hash_list_elem, peer_next) {
785 				/*
786 				 * Don't remove the peer from the hash table -
787 				 * that would modify the list we are currently
788 				 * traversing, and it's not necessary anyway.
789 				 */
790 				/*
791 				 * Artificially adjust the peer's ref count to
792 				 * 1, so it will get deleted by
793 				 * dp_peer_unref_delete.
794 				 */
795 				/* set to zero */
796 				qdf_atomic_init(&peer->ref_cnt);
797 				/* incr to one */
798 				qdf_atomic_inc(&peer->ref_cnt);
799 				dp_peer_unref_delete(peer);
800 			}
801 		}
802 	}
803 }
804 
805 static void dp_peer_find_map_detach(struct dp_soc *soc)
806 {
807 	qdf_mem_free(soc->peer_id_to_obj_map);
808 }
809 
810 int dp_peer_find_attach(struct dp_soc *soc)
811 {
812 	if (dp_peer_find_map_attach(soc))
813 		return 1;
814 
815 	if (dp_peer_find_hash_attach(soc)) {
816 		dp_peer_find_map_detach(soc);
817 		return 1;
818 	}
819 
820 	if (dp_peer_ast_hash_attach(soc)) {
821 		dp_peer_find_hash_detach(soc);
822 		dp_peer_find_map_detach(soc);
823 		return 1;
824 	}
825 	return 0; /* success */
826 }
827 
828 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
829 	union hal_reo_status *reo_status)
830 {
831 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
832 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
833 
834 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
835 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
836 			queue_status->header.status, rx_tid->tid);
837 		return;
838 	}
839 
840 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
841 		"ssn: %d\n"
842 		"curr_idx  : %d\n"
843 		"pn_31_0   : %08x\n"
844 		"pn_63_32  : %08x\n"
845 		"pn_95_64  : %08x\n"
846 		"pn_127_96 : %08x\n"
847 		"last_rx_enq_tstamp : %08x\n"
848 		"last_rx_deq_tstamp : %08x\n"
849 		"rx_bitmap_31_0     : %08x\n"
850 		"rx_bitmap_63_32    : %08x\n"
851 		"rx_bitmap_95_64    : %08x\n"
852 		"rx_bitmap_127_96   : %08x\n"
853 		"rx_bitmap_159_128  : %08x\n"
854 		"rx_bitmap_191_160  : %08x\n"
855 		"rx_bitmap_223_192  : %08x\n"
856 		"rx_bitmap_255_224  : %08x\n",
857 		rx_tid->tid,
858 		queue_status->ssn, queue_status->curr_idx,
859 		queue_status->pn_31_0, queue_status->pn_63_32,
860 		queue_status->pn_95_64, queue_status->pn_127_96,
861 		queue_status->last_rx_enq_tstamp,
862 		queue_status->last_rx_deq_tstamp,
863 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
864 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
865 		queue_status->rx_bitmap_159_128,
866 		queue_status->rx_bitmap_191_160,
867 		queue_status->rx_bitmap_223_192,
868 		queue_status->rx_bitmap_255_224);
869 
870 	DP_TRACE_STATS(FATAL,
871 		"curr_mpdu_cnt      : %d\n"
872 		"curr_msdu_cnt      : %d\n"
873 		"fwd_timeout_cnt    : %d\n"
874 		"fwd_bar_cnt        : %d\n"
875 		"dup_cnt            : %d\n"
876 		"frms_in_order_cnt  : %d\n"
877 		"bar_rcvd_cnt       : %d\n"
878 		"mpdu_frms_cnt      : %d\n"
879 		"msdu_frms_cnt      : %d\n"
880 		"total_byte_cnt     : %d\n"
881 		"late_recv_mpdu_cnt : %d\n"
882 		"win_jump_2k 	    : %d\n"
883 		"hole_cnt 	    : %d\n",
884 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
885 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
886 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
887 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
888 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
889 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
890 		queue_status->hole_cnt);
891 
892 	DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req);
893 	DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp);
894 	DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req);
895 	DP_PRINT_STATS("BA window size   = %d\n", rx_tid->ba_win_size);
896 	DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size);
897 }
898 
899 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
900 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
901 	uint8_t vdev_id)
902 {
903 	struct dp_peer *peer;
904 
905 	QDF_ASSERT(peer_id <= soc->max_peers);
906 	/* check if there's already a peer object with this MAC address */
907 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
908 		0 /* is aligned */, vdev_id);
909 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
910 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
911 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
912 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
913 		peer_mac_addr[4], peer_mac_addr[5]);
914 
915 	if (peer) {
916 		/* peer's ref count was already incremented by
917 		 * peer_find_hash_find
918 		 */
919 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
920 			  "%s: ref_cnt: %d", __func__,
921 			   qdf_atomic_read(&peer->ref_cnt));
922 		soc->peer_id_to_obj_map[peer_id] = peer;
923 
924 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
925 			/* TBDXXX: assert for now */
926 			QDF_ASSERT(0);
927 		}
928 
929 		return peer;
930 	}
931 
932 	return NULL;
933 }
934 
935 /**
936  * dp_rx_peer_map_handler() - handle peer map event from firmware
937  * @soc_handle - generic soc handle
938  * @peer_id - peer_id from firmware
939  * @hw_peer_id - ast index for this peer
940  * @vdev_id - vdev ID
941  * @peer_mac_addr - MAC address of the peer
942  *
943  * Associate the peer_id that firmware provided with the peer entry
944  * and update the ast table in the host with the hw_peer_id.
945  *
946  * Return: none
947  */
948 
949 void
950 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
951 			uint8_t vdev_id, uint8_t *peer_mac_addr)
952 {
953 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
954 	struct dp_peer *peer = NULL;
955 
956 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
957 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
958 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
959 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
960 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
961 		peer_mac_addr[5], vdev_id);
962 
963 	peer = soc->peer_id_to_obj_map[peer_id];
964 
965 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
966 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
967 			"invalid hw_peer_id: %d", hw_peer_id);
968 		qdf_assert_always(0);
969 	}
970 
971 	/*
972 	 * check if peer already exists for this peer_id, if so
973 	 * this peer map event is in response for a wds peer add
974 	 * wmi command sent during wds source port learning.
975 	 * in this case just add the ast entry to the existing
976 	 * peer ast_list.
977 	 */
978 	if (!peer)
979 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
980 					hw_peer_id, vdev_id);
981 
982 	if (peer) {
983 		qdf_assert_always(peer->vdev);
984 		/*
985 		 * For every peer map message, check if it is for the bss peer and set bss_peer
986 		 */
987 		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
988 				 DP_MAC_ADDR_LEN))) {
989 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
990 				"vdev bss_peer!!!!");
991 			peer->bss_peer = 1;
992 			peer->vdev->vap_bss_peer = peer;
993 		}
994 	}
995 
996 	dp_peer_map_ast(soc, peer, peer_mac_addr,
997 			hw_peer_id, vdev_id);
998 }
999 
1000 void
1001 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
1002 {
1003 	struct dp_peer *peer;
1004 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1005 	uint8_t i;
1006 
1007 	peer = __dp_peer_find_by_id(soc, peer_id);
1008 
1009 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1010 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK\n",
1011 		soc, peer_id, peer);
1012 
1013 	/*
1014 	 * Currently peer IDs are assigned for vdevs as well as peers.
1015 	 * If the peer ID is for a vdev, then the peer pointer stored
1016 	 * in peer_id_to_obj_map will be NULL.
1017 	 */
1018 	if (!peer) {
1019 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1020 			"%s: Received unmap event for invalid peer_id"
1021 			" %u\n", __func__, peer_id);
1022 		return;
1023 	}
1024 
1025 	soc->peer_id_to_obj_map[peer_id] = NULL;
1026 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1027 		if (peer->peer_ids[i] == peer_id) {
1028 			peer->peer_ids[i] = HTT_INVALID_PEER;
1029 			break;
1030 		}
1031 	}
1032 
1033 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1034 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1035 				peer_id);
1036 	}
1037 
1038 	/*
1039 	 * Remove a reference to the peer.
1040 	 * If there are no more references, delete the peer object.
1041 	 */
1042 	dp_peer_unref_delete(peer);
1043 }
1044 
1045 void
1046 dp_peer_find_detach(struct dp_soc *soc)
1047 {
1048 	dp_peer_find_map_detach(soc);
1049 	dp_peer_find_hash_detach(soc);
1050 	dp_peer_ast_hash_detach(soc);
1051 }
1052 
1053 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1054 	union hal_reo_status *reo_status)
1055 {
1056 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1057 
1058 	if ((reo_status->rx_queue_status.header.status !=
1059 		HAL_REO_CMD_SUCCESS) &&
1060 		(reo_status->rx_queue_status.header.status !=
1061 		HAL_REO_CMD_DRAIN)) {
1062 		/* Should not happen normally. Just print error for now */
1063 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1064 			"%s: Rx tid HW desc update failed(%d): tid %d\n",
1065 			__func__,
1066 			reo_status->rx_queue_status.header.status,
1067 			rx_tid->tid);
1068 	}
1069 }
1070 
1071 /*
1072  * dp_find_peer_by_addr - find peer instance by mac address
1073  * @dev: physical device instance
1074  * @peer_mac_addr: peer mac address
1075  * @local_id: local id for the peer
1076  *
1077  * Return: peer instance pointer
1078  */
1079 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1080 		uint8_t *local_id)
1081 {
1082 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1083 	struct dp_peer *peer;
1084 
1085 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1086 
1087 	if (!peer)
1088 		return NULL;
1089 
1090 	/* Multiple peer ids? How can we know which peer id to return? */
1091 	*local_id = peer->local_id;
1092 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1093 
1094 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1095 	 * Decrement it here.
1096 	 */
1097 	qdf_atomic_dec(&peer->ref_cnt);
1098 
1099 	return peer;
1100 }
1101 
1102 /*
1103  * dp_rx_tid_update_wifi3() – Update receive TID state
1104  * @peer: Datapath peer handle
1105  * @tid: TID
1106  * @ba_window_size: BlockAck window size
1107  * @start_seq: Starting sequence number
1108  *
1109  * Return: 0 on success, error code on failure
1110  */
1111 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1112 				  ba_window_size, uint32_t start_seq)
1113 {
1114 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1115 	struct dp_soc *soc = peer->vdev->pdev->soc;
1116 	struct hal_reo_cmd_params params;
1117 
1118 	qdf_mem_zero(&params, sizeof(params));
1119 
1120 	params.std.need_status = 1;
1121 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1122 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1123 	params.u.upd_queue_params.update_ba_window_size = 1;
1124 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1125 
1126 	if (start_seq < IEEE80211_SEQ_MAX) {
1127 		params.u.upd_queue_params.update_ssn = 1;
1128 		params.u.upd_queue_params.ssn = start_seq;
1129 	}
1130 
1131 	dp_set_ssn_valid_flag(&params, 0);
1132 
1133 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1134 	return 0;
1135 }
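/*
 * Address-split sketch for the REO commands above (hypothetical address):
 * a 64-bit hw_qdesc_paddr of 0x0000000470d2a000 is programmed as
 *
 *	params.std.addr_lo = 0x70d2a000;	// paddr & 0xffffffff
 *	params.std.addr_hi = 0x4;		// paddr >> 32
 *
 * The same split is used by every REO command issued in this file.
 */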
1136 
1137 /*
1138  * dp_reo_desc_free() - Callback free reo descriptor memory after
1139  * dp_reo_desc_free() - Callback to free reo descriptor memory after
1140  *
1141  * @soc: DP SOC handle
1142  * @cb_ctxt: Callback context
1143  * @reo_status: REO command status
1144  */
1145 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1146 	union hal_reo_status *reo_status)
1147 {
1148 	struct reo_desc_list_node *freedesc =
1149 		(struct reo_desc_list_node *)cb_ctxt;
1150 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1151 
1152 	if ((reo_status->fl_cache_status.header.status !=
1153 		HAL_REO_CMD_SUCCESS) &&
1154 		(reo_status->fl_cache_status.header.status !=
1155 		HAL_REO_CMD_DRAIN)) {
1156 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1157 			"%s: Rx tid HW desc flush failed(%d): tid %d\n",
1158 			__func__,
1159 			reo_status->rx_queue_status.header.status,
1160 			freedesc->rx_tid.tid);
1161 	}
1162 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1163 		"%s: hw_qdesc_paddr: %pK, tid:%d\n", __func__,
1164 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1165 	qdf_mem_unmap_nbytes_single(soc->osdev,
1166 		rx_tid->hw_qdesc_paddr,
1167 		QDF_DMA_BIDIRECTIONAL,
1168 		rx_tid->hw_qdesc_alloc_size);
1169 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1170 	qdf_mem_free(freedesc);
1171 }
1172 
1173 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1174 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1175 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1176 {
1177 	if (dma_addr < 0x50000000)
1178 		return QDF_STATUS_E_FAILURE;
1179 	else
1180 		return QDF_STATUS_SUCCESS;
1181 }
1182 #else
1183 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1184 {
1185 	return QDF_STATUS_SUCCESS;
1186 }
1187 #endif
1188 
1189 
1190 /*
1191  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1192  * @peer: Datapath peer handle
1193  * @tid: TID
1194  * @ba_window_size: BlockAck window size
1195  * @start_seq: Starting sequence number
1196  *
1197  * Return: 0 on success, error code on failure
1198  */
1199 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1200 	uint32_t ba_window_size, uint32_t start_seq)
1201 {
1202 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1203 	struct dp_vdev *vdev = peer->vdev;
1204 	struct dp_soc *soc = vdev->pdev->soc;
1205 	uint32_t hw_qdesc_size;
1206 	uint32_t hw_qdesc_align;
1207 	int hal_pn_type;
1208 	void *hw_qdesc_vaddr;
1209 	uint32_t alloc_tries = 0;
1210 
1211 	if (peer->delete_in_progress)
1212 		return QDF_STATUS_E_FAILURE;
1213 
1214 	rx_tid->ba_win_size = ba_window_size;
1215 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1216 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1217 			start_seq);
1218 	rx_tid->num_of_addba_req = 0;
1219 	rx_tid->num_of_delba_req = 0;
1220 	rx_tid->num_of_addba_resp = 0;
1221 #ifdef notyet
1222 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1223 #else
1224 	/* TODO: Allocating HW queue descriptors based on max BA window size
1225 	 * for all QOS TIDs so that same descriptor can be used later when
1226 	 * ADDBA request is received. This should be changed to allocate HW
1227 	 * queue descriptors based on BA window size being negotiated (0 for
1228 	 * non BA cases), and reallocate when BA window size changes and also
1229 	 * send WMI message to FW to change the REO queue descriptor in Rx
1230 	 * peer entry as part of dp_rx_tid_update.
1231 	 */
1232 	if (tid != DP_NON_QOS_TID)
1233 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1234 			HAL_RX_MAX_BA_WINDOW);
1235 	else
1236 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1237 			ba_window_size);
1238 #endif
1239 
1240 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1241 	/* To avoid unnecessary extra allocation for alignment, try allocating
1242 	 * exact size and see if we already have aligned address.
1243 	 */
1244 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1245 
1246 try_desc_alloc:
1247 	rx_tid->hw_qdesc_vaddr_unaligned =
1248 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1249 
1250 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1251 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1252 			"%s: Rx tid HW desc alloc failed: tid %d\n",
1253 			__func__, tid);
1254 		return QDF_STATUS_E_NOMEM;
1255 	}
1256 
1257 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1258 		hw_qdesc_align) {
1259 		/* Address allocated above is not aligned. Allocate extra
1260 		 * memory for alignment
1261 		 */
1262 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1263 		rx_tid->hw_qdesc_vaddr_unaligned =
1264 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1265 					hw_qdesc_align - 1);
1266 
1267 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1268 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1269 				"%s: Rx tid HW desc alloc failed: tid %d\n",
1270 				__func__, tid);
1271 			return QDF_STATUS_E_NOMEM;
1272 		}
1273 
1274 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1275 			rx_tid->hw_qdesc_vaddr_unaligned,
1276 			hw_qdesc_align);
1277 
1278 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1279 			"%s: Total Size %d Aligned Addr %pK\n",
1280 			__func__, rx_tid->hw_qdesc_alloc_size,
1281 			hw_qdesc_vaddr);
1282 
1283 	} else {
1284 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1285 	}
1286 
1287 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1288 	 * Currently this is set based on htt indication
1289 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1290 	 */
1291 	switch (peer->security[dp_sec_ucast].sec_type) {
1292 	case cdp_sec_type_tkip_nomic:
1293 	case cdp_sec_type_aes_ccmp:
1294 	case cdp_sec_type_aes_ccmp_256:
1295 	case cdp_sec_type_aes_gcmp:
1296 	case cdp_sec_type_aes_gcmp_256:
1297 		hal_pn_type = HAL_PN_WPA;
1298 		break;
1299 	case cdp_sec_type_wapi:
1300 		if (vdev->opmode == wlan_op_mode_ap)
1301 			hal_pn_type = HAL_PN_WAPI_EVEN;
1302 		else
1303 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1304 		break;
1305 	default:
1306 		hal_pn_type = HAL_PN_NONE;
1307 		break;
1308 	}
1309 
1310 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1311 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1312 
1313 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1314 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1315 		&(rx_tid->hw_qdesc_paddr));
1316 
1317 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1318 			QDF_STATUS_SUCCESS) {
1319 		if (alloc_tries++ < 10)
1320 			goto try_desc_alloc;
1321 		else {
1322 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1323 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d\n",
1324 			__func__, tid);
1325 			return QDF_STATUS_E_NOMEM;
1326 		}
1327 	}
1328 
1329 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1330 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1331 			vdev->pdev->ctrl_pdev,
1332 			peer->vdev->vdev_id, peer->mac_addr.raw,
1333 			rx_tid->hw_qdesc_paddr, tid, tid);
1334 
1335 	}
1336 	return 0;
1337 }
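/*
 * Alignment sketch for the descriptor allocation above (hypothetical
 * numbers): if hal_get_reo_qdesc_align() returns 128 and the exact-size
 * allocation comes back unaligned, the buffer is re-allocated with
 * (align - 1) extra bytes and qdf_align() rounds the start address up to
 * the next 128-byte boundary, e.g.
 *
 *	qdf_align(0x100447, 128) == 0x100480
 *
 * so up to 127 bytes at the front of the larger buffer are skipped.
 */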
1338 
1339 /*
1340  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1341  * after deleting the entries (ie., setting valid=0)
1342  *
1343  * @soc: DP SOC handle
1344  * @cb_ctxt: Callback context
1345  * @reo_status: REO command status
1346  */
1347 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1348 	union hal_reo_status *reo_status)
1349 {
1350 	struct reo_desc_list_node *freedesc =
1351 		(struct reo_desc_list_node *)cb_ctxt;
1352 	uint32_t list_size;
1353 	struct reo_desc_list_node *desc;
1354 	unsigned long curr_ts = qdf_get_system_timestamp();
1355 	uint32_t desc_size, tot_desc_size;
1356 	struct hal_reo_cmd_params params;
1357 
1358 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1359 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1360 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1361 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1362 		return;
1363 	} else if (reo_status->rx_queue_status.header.status !=
1364 		HAL_REO_CMD_SUCCESS) {
1365 		/* Should not happen normally. Just print error for now */
1366 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1367 			"%s: Rx tid HW desc deletion failed(%d): tid %d\n",
1368 			__func__,
1369 			reo_status->rx_queue_status.header.status,
1370 			freedesc->rx_tid.tid);
1371 	}
1372 
1373 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1374 		"%s: rx_tid: %d status: %d\n", __func__,
1375 		freedesc->rx_tid.tid,
1376 		reo_status->rx_queue_status.header.status);
1377 
1378 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1379 	freedesc->free_ts = curr_ts;
1380 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1381 		(qdf_list_node_t *)freedesc, &list_size);
1382 
1383 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1384 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1385 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1386 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1387 		struct dp_rx_tid *rx_tid;
1388 
1389 		qdf_list_remove_front(&soc->reo_desc_freelist,
1390 				(qdf_list_node_t **)&desc);
1391 		list_size--;
1392 		rx_tid = &desc->rx_tid;
1393 
1394 		/* Flush and invalidate REO descriptor from HW cache: Base and
1395 		 * extension descriptors should be flushed separately */
1396 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1397 			rx_tid->ba_win_size);
1398 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1399 
1400 		/* Flush reo extension descriptors */
1401 		while ((tot_desc_size -= desc_size) > 0) {
1402 			qdf_mem_zero(&params, sizeof(params));
1403 			params.std.addr_lo =
1404 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1405 				tot_desc_size) & 0xffffffff;
1406 			params.std.addr_hi =
1407 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1408 
1409 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1410 							CMD_FLUSH_CACHE,
1411 							&params,
1412 							NULL,
1413 							NULL)) {
1414 				QDF_TRACE(QDF_MODULE_ID_DP,
1415 					QDF_TRACE_LEVEL_ERROR,
1416 					"%s: fail to send CMD_FLUSH_CACHE:"
1417 					"tid %d desc %pK\n", __func__,
1418 					rx_tid->tid,
1419 					(void *)(rx_tid->hw_qdesc_paddr));
1420 			}
1421 		}
1422 
1423 		/* Flush base descriptor */
1424 		qdf_mem_zero(&params, sizeof(params));
1425 		params.std.need_status = 1;
1426 		params.std.addr_lo =
1427 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1428 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1429 
1430 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1431 							  CMD_FLUSH_CACHE,
1432 							  &params,
1433 							  dp_reo_desc_free,
1434 							  (void *)desc)) {
1435 			union hal_reo_status reo_status;
1436 			/*
1437 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
1438 			 * should be unmapped. The local reo_desc, together with the
1439 			 * TID queue desc, also needs to be freed accordingly.
1440 			 *
1441 			 * Here invoke desc_free function directly to do clean up.
1442 			 */
1443 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1444 				"%s: fail to send REO cmd to flush cache: tid %d\n",
1445 				__func__, rx_tid->tid);
1446 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1447 			reo_status.fl_cache_status.header.status = 0;
1448 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1449 		}
1450 	}
1451 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1452 }
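/*
 * Flush-loop sketch for dp_rx_tid_delete_cb() (hypothetical sizes): if
 * hal_get_reo_qdesc_size() reports a 512-byte base descriptor and 2048
 * bytes total for the negotiated BA window, the loop above issues
 * CMD_FLUSH_CACHE for the extension descriptors at offsets 1536, 1024 and
 * 512, and the final command flushes the base descriptor at offset 0 with
 * dp_reo_desc_free() as its callback, which unmaps and frees the memory
 * once the flush status arrives.
 */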
1453 
1454 /*
1455  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1456  * @peer: Datapath peer handle
1457  * @tid: TID
1458  *
1459  * Return: 0 on success, error code on failure
1460  */
1461 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1462 {
1463 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1464 	struct dp_soc *soc = peer->vdev->pdev->soc;
1465 	struct hal_reo_cmd_params params;
1466 	struct reo_desc_list_node *freedesc =
1467 		qdf_mem_malloc(sizeof(*freedesc));
1468 
1469 	if (!freedesc) {
1470 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1471 			"%s: malloc failed for freedesc: tid %d\n",
1472 			__func__, tid);
1473 		return -ENOMEM;
1474 	}
1475 
1476 	freedesc->rx_tid = *rx_tid;
1477 
1478 	qdf_mem_zero(&params, sizeof(params));
1479 
1480 	params.std.need_status = 0;
1481 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1482 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1483 	params.u.upd_queue_params.update_vld = 1;
1484 	params.u.upd_queue_params.vld = 0;
1485 
1486 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1487 		dp_rx_tid_delete_cb, (void *)freedesc);
1488 
1489 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1490 	rx_tid->hw_qdesc_alloc_size = 0;
1491 	rx_tid->hw_qdesc_paddr = 0;
1492 
1493 	return 0;
1494 }
1495 
1496 #ifdef DP_LFR
1497 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1498 {
1499 	int tid;
1500 
1501 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1502 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1503 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1504 			"Setting up TID %d for peer %pK peer->local_id %d\n",
1505 			tid, peer, peer->local_id);
1506 	}
1507 }
1508 #else
1509 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1510 #endif
1511 /*
1512  * dp_peer_rx_init() – Initialize receive TID state
1513  * @pdev: Datapath pdev
1514  * @peer: Datapath peer
1515  *
1516  */
1517 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1518 {
1519 	int tid;
1520 	struct dp_rx_tid *rx_tid;
1521 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1522 		rx_tid = &peer->rx_tid[tid];
1523 		rx_tid->array = &rx_tid->base;
1524 		rx_tid->base.head = rx_tid->base.tail = NULL;
1525 		rx_tid->tid = tid;
1526 		rx_tid->defrag_timeout_ms = 0;
1527 		rx_tid->ba_win_size = 0;
1528 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1529 
1530 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1531 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1532 
1533 #ifdef notyet /* TODO: See if this is required for exception handling */
1534 		/* invalid sequence number */
1535 		peer->tids_last_seq[tid] = 0xffff;
1536 #endif
1537 	}
1538 
1539 	/* Setup default (non-qos) rx tid queue */
1540 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1541 
1542 	/* Setup rx tid queue for TID 0.
1543 	 * Other queues will be set up on receiving the first packet for that TID,
1544 	 * which will first hit the NULL REO queue error path
1545 	 */
1546 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1547 
1548 	/*
1549 	 * Setup the rest of TID's to handle LFR
1550 	 */
1551 	dp_peer_setup_remaining_tids(peer);
1552 
1553 	/*
1554 	 * Set security defaults: no PN check, no security. The target may
1555 	 * send a HTT SEC_IND message to overwrite these defaults.
1556 	 */
1557 	peer->security[dp_sec_ucast].sec_type =
1558 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1559 }
1560 
1561 /*
1562  * dp_peer_rx_cleanup() – Cleanup receive TID state
1563  * @vdev: Datapath vdev
1564  * @peer: Datapath peer
1565  *
1566  */
1567 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1568 {
1569 	int tid;
1570 	uint32_t tid_delete_mask = 0;
1571 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1572 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1573 			dp_rx_tid_delete_wifi3(peer, tid);
1574 
1575 			/* Cleanup defrag related resource */
1576 			dp_rx_defrag_waitlist_remove(peer, tid);
1577 			dp_rx_reorder_flush_frag(peer, tid);
1578 
1579 			tid_delete_mask |= (1 << tid);
1580 		}
1581 	}
1582 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1583 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1584 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
1585 			peer->vdev->vdev_id, peer->mac_addr.raw,
1586 			tid_delete_mask);
1587 	}
1588 #endif
1589 }
1590 
1591 /*
1592  * dp_peer_cleanup() – Cleanup peer information
1593  * @vdev: Datapath vdev
1594  * @peer: Datapath peer
1595  *
1596  */
1597 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1598 {
1599 	peer->last_assoc_rcvd = 0;
1600 	peer->last_disassoc_rcvd = 0;
1601 	peer->last_deauth_rcvd = 0;
1602 
1603 	/* cleanup the Rx reorder queues for this peer */
1604 	dp_peer_rx_cleanup(vdev, peer);
1605 }
1606 
1607 /*
1608 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
1609 *
1610 * @peer: Datapath peer handle
1611 * @dialogtoken: dialogtoken from ADDBA frame
1612 * @tid: TID number
1613 * @startseqnum: Start seq. number received in BA sequence control
1614 * in ADDBA frame
1615 *
1616 * Return: 0 on success, error code on failure
1617 */
1618 int dp_addba_requestprocess_wifi3(void *peer_handle,
1619 	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
1620 	uint16_t buffersize, uint16_t startseqnum)
1621 {
1622 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1623 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1624 
1625 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
1626 			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
1627 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1628 
1629 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize,
1630 		startseqnum)) {
1631 		/* TODO: Should we send addba reject in this case */
1632 		return QDF_STATUS_E_FAILURE;
1633 	}
1634 
1635 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1636 		rx_tid->statuscode = rx_tid->userstatuscode;
1637 	else
1638 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1639 
1640 	rx_tid->dialogtoken = dialogtoken;
1641 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1642 	rx_tid->num_of_addba_req++;
1643 
1644 	return 0;
1645 }
1646 
1647 /*
1648 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
1649 *
1650 * @peer: Datapath peer handle
1651 * @tid: TID number
1652 * @dialogtoken: output dialogtoken
1653 * @statuscode: output status code
1654 * @buffersize: Output BA window size
1655 * @batimeout: Output BA timeout
1656 */
1657 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1658 	uint8_t *dialogtoken, uint16_t *statuscode,
1659 	uint16_t *buffersize, uint16_t *batimeout)
1660 {
1661 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1662 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1663 
1664 	rx_tid->num_of_addba_resp++;
1665 	/* setup ADDBA response parameters */
1666 	*dialogtoken = rx_tid->dialogtoken;
1667 	*statuscode = rx_tid->statuscode;
1668 	*buffersize = rx_tid->ba_win_size;
1669 	*batimeout  = 0;
1670 }
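/*
 * ADDBA handshake sketch (a plausible caller sequence; the frame parsing
 * and response-building steps live outside this file): the control path
 * first hands the received request to dp_addba_requestprocess_wifi3() and
 * then fetches the parameters for the ADDBA response frame:
 *
 *	dp_addba_requestprocess_wifi3(peer, dialogtoken, tid, batimeout,
 *				      buffersize, startseqnum);
 *	dp_addba_responsesetup_wifi3(peer, tid, &dialogtoken, &statuscode,
 *				     &buffersize, &batimeout);
 *	// statuscode and buffersize are then echoed in the ADDBA response
 */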
1671 
1672 /*
1673 * dp_set_addba_response() – Set a user defined ADDBA response status code
1674 *
1675 * @peer: Datapath peer handle
1676 * @tid: TID number
1677 * @statuscode: response status code to be set
1678 */
1679 void dp_set_addba_response(void *peer_handle, uint8_t tid,
1680 	uint16_t statuscode)
1681 {
1682 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1683 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1684 
1685 	rx_tid->userstatuscode = statuscode;
1686 }
1687 
1688 /*
1689 * dp_delba_process_wifi3() - Process DELBA from peer
1690 * @peer: Datapath peer handle
1691 * @tid: TID number
1692 * @reasoncode: Reason code received in DELBA frame
1693 *
1694 * Return: 0 on success, error code on failure
1695 */
1696 int dp_delba_process_wifi3(void *peer_handle,
1697 	int tid, uint16_t reasoncode)
1698 {
1699 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1700 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1701 
1702 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
1703 		return QDF_STATUS_E_FAILURE;
1704 
1705 	/* TODO: See if we can delete the existing REO queue descriptor and
1706 	 * replace it with a new one without the queue extension descriptor to save
1707 	 * memory
1708 	 */
1709 	rx_tid->num_of_delba_req++;
1710 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1711 
1712 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1713 
1714 	return 0;
1715 }
1716 
1717 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
1718 	qdf_nbuf_t msdu_list)
1719 {
1720 	while (msdu_list) {
1721 		qdf_nbuf_t msdu = msdu_list;
1722 
1723 		msdu_list = qdf_nbuf_next(msdu_list);
1724 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1725 			"discard rx %pK from partly-deleted peer %pK "
1726 			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
1727 			msdu, peer,
1728 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1729 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1730 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
1731 		qdf_nbuf_free(msdu);
1732 	}
1733 }
1734 
1735 
1736 /**
1737  * dp_set_pn_check_wifi3() - enable PN check in REO for security
1738  * @vdev_handle: Datapath vdev handle
1739  * @peer_handle: Datapath peer handle
1740  * @sec_type: security type
1741  * @rx_pn: Receive pn starting number
1743  *
1744  */
1745 
1746 void
1747 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
1748 {
1749 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
1750 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1751 	struct dp_pdev *pdev;
1752 	struct dp_soc *soc;
1753 	int i;
1754 	uint8_t pn_size;
1755 	struct hal_reo_cmd_params params;
1756 
1757 	/* preconditions */
1758 	qdf_assert(vdev);
1759 
1760 	pdev = vdev->pdev;
1761 	soc = pdev->soc;
1762 
1763 
1764 	qdf_mem_zero(&params, sizeof(params));
1765 
1766 	params.std.need_status = 1;
1767 	params.u.upd_queue_params.update_pn_valid = 1;
1768 	params.u.upd_queue_params.update_pn_size = 1;
1769 	params.u.upd_queue_params.update_pn = 1;
1770 	params.u.upd_queue_params.update_pn_check_needed = 1;
1771 	params.u.upd_queue_params.update_svld = 1;
1772 	params.u.upd_queue_params.svld = 0;
1773 
1774 	peer->security[dp_sec_ucast].sec_type = sec_type;
1775 
1776 	switch (sec_type) {
1777 	case cdp_sec_type_tkip_nomic:
1778 	case cdp_sec_type_aes_ccmp:
1779 	case cdp_sec_type_aes_ccmp_256:
1780 	case cdp_sec_type_aes_gcmp:
1781 	case cdp_sec_type_aes_gcmp_256:
1782 		params.u.upd_queue_params.pn_check_needed = 1;
1783 		params.u.upd_queue_params.pn_size = 48;
1784 		pn_size = 48;
1785 		break;
1786 	case cdp_sec_type_wapi:
1787 		params.u.upd_queue_params.pn_check_needed = 1;
1788 		params.u.upd_queue_params.pn_size = 128;
1789 		pn_size = 128;
1790 		if (vdev->opmode == wlan_op_mode_ap) {
1791 			params.u.upd_queue_params.pn_even = 1;
1792 			params.u.upd_queue_params.update_pn_even = 1;
1793 		} else {
1794 			params.u.upd_queue_params.pn_uneven = 1;
1795 			params.u.upd_queue_params.update_pn_uneven = 1;
1796 		}
1797 		break;
1798 	default:
1799 		params.u.upd_queue_params.pn_check_needed = 0;
1800 		pn_size = 0;
1801 		break;
1802 	}
1803 
1805 	for (i = 0; i < DP_MAX_TIDS; i++) {
1806 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1807 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
1808 			params.std.addr_lo =
1809 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1810 			params.std.addr_hi =
1811 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1812 
1813 			if (sec_type != cdp_sec_type_wapi) {
1814 				params.u.upd_queue_params.update_pn_valid = 0;
1815 			} else {
1816 				/*
1817 				 * Setting PN valid bit for WAPI sec_type,
1818 				 * since WAPI PN has to be started with
1819 				 * predefined value
1820 				 */
1821 				params.u.upd_queue_params.update_pn_valid = 1;
1822 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1823 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1824 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1825 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1826 			}
1827 			rx_tid->pn_size = pn_size;
1828 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1829 				dp_rx_tid_update_cb, rx_tid);
1830 		} else {
1831 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1832 				"PN check not setup for TID: %d\n", i);
1833 		}
1834 	}
1835 }
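
/*
 * Invocation sketch (illustrative, not part of the driver): for CCMP the
 * REO queues only need the PN size/check bits programmed and the rx_pn
 * words are ignored (update_pn_valid is cleared above), whereas for WAPI
 * the four 32-bit words seed the 128-bit starting PN. The variable names
 * are assumptions for the example.
 *
 *	uint32_t dummy_pn[4] = { 0 };		ignored for CCMP
 *	uint32_t wapi_start_pn[4] = { 0 };	caller fills WAPI start PN
 *
 *	dp_set_pn_check_wifi3(vdev_handle, peer_handle,
 *			      cdp_sec_type_aes_ccmp, dummy_pn);
 *	dp_set_pn_check_wifi3(vdev_handle, peer_handle,
 *			      cdp_sec_type_wapi, wapi_start_pn);
 */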
1836 
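/*
 * dp_rx_sec_ind_handler() - handle a security indication for a peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer id carried in the indication
 * @sec_type: negotiated security type
 * @is_unicast: unicast vs multicast key
 * @michael_key: TKIP MIC key (only referenced under the notyet blocks below)
 * @rx_pn: initial receive PN (only referenced under the notyet blocks below)
 *
 * Expected to run from the HTT target-to-host indication path once keys are
 * plumbed for the peer; caches the security type per peer so the rx path can
 * apply the right PN/MIC handling. The calling context described here is an
 * assumption, not something mandated by this file.
 *
 * Return: none
 */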
1838 void
1839 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
1840 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
1841 	u_int32_t *rx_pn)
1842 {
1843 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1844 	struct dp_peer *peer;
1845 	int sec_index;
1846 
1847 	peer = dp_peer_find_by_id(soc, peer_id);
1848 	if (!peer) {
1849 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1850 			"Couldn't find peer from ID %d - skipping security inits\n",
1851 			peer_id);
1852 		return;
1853 	}
1854 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1855 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
1856 		"%s key of type %d\n",
1857 		peer,
1858 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1859 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1860 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
1861 		is_unicast ? "ucast" : "mcast",
1862 		sec_type);
1863 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
1864 	peer->security[sec_index].sec_type = sec_type;
1865 #ifdef notyet /* TODO: See if this is required for defrag support */
1866 	/* michael key only valid for TKIP, but for simplicity,
1867 	 * copy it anyway
1868 	 */
1869 	qdf_mem_copy(
1870 		&peer->security[sec_index].michael_key[0],
1871 		michael_key,
1872 		sizeof(peer->security[sec_index].michael_key));
1873 #ifdef BIG_ENDIAN_HOST
1874 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
1875 				 sizeof(peer->security[sec_index].michael_key));
1876 #endif /* BIG_ENDIAN_HOST */
1877 #endif
1878 
1879 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
1880 	if (sec_type != htt_sec_type_wapi) {
1881 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
1882 	} else {
1883 		for (i = 0; i < DP_MAX_TIDS; i++) {
1884 			/*
1885 			 * Setting PN valid bit for WAPI sec_type,
1886 			 * since WAPI PN has to be started with predefined value
1887 			 */
1888 			peer->tids_last_pn_valid[i] = 1;
1889 			qdf_mem_copy(
1890 				(u_int8_t *) &peer->tids_last_pn[i],
1891 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
1892 			peer->tids_last_pn[i].pn128[1] =
1893 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
1894 			peer->tids_last_pn[i].pn128[0] =
1895 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
1896 		}
1897 	}
1898 #endif
1899 	/* TODO: Update HW TID queue with PN check parameters (pn type for
1900 	 * all security types and last pn for WAPI) once REO command API
1901 	 * is available
1902 	 */
1903 }
1904 
1905 #ifndef CONFIG_WIN
1906 /**
1907  * dp_register_peer() - Register peer into physical device
1908  * @pdev_handle - data path device instance
1909  * @sta_desc - peer description
1910  *
1911  * Register peer into physical device
1912  *
1913  * Return: QDF_STATUS_SUCCESS on successful registration
1914  *         QDF_STATUS_E_FAULT peer not found
1915  */
1916 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
1917 		struct ol_txrx_desc_type *sta_desc)
1918 {
1919 	struct dp_peer *peer;
1920 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1921 
1922 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
1923 			sta_desc->sta_id);
1924 	if (!peer)
1925 		return QDF_STATUS_E_FAULT;
1926 
1927 	qdf_spin_lock_bh(&peer->peer_info_lock);
1928 	peer->state = OL_TXRX_PEER_STATE_CONN;
1929 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1930 
1931 	return QDF_STATUS_SUCCESS;
1932 }
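
/*
 * Lifecycle sketch (illustrative, not part of the driver): the OS interface
 * layer is assumed to register the peer once its local id is known, move it
 * to the authorized state after key install, and clear it on disconnect.
 * The local_id/peer_mac variables are assumptions for the example; only the
 * sta_id field of ol_txrx_desc_type is shown.
 *
 *	struct ol_txrx_desc_type desc = { 0 };
 *
 *	desc.sta_id = local_id;
 *	dp_register_peer(pdev_handle, &desc);
 *	dp_peer_state_update(pdev_handle, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 *	...
 *	dp_clear_peer(pdev_handle, local_id);
 */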
1933 
1934 /**
1935  * dp_clear_peer() - remove peer from physical device
1936  * @pdev_handle - data path device instance
1937  * @local_id - local peer id
1938  *
1939  * Remove peer from physical device
1940  *
1941  * Return: QDF_STATUS_SUCCESS peer successfully cleared
1942  *         QDF_STATUS_E_FAULT peer not found
1943  */
1944 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
1945 {
1946 	struct dp_peer *peer;
1947 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1948 
1949 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
1950 	if (!peer)
1951 		return QDF_STATUS_E_FAULT;
1952 
1953 	qdf_spin_lock_bh(&peer->peer_info_lock);
1954 	peer->state = OL_TXRX_PEER_STATE_DISC;
1955 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1956 
1957 	return QDF_STATUS_SUCCESS;
1958 }
1959 
1960 /**
1961  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
1962  * @pdev_handle - data path device instance
1963  * @vdev_handle - virtual interface instance
1964  * @peer_addr - peer mac address
1965  * @local_id - local peer id filled for the matching peer
1966  *
1967  * Find peer by peer mac address within vdev
1968  *
1969  * Return: peer instance void pointer
1970  *         NULL if the peer cannot be found
1971  */
1972 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
1973 		struct cdp_vdev *vdev_handle,
1974 		uint8_t *peer_addr, uint8_t *local_id)
1975 {
1976 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1977 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1978 	struct dp_peer *peer;
1979 
1980 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
1981 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
1982 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
1983 
1984 	if (!peer)
1985 		return NULL;
1986 
1987 	if (peer->vdev != vdev) {
1988 		qdf_atomic_dec(&peer->ref_cnt);
1989 		return NULL;
1990 	}
1991 
1992 	*local_id = peer->local_id;
1993 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
1994 
1995 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1996 	 * Decrement it here.
1997 	 */
1998 	qdf_atomic_dec(&peer->ref_cnt);
1999 
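	/*
	 * Note that the pointer returned below therefore carries no
	 * reference of its own; callers are presumably expected to use it
	 * only while the peer is otherwise guaranteed to remain valid.
	 */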
2000 	return peer;
2001 }
2002 
2003 /**
2004  * dp_local_peer_id() - Find local peer id within peer instance
2005  * @peer - peer instance
2006  *
2007  * Find local peer id within peer instance
2008  *
2009  * Return: local peer id
2010  */
2011 uint16_t dp_local_peer_id(void *peer)
2012 {
2013 	return ((struct dp_peer *)peer)->local_id;
2014 }
2015 
2016 /**
2017  * dp_peer_find_by_local_id() - Find peer by local peer id
2018  * @pdev_handle - data path device instance
2019  * @local_id - local peer id to find
2020  *
2021  * Find peer by local peer id within physical device
2022  *
2023  * Return: peer instance void pointer
2024  *         NULL if the peer cannot be found
2025  */
2026 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2027 {
2028 	struct dp_peer *peer;
2029 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2030 
2031 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2032 		DP_TRACE(DEBUG, "Incorrect local id %d", local_id);
2033 		QDF_BUG(0);
2034 		return NULL;
2035 	}
2036 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2037 	peer = pdev->local_peer_ids.map[local_id];
2038 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2039 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2040 	return peer;
2041 }
2042 
2043 /**
2044  * dp_peer_state_update() - update peer local state
2045  * @pdev_handle - data path device instance
2046  * @peer_mac - peer mac address
2047  * @state - new peer local state
2048  *
2049  * Update peer local state
2050  * Return: QDF_STATUS_SUCCESS state updated
2051  *         QDF_STATUS_E_FAILURE peer not found
2052  */
2053 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2054 		enum ol_txrx_peer_state state)
2055 {
2056 	struct dp_peer *peer;
2057 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2058 
2059 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2060 	if (NULL == peer) {
2061 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2062 		"Failed to find peer for: [%pM]", peer_mac);
2063 		return QDF_STATUS_E_FAILURE;
2064 	}
2065 	peer->state = state;
2066 
2067 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2068 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2069 	 * Decrement it here.
2070 	 */
2071 	qdf_atomic_dec(&peer->ref_cnt);
2072 
2073 	return QDF_STATUS_SUCCESS;
2074 }
2075 
2076 /**
2077  * dp_get_vdevid() - Get virtual interface id which peer registered
2078  * @peer_handle - peer instance
2079  * @vdev_id - virtual interface id which peer registered
2080  *
2081  * Get virtual interface id which peer registered
2082  *
2083  * Return: QDF_STATUS_SUCCESS
2084  */
2085 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2086 {
2087 	struct dp_peer *peer = peer_handle;
2088 
2089 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2090 			peer, peer->vdev, peer->vdev->vdev_id);
2091 	*vdev_id = peer->vdev->vdev_id;
2092 	return QDF_STATUS_SUCCESS;
2093 }
2094 
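/**
 * dp_get_vdev_by_sta_id() - Get vdev instance from the sta (local peer) id
 * @pdev_handle: data path device instance
 * @sta_id: local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL if the sta id is invalid or the peer cannot be found
 */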
2095 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2096 				       uint8_t sta_id)
2097 {
2098 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2099 	struct dp_peer *peer = NULL;
2100 
2101 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2102 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2103 			  "Invalid sta id passed");
2104 		return NULL;
2105 	}
2106 
2107 	if (!pdev) {
2108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2109 			  "PDEV not found for sta_id [%d]", sta_id);
2110 		return NULL;
2111 	}
2112 
2113 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2114 	if (!peer) {
2115 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2116 			  "PEER [%d] not found", sta_id);
2117 		return NULL;
2118 	}
2119 
2120 	return (struct cdp_vdev *)peer->vdev;
2121 }
2122 
2123 /**
2124  * dp_get_vdev_for_peer() - Get virtual interface to which the peer belongs
2125  * @peer_handle - peer instance
2126  *
2127  * Get virtual interface instance to which the peer belongs
2128  *
2129  * Return: virtual interface instance pointer
2130  *         NULL in case it cannot be found
2131  */
2132 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2133 {
2134 	struct dp_peer *peer = peer_handle;
2135 
2136 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
2137 	return (struct cdp_vdev *)peer->vdev;
2138 }
2139 
2140 /**
2141  * dp_peer_get_peer_mac_addr() - Get peer mac address
2142  * @peer_handle - peer instance
2143  *
2144  * Get peer mac address
2145  *
2146  * Return: peer mac address pointer
2147  *         NULL in case it cannot be found
2148  */
2149 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2150 {
2151 	struct dp_peer *peer = peer_handle;
2152 	uint8_t *mac;
2153 
2154 	mac = peer->mac_addr.raw;
2155 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2156 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2157 	return peer->mac_addr.raw;
2158 }
2159 
2160 /**
2161  * dp_get_peer_state() - Get local peer state
2162  * @peer_handle - peer instance
2163  *
2164  * Get local peer state
2165  *
2166  * Return: peer state
2167  */
2168 int dp_get_peer_state(void *peer_handle)
2169 {
2170 	struct dp_peer *peer = peer_handle;
2171 
2172 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2173 	return peer->state;
2174 }
2175 
2176 /**
2177  * dp_get_last_assoc_received() - get time of last assoc received
2178  * @peer_handle: peer handle
2179  *
2180  * Return: pointer for the time of last assoc received
2181  */
2182 qdf_time_t *dp_get_last_assoc_received(void *peer_handle)
2183 {
2184 	struct dp_peer *peer = peer_handle;
2185 
2186 	DP_TRACE(INFO, "peer %pK last_assoc_rcvd: %lu", peer,
2187 		peer->last_assoc_rcvd);
2188 	return &peer->last_assoc_rcvd;
2189 }
2190 
2191 /**
2192  * dp_get_last_disassoc_received() - get time of last disassoc received
2193  * @peer_handle: peer handle
2194  *
2195  * Return: pointer for the time of last disassoc received
2196  */
2197 qdf_time_t *dp_get_last_disassoc_received(void *peer_handle)
2198 {
2199 	struct dp_peer *peer = peer_handle;
2200 
2201 	DP_TRACE(INFO, "peer %pK last_disassoc_rcvd: %lu", peer,
2202 		peer->last_disassoc_rcvd);
2203 	return &peer->last_disassoc_rcvd;
2204 }
2205 
2206 /**
2207  * dp_get_last_deauth_received() - get time of last deauth received
2208  * @peer_handle: peer handle
2209  *
2210  * Return: pointer for the time of last deauth received
2211  */
2212 qdf_time_t *dp_get_last_deauth_received(void *peer_handle)
2213 {
2214 	struct dp_peer *peer = peer_handle;
2215 
2216 	DP_TRACE(INFO, "peer %pK last_deauth_rcvd: %lu", peer,
2217 		peer->last_deauth_rcvd);
2218 	return &peer->last_deauth_rcvd;
2219 }
2220 
2221 /**
2222  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2223  * @pdev - data path device instance
2224  *
2225  * local peer id pool alloc for physical device
2226  *
2227  * Return: none
2228  */
2229 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2230 {
2231 	int i;
2232 
2233 	/* point the freelist to the first ID */
2234 	pdev->local_peer_ids.freelist = 0;
2235 
2236 	/* link each ID to the next one */
2237 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2238 		pdev->local_peer_ids.pool[i] = i + 1;
2239 		pdev->local_peer_ids.map[i] = NULL;
2240 	}
2241 
2242 	/* link the last ID to itself, to mark the end of the list */
2243 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2244 	pdev->local_peer_ids.pool[i] = i;
2245 
2246 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2247 	DP_TRACE(INFO, "Peer pool init");
2248 }
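
/*
 * Worked example (illustrative): with a hypothetical pool of four local IDs
 * the init above leaves
 *
 *	freelist = 0
 *	pool[]   = { 1, 2, 3, 4, 4 }	(pool[4] == 4 is the end marker)
 *	map[]    = { NULL, NULL, NULL, NULL }
 *
 * i.e. every slot points at the next free ID and the last slot points at
 * itself, which is how dp_local_peer_id_alloc() detects an exhausted pool.
 */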
2249 
2250 /**
2251  * dp_local_peer_id_alloc() - allocate local peer id
2252  * @pdev - data path device instance
2253  * @peer - new peer instance
2254  *
2255  * allocate local peer id
2256  *
2257  * Return: none
2258  */
2259 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2260 {
2261 	int i;
2262 
2263 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2264 	i = pdev->local_peer_ids.freelist;
2265 	if (pdev->local_peer_ids.pool[i] == i) {
2266 		/* the list is empty, except for the list-end marker */
2267 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2268 	} else {
2269 		/* take the head ID and advance the freelist */
2270 		peer->local_id = i;
2271 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2272 		pdev->local_peer_ids.map[i] = peer;
2273 	}
2274 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2275 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2276 }
2277 
2278 /**
2279  * dp_local_peer_id_free() - remove local peer id
2280  * @pdev - data path device instance
2281  * @peer - peer instance to be removed
2282  *
2283  * remove local peer id
2284  *
2285  * Return: none
2286  */
2287 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2288 {
2289 	int i = peer->local_id;
2290 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2291 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2292 		return;
2293 	}
2294 
2295 	/* put this ID on the head of the freelist */
2296 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2297 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2298 	pdev->local_peer_ids.freelist = i;
2299 	pdev->local_peer_ids.map[i] = NULL;
2300 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2301 }
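
/*
 * Continuing the illustrative example above: freeing ID 1 while freelist is
 * 3 sets pool[1] = 3 and freelist = 1, so released IDs are pushed onto the
 * head of the list and reused in LIFO order.
 */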
2302 #endif
2303 
2304 /**
2305  * dp_get_peer_mac_addr_frm_id() - get mac address of the peer
2306  * @soc_handle: DP SOC handle
2307  * @peer_id: peer id of the peer
2308  * @peer_mac: pointer to be filled with the peer mac address
2309  * Return: vdev_id of the vap, or CDP_INVALID_VDEV_ID if the peer is not found
2310  */
2311 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2312 		uint16_t peer_id, uint8_t *peer_mac)
2313 {
2314 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2315 	struct dp_peer *peer;
2316 
2317 	peer = dp_peer_find_by_id(soc, peer_id);
2318 
2319 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2320 			"soc %pK peer_id %d", soc, peer_id);
2321 
2322 	if (!peer) {
2323 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2324 				"peer not found ");
2325 		return CDP_INVALID_VDEV_ID;
2326 	}
2327 
2328 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
2329 	return peer->vdev->vdev_id;
2330 }
2331 
2332 /**
2333  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
2334  * @peer: DP peer handle
2335  * @dp_stats_cmd_cb: REO command callback function
2336  * @cb_ctxt: Callback context
2337  *
2338  * Return: none
2339  */
2340 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2341 			void *cb_ctxt)
2342 {
2343 	struct dp_soc *soc = peer->vdev->pdev->soc;
2344 	struct hal_reo_cmd_params params;
2345 	int i;
2346 
2347 	if (!dp_stats_cmd_cb)
2348 		return;
2349 
2350 	qdf_mem_zero(&params, sizeof(params));
2351 	for (i = 0; i < DP_MAX_TIDS; i++) {
2352 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2353 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2354 			params.std.need_status = 1;
2355 			params.std.addr_lo =
2356 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2357 			params.std.addr_hi =
2358 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2359 
2360 			if (cb_ctxt) {
2361 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2362 					&params, dp_stats_cmd_cb, cb_ctxt);
2363 			} else {
2364 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2365 					&params, dp_stats_cmd_cb, rx_tid);
2366 			}
2367 
2368 			/* Flush REO descriptor from HW cache to update stats
2369 			 * in descriptor memory. This is to help debugging */
2370 			qdf_mem_zero(&params, sizeof(params));
2371 			params.std.need_status = 0;
2372 			params.std.addr_lo =
2373 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2374 			params.std.addr_hi =
2375 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2376 			params.u.fl_cache_params.flush_no_inval = 1;
2377 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2378 				NULL);
2379 		}
2380 	}
2381 }
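
/*
 * Callback sketch (illustrative, not part of the driver): dp_stats_cmd_cb is
 * forwarded straight to dp_reo_send_cmd(), so it is assumed to follow the
 * REO command callback shape used by the datapath, i.e. (soc, cb_ctxt,
 * reo_status). The callback name below is an assumption for the example.
 *
 *	static void example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *					   union hal_reo_status *reo_status)
 *	{
 *	}
 *
 * cb_ctxt arrives as either the caller-supplied context or the rx_tid,
 * depending on how dp_peer_rxtid_stats() was invoked above.
 *
 *	dp_peer_rxtid_stats(peer, example_rxtid_stats_cb, NULL);
 */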
2382 
2383 void dp_set_michael_key(struct cdp_peer *peer_handle,
2384 			bool is_unicast, uint32_t *key)
2385 {
2386 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2387 	uint8_t sec_index = is_unicast ? 1 : 0;
2388 
2389 	if (!peer) {
2390 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2391 			  "peer not found ");
2392 		return;
2393 	}
2394 
2395 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
2396 		     key, IEEE80211_WEP_MICLEN);
2397 }
2398