xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {}
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&:
60 		 * because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
69 
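/*
 * dp_peer_find_map_attach() - Allocate the peer ID to peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if the map allocation fails
 */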
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
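/*
 * dp_log2_ceil() - Compute the ceiling of log2(value)
 * @value: input value
 *
 * For example, dp_log2_ceil(5) and dp_log2_ceil(8) both return 3, so for
 * any non-zero n, (1 << dp_log2_ceil(n)) is the smallest power of two >= n.
 *
 * Return: ceiling of the base-2 logarithm of @value
 */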
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
110 
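/*
 * dp_peer_find_add_id_to_obj() - Record a peer ID in the peer object
 * @peer: peer object
 * @peer_id: peer ID assigned by the target
 *
 * Stores @peer_id in the first unused slot of the peer's peer_ids[] array.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all slots are already used
 */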
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
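/*
 * dp_peer_find_hash_attach() - Allocate the MAC address to peer hash table
 * @soc: SoC handle
 *
 * The number of hash bins is max_peers scaled by the load factor macros
 * above and rounded up to a power of two, so the index computation can use
 * a bitwise AND with peer_hash.mask instead of a modulo.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if the bin array allocation fails
 */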
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
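/*
 * dp_peer_find_hash_index() - Compute the peer hash bin for a MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address in 2-byte aligned form
 *
 * XORs the three 16-bit halves of the MAC address, folds in the high bits
 * and masks the result down to the (power of two) number of hash bins.
 *
 * Return: hash bin index
 */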
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
175 
176 
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: None
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 /*
229  * dp_peer_ast_hash_detach() - Free AST Hash table
230  * @soc: SoC handle
231  *
232  * Return: None
233  */
234 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
235 {
236 	qdf_mem_free(soc->ast_hash.bins);
237 }
238 
239 /*
240  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
241  * @soc: SoC handle
242  *
243  * Return: AST hash
244  */
245 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
246 	union dp_align_mac_addr *mac_addr)
247 {
248 	uint32_t index;
249 
250 	index =
251 		mac_addr->align2.bytes_ab ^
252 		mac_addr->align2.bytes_cd ^
253 		mac_addr->align2.bytes_ef;
254 	index ^= index >> soc->ast_hash.idx_bits;
255 	index &= soc->ast_hash.mask;
256 	return index;
257 }
258 
259 /*
260  * dp_peer_ast_hash_add() - Add AST entry into hash table
261  * @soc: SoC handle
262  *
263  * This function adds the AST entry into SoC AST hash table
264  * It assumes caller has taken the ast lock to protect the access to this table
265  *
266  * Return: None
267  */
268 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
269 		struct dp_ast_entry *ase)
270 {
271 	uint32_t index;
272 
273 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
274 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
275 }
276 
277 /*
278  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
279  * @soc: SoC handle
280  *
281  * This function removes the AST entry from soc AST hash table
282  * It assumes caller has taken the ast lock to protect the access to this table
283  *
284  * Return: None
285  */
286 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
287 		struct dp_ast_entry *ase)
288 {
289 	unsigned index;
290 	struct dp_ast_entry *tmpase;
291 	int found = 0;
292 
293 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
294 	/* Check that the list is not empty before deleting */
295 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
296 
297 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
298 		if (tmpase == ase) {
299 			found = 1;
300 			break;
301 		}
302 	}
303 
304 	QDF_ASSERT(found);
305 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
306 }
307 
308 /*
309  * dp_peer_ast_hash_find() - Find AST entry by MAC address
310  * @soc: SoC handle
311  *
312  * It assumes caller has taken the ast lock to protect the access to
313  * AST hash table
314  *
315  * Return: AST entry
316  */
317 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
318 						uint8_t *ast_mac_addr)
319 {
320 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
321 	unsigned index;
322 	struct dp_ast_entry *ase;
323 
324 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
325 			ast_mac_addr, DP_MAC_ADDR_LEN);
326 	mac_addr = &local_mac_addr_aligned;
327 
328 	index = dp_peer_ast_hash_index(soc, mac_addr);
329 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
330 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
331 			return ase;
332 		}
333 	}
334 
335 	return NULL;
336 }
337 
338 /*
339  * dp_peer_map_ast() - Map the ast entry with HW AST Index
340  * @soc: SoC handle
341  * @peer: peer to which ast node belongs
342  * @mac_addr: MAC address of ast node
343  * @hw_peer_id: HW AST Index returned by target in peer map event
344  * @vdev_id: vdev id for VAP to which the peer belongs to
345  *
346  * Return: None
347  */
348 static inline void dp_peer_map_ast(struct dp_soc *soc,
349 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
350 	uint8_t vdev_id)
351 {
352 	struct dp_ast_entry *ast_entry;
353 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
354 	bool ast_entry_found = FALSE;
355 
356 	if (!peer) {
357 		return;
358 	}
359 
360 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
362 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
363 		mac_addr[1], mac_addr[2], mac_addr[3],
364 		mac_addr[4], mac_addr[5]);
365 
366 	qdf_spin_lock_bh(&soc->ast_lock);
367 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
368 		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
369 				DP_MAC_ADDR_LEN))) {
370 			ast_entry->ast_idx = hw_peer_id;
371 			soc->ast_table[hw_peer_id] = ast_entry;
372 			ast_entry->is_active = TRUE;
373 			peer_type = ast_entry->type;
374 			ast_entry_found = TRUE;
375 		}
376 	}
377 
378 	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
379 		if (soc->cdp_soc.ol_ops->peer_map_event) {
380 			soc->cdp_soc.ol_ops->peer_map_event(
381 			soc->ctrl_psoc, peer->peer_ids[0],
382 			hw_peer_id, vdev_id,
383 			mac_addr, peer_type);
384 		}
385 	} else {
386 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
387 			"AST entry not found");
388 	}
389 
390 	qdf_spin_unlock_bh(&soc->ast_lock);
391 	return;
392 }
393 
394 /*
395  * dp_peer_add_ast() - Allocate and add AST entry into peer list
396  * @soc: SoC handle
397  * @peer: peer to which ast node belongs
398  * @mac_addr: MAC address of ast node
399  * @type: type of the AST entry to add
400  *
401  * This API is used by WDS source port learning function to
402  * add a new AST entry into peer AST list
403  *
404  * Return: 0 if new entry is allocated,
405  *        -1 if entry add failed
406  */
407 int dp_peer_add_ast(struct dp_soc *soc,
408 			struct dp_peer *peer,
409 			uint8_t *mac_addr,
410 			enum cdp_txrx_ast_entry_type type,
411 			uint32_t flags)
412 {
413 	struct dp_ast_entry *ast_entry;
414 	struct dp_vdev *vdev = peer->vdev;
415 	uint8_t next_node_mac[6];
416 	int  ret = -1;
417 
418 	if (!vdev) {
419 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
420 			FL("Peer's vdev is NULL"));
421 		QDF_ASSERT(0);
422 		return ret;
423 	}
424 
425 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
426 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
427 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
428 		mac_addr[3], mac_addr[4], mac_addr[5]);
429 
430 	qdf_spin_lock_bh(&soc->ast_lock);
431 
432 	/* If AST entry already exists, just return from here */
433 	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
434 
435 	if (ast_entry) {
436 		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
437 			ast_entry->is_active = TRUE;
438 
439 		qdf_spin_unlock_bh(&soc->ast_lock);
440 		return 0;
441 	}
442 
443 	ast_entry = (struct dp_ast_entry *)
444 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
445 
446 	if (!ast_entry) {
447 		qdf_spin_unlock_bh(&soc->ast_lock);
448 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
449 			FL("fail to allocate ast_entry"));
450 		QDF_ASSERT(0);
451 		return ret;
452 	}
453 
454 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
455 	ast_entry->peer = peer;
456 	ast_entry->pdev_id = vdev->pdev->pdev_id;
457 	ast_entry->vdev_id = vdev->vdev_id;
458 
459 	switch (type) {
460 	case CDP_TXRX_AST_TYPE_STATIC:
461 		peer->self_ast_entry = ast_entry;
462 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
463 		break;
464 	case CDP_TXRX_AST_TYPE_SELF:
465 		peer->self_ast_entry = ast_entry;
466 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
467 		break;
468 	case CDP_TXRX_AST_TYPE_WDS:
469 		ast_entry->next_hop = 1;
470 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
471 		break;
472 	case CDP_TXRX_AST_TYPE_WDS_HM:
473 		ast_entry->next_hop = 1;
474 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
475 		break;
476 	case CDP_TXRX_AST_TYPE_MEC:
477 		ast_entry->next_hop = 1;
478 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
479 		break;
480 	default:
481 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
482 			FL("Incorrect AST entry type"));
483 	}
484 
485 	ast_entry->is_active = TRUE;
486 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
487 	DP_STATS_INC(soc, ast.added, 1);
488 	dp_peer_ast_hash_add(soc, ast_entry);
489 	qdf_spin_unlock_bh(&soc->ast_lock);
490 
491 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
492 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
493 	else
494 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
495 
496 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
497 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
498 		if (QDF_STATUS_SUCCESS ==
499 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
500 				peer->vdev->osif_vdev,
501 				mac_addr,
502 				next_node_mac,
503 				flags))
504 			return 0;
505 	}
506 
507 	return ret;
508 }
509 
510 /*
511  * dp_peer_del_ast() - Delete and free AST entry
512  * @soc: SoC handle
513  * @ast_entry: AST entry of the node
514  *
515  * This function removes the AST entry from peer and soc tables
516  * It assumes caller has taken the ast lock to protect the access to these
517  * tables
518  *
519  * Return: None
520  */
521 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
522 {
523 	struct dp_peer *peer = ast_entry->peer;
524 
525 	if (ast_entry->next_hop)
526 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
527 						ast_entry->mac_addr.raw);
528 
529 	soc->ast_table[ast_entry->ast_idx] = NULL;
530 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
531 	DP_STATS_INC(soc, ast.deleted, 1);
532 	dp_peer_ast_hash_remove(soc, ast_entry);
533 	qdf_mem_free(ast_entry);
534 }
535 
536 /*
537  * dp_peer_update_ast() - Update AST entry for a roamed peer
538  * @soc: SoC handle
539  * @peer: peer to which ast node belongs
540  * @ast_entry: AST entry of the node
541  * @flags: wds or hmwds
542  *
543  * This function moves the AST entry to the roamed peer and updates the soc tables
544  * It assumes caller has taken the ast lock to protect the access to these
545  * tables
546  *
547  * Return: 0 if ast entry is updated successfully
548  *         -1 failure
549  */
550 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
551 		       struct dp_ast_entry *ast_entry, uint32_t flags)
552 {
553 	int ret = -1;
554 	struct dp_peer *old_peer;
555 
556 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
557 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF))
558 		return 0;
559 
560 	old_peer = ast_entry->peer;
561 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
562 
563 	ast_entry->peer = peer;
564 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
565 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
566 	ast_entry->vdev_id = peer->vdev->vdev_id;
567 	ast_entry->is_active = TRUE;
568 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
569 
570 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
571 				peer->vdev->osif_vdev,
572 				ast_entry->mac_addr.raw,
573 				peer->mac_addr.raw,
574 				flags);
575 
576 	return ret;
577 }
578 
579 /*
580  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
581  * @soc: SoC handle
582  * @ast_entry: AST entry of the node
583  *
584  * This function gets the pdev_id from the ast entry.
585  *
586  * Return: (uint8_t) pdev_id
587  */
588 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
589 				struct dp_ast_entry *ast_entry)
590 {
591 	return ast_entry->pdev_id;
592 }
593 
594 /*
595  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
596  * @soc: SoC handle
597  * @ast_entry: AST entry of the node
598  *
599  * This function gets the next hop from the ast entry.
600  *
601  * Return: (uint8_t) next_hop
602  */
603 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
604 				struct dp_ast_entry *ast_entry)
605 {
606 	return ast_entry->next_hop;
607 }
608 
609 /*
610  * dp_peer_ast_set_type() - set type in the ast entry
611  * @soc: SoC handle
612  * @ast_entry: AST entry of the node
613  *
614  * This function sets the type in the ast entry.
615  *
616  * Return: None
617  */
618 void dp_peer_ast_set_type(struct dp_soc *soc,
619 				struct dp_ast_entry *ast_entry,
620 				enum cdp_txrx_ast_entry_type type)
621 {
622 	ast_entry->type = type;
623 }
624 
625 #else
626 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
627 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
628 		uint32_t flags)
629 {
630 	return 1;
631 }
632 
633 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
634 {
635 }
636 
637 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
638 			struct dp_ast_entry *ast_entry, uint32_t flags)
639 {
640 	return 1;
641 }
642 
643 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
644 						uint8_t *ast_mac_addr)
645 {
646 	return NULL;
647 }
648 
649 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
650 {
651 	return 0;
652 }
653 
654 static inline void dp_peer_map_ast(struct dp_soc *soc,
655 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
656 	uint8_t vdev_id)
657 {
658 	return;
659 }
660 
661 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
662 {
663 }
664 
665 void dp_peer_ast_set_type(struct dp_soc *soc,
666 				struct dp_ast_entry *ast_entry,
667 				enum cdp_txrx_ast_entry_type type)
668 {
669 }
670 
671 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
672 				struct dp_ast_entry *ast_entry)
673 {
674 	return 0xff;
675 }
676 
677 
678 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
679 				struct dp_ast_entry *ast_entry)
680 {
681 	return 0xff;
682 }
683 #endif
684 
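/*
 * dp_peer_find_hash_find() - Look up a peer by MAC address (and vdev)
 * @soc: SoC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: set if @peer_mac_addr is already 2-byte aligned
 * @vdev_id: vdev to match, or DP_VDEV_ALL (only checked with ATH_SUPPORT_WRAP)
 *
 * On success the peer's reference count is incremented before the hash
 * lock is released; the caller is responsible for releasing that reference
 * (typically via dp_peer_unref_delete()) when done with the peer.
 *
 * Return: peer object with an elevated reference count, or NULL
 */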
685 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
686 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
687 {
688 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
689 	unsigned index;
690 	struct dp_peer *peer;
691 
692 	if (mac_addr_is_aligned) {
693 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
694 	} else {
695 		qdf_mem_copy(
696 			&local_mac_addr_aligned.raw[0],
697 			peer_mac_addr, DP_MAC_ADDR_LEN);
698 		mac_addr = &local_mac_addr_aligned;
699 	}
700 	index = dp_peer_find_hash_index(soc, mac_addr);
701 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
702 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
703 #if ATH_SUPPORT_WRAP
704 		/* ProxySTA may have multiple BSS peers with the same MAC address,
705 		 * modified find will take care of finding the correct BSS peer.
706 		 */
707 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
708 			((peer->vdev->vdev_id == vdev_id) ||
709 			 (vdev_id == DP_VDEV_ALL))) {
710 #else
711 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
712 #endif
713 			/* found it - increment the ref count before releasing
714 			 * the lock
715 			 */
716 			qdf_atomic_inc(&peer->ref_cnt);
717 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
718 			return peer;
719 		}
720 	}
721 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
722 	return NULL; /* failure */
723 }
724 
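/*
 * dp_peer_find_hash_remove() - Remove a peer object from the hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold peer_ref_mutex; see the comment in the body for the
 * required locking window.
 */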
725 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
726 {
727 	unsigned index;
728 	struct dp_peer *tmppeer = NULL;
729 	int found = 0;
730 
731 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
732 	/* Check that the list is not empty before deleting */
733 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
734 	/*
735 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
736 	 * by the caller.
737 	 * The caller needs to hold the lock from the time the peer object's
738 	 * reference count is decremented and tested up through the time the
739 	 * reference to the peer object is removed from the hash table, by
740 	 * this function.
741 	 * Holding the lock only while removing the peer object reference
742 	 * from the hash table keeps the hash table consistent, but does not
743 	 * protect against a new HL tx context starting to use the peer object
744 	 * if it looks up the peer object from its MAC address just after the
745 	 * peer ref count is decremented to zero, but just before the peer
746 	 * object reference is removed from the hash table.
747 	 */
748 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
749 		if (tmppeer == peer) {
750 			found = 1;
751 			break;
752 		}
753 	}
754 	QDF_ASSERT(found);
755 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
756 }
757 
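/*
 * dp_peer_find_hash_erase() - Release all peers still left in the hash table
 * @soc: SoC handle
 *
 * Called when the soc is being torn down: forces the reference count of
 * every remaining peer to one and drops it, so the peer objects are freed.
 */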
758 void dp_peer_find_hash_erase(struct dp_soc *soc)
759 {
760 	int i;
761 
762 	/*
763 	 * Not really necessary to take peer_ref_mutex lock - by this point,
764 	 * it's known that the soc is no longer in use.
765 	 */
766 	for (i = 0; i <= soc->peer_hash.mask; i++) {
767 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
768 			struct dp_peer *peer, *peer_next;
769 
770 			/*
771 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
772 			 * memory access violation after peer is freed
773 			 */
774 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
775 				hash_list_elem, peer_next) {
776 				/*
777 				 * Don't remove the peer from the hash table -
778 				 * that would modify the list we are currently
779 				 * traversing, and it's not necessary anyway.
780 				 */
781 				/*
782 				 * Artificially adjust the peer's ref count to
783 				 * 1, so it will get deleted by
784 				 * dp_peer_unref_delete.
785 				 */
786 				/* set to zero */
787 				qdf_atomic_init(&peer->ref_cnt);
788 				/* incr to one */
789 				qdf_atomic_inc(&peer->ref_cnt);
790 				dp_peer_unref_delete(peer);
791 			}
792 		}
793 	}
794 }
795 
796 static void dp_peer_find_map_detach(struct dp_soc *soc)
797 {
798 	qdf_mem_free(soc->peer_id_to_obj_map);
799 }
800 
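/*
 * dp_peer_find_attach() - Allocate peer ID map, peer hash and AST hash tables
 * @soc: SoC handle
 *
 * If a later allocation fails, the tables allocated earlier in the sequence
 * are freed again before returning.
 *
 * Return: 0 on success, 1 on failure
 */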
801 int dp_peer_find_attach(struct dp_soc *soc)
802 {
803 	if (dp_peer_find_map_attach(soc))
804 		return 1;
805 
806 	if (dp_peer_find_hash_attach(soc)) {
807 		dp_peer_find_map_detach(soc);
808 		return 1;
809 	}
810 
811 	if (dp_peer_ast_hash_attach(soc)) {
812 		dp_peer_find_hash_detach(soc);
813 		dp_peer_find_map_detach(soc);
814 		return 1;
815 	}
816 	return 0; /* success */
817 }
818 
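/*
 * dp_rx_tid_stats_cb() - REO command callback that dumps TID queue statistics
 * @soc: SoC handle
 * @cb_ctxt: rx_tid for which the statistics were requested
 * @reo_status: REO command status carrying the queue statistics
 */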
819 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
820 	union hal_reo_status *reo_status)
821 {
822 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
823 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
824 
825 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
826 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
827 			queue_status->header.status, rx_tid->tid);
828 		return;
829 	}
830 
831 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
832 		"ssn: %d\n"
833 		"curr_idx  : %d\n"
834 		"pn_31_0   : %08x\n"
835 		"pn_63_32  : %08x\n"
836 		"pn_95_64  : %08x\n"
837 		"pn_127_96 : %08x\n"
838 		"last_rx_enq_tstamp : %08x\n"
839 		"last_rx_deq_tstamp : %08x\n"
840 		"rx_bitmap_31_0     : %08x\n"
841 		"rx_bitmap_63_32    : %08x\n"
842 		"rx_bitmap_95_64    : %08x\n"
843 		"rx_bitmap_127_96   : %08x\n"
844 		"rx_bitmap_159_128  : %08x\n"
845 		"rx_bitmap_191_160  : %08x\n"
846 		"rx_bitmap_223_192  : %08x\n"
847 		"rx_bitmap_255_224  : %08x\n",
848 		rx_tid->tid,
849 		queue_status->ssn, queue_status->curr_idx,
850 		queue_status->pn_31_0, queue_status->pn_63_32,
851 		queue_status->pn_95_64, queue_status->pn_127_96,
852 		queue_status->last_rx_enq_tstamp,
853 		queue_status->last_rx_deq_tstamp,
854 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
855 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
856 		queue_status->rx_bitmap_159_128,
857 		queue_status->rx_bitmap_191_160,
858 		queue_status->rx_bitmap_223_192,
859 		queue_status->rx_bitmap_255_224);
860 
861 	DP_TRACE_STATS(FATAL,
862 		"curr_mpdu_cnt      : %d\n"
863 		"curr_msdu_cnt      : %d\n"
864 		"fwd_timeout_cnt    : %d\n"
865 		"fwd_bar_cnt        : %d\n"
866 		"dup_cnt            : %d\n"
867 		"frms_in_order_cnt  : %d\n"
868 		"bar_rcvd_cnt       : %d\n"
869 		"mpdu_frms_cnt      : %d\n"
870 		"msdu_frms_cnt      : %d\n"
871 		"total_byte_cnt     : %d\n"
872 		"late_recv_mpdu_cnt : %d\n"
873 		"win_jump_2k 	    : %d\n"
874 		"hole_cnt 	    : %d\n",
875 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
876 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
877 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
878 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
879 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
880 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
881 		queue_status->hole_cnt);
882 
883 	DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req);
884 	DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp);
885 	DP_PRINT_STATS("Num of Addba Resp successful = %d\n",
886 		       rx_tid->num_addba_rsp_success);
887 	DP_PRINT_STATS("Num of Addba Resp failed = %d\n",
888 		       rx_tid->num_addba_rsp_failed);
889 	DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req);
890 	DP_PRINT_STATS("BA window size   = %d\n", rx_tid->ba_win_size);
891 	DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size);
892 }
893 
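/*
 * dp_peer_find_add_id() - Map a firmware peer ID to an existing peer object
 * @soc: SoC handle
 * @peer_mac_addr: MAC address reported in the peer map event
 * @peer_id: peer ID assigned by the target
 * @hw_peer_id: AST index assigned by the target
 * @vdev_id: vdev the peer belongs to
 *
 * Return: the matching peer object (still holding the reference taken by
 * dp_peer_find_hash_find), or NULL if no peer with this MAC address exists
 */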
894 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
895 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
896 	uint8_t vdev_id)
897 {
898 	struct dp_peer *peer;
899 
900 	QDF_ASSERT(peer_id <= soc->max_peers);
901 	/* check if there's already a peer object with this MAC address */
902 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
903 		0 /* is aligned */, vdev_id);
904 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
905 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
906 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
907 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
908 		peer_mac_addr[4], peer_mac_addr[5]);
909 
910 	if (peer) {
911 		/* peer's ref count was already incremented by
912 		 * peer_find_hash_find
913 		 */
914 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
915 			  "%s: ref_cnt: %d", __func__,
916 			   qdf_atomic_read(&peer->ref_cnt));
917 		soc->peer_id_to_obj_map[peer_id] = peer;
918 
919 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
920 			/* TBDXXX: assert for now */
921 			QDF_ASSERT(0);
922 		}
923 
924 		return peer;
925 	}
926 
927 	return NULL;
928 }
929 
930 /**
931  * dp_rx_peer_map_handler() - handle peer map event from firmware
932  * @soc_handle - generic soc handle
933  * @peer_id - peer_id from firmware
934  * @hw_peer_id - ast index for this peer
935  * @vdev_id - vdev ID
936  * @peer_mac_addr - mac address of the peer
937  *
938  * Associate the peer_id that firmware provided with the peer entry
939  * and update the ast table in the host with the hw_peer_id.
940  *
941  * Return: none
942  */
943 
944 void
945 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
946 			uint8_t vdev_id, uint8_t *peer_mac_addr)
947 {
948 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
949 	struct dp_peer *peer = NULL;
950 
951 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
952 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
953 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
954 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
955 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
956 		peer_mac_addr[5], vdev_id);
957 
958 	peer = soc->peer_id_to_obj_map[peer_id];
959 
960 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
961 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
962 			"invalid hw_peer_id: %d", hw_peer_id);
963 		qdf_assert_always(0);
964 	}
965 
966 	/*
967 	 * check if peer already exists for this peer_id, if so
968 	 * this peer map event is in response for a wds peer add
969 	 * wmi command sent during wds source port learning.
970 	 * in this case just add the ast entry to the existing
971 	 * peer ast_list.
972 	 */
973 	if (!peer)
974 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
975 					hw_peer_id, vdev_id);
976 
977 	if (peer) {
978 		qdf_assert_always(peer->vdev);
979 		/*
980 		 * For every peer map message, check if this peer is the bss peer and set the flag
981 		 */
982 		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
983 				 DP_MAC_ADDR_LEN))) {
984 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
985 				"vdev bss_peer!!!!");
986 			peer->bss_peer = 1;
987 			peer->vdev->vap_bss_peer = peer;
988 		}
989 	}
990 
991 	dp_peer_map_ast(soc, peer, peer_mac_addr,
992 			hw_peer_id, vdev_id);
993 }
994 
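/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 *
 * Clear the peer_id to peer object mapping, invalidate the matching entry
 * in the peer's peer_ids[] array, notify the control path through the
 * peer_unmap_event callback and drop the reference taken when the peer
 * was mapped.
 *
 * Return: none
 */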
995 void
996 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
997 {
998 	struct dp_peer *peer;
999 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1000 	uint8_t i;
1001 
1002 	peer = __dp_peer_find_by_id(soc, peer_id);
1003 
1004 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1005 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1006 		soc, peer_id, peer);
1007 
1008 	/*
1009 	 * Currently peer IDs are assigned for vdevs as well as peers.
1010 	 * If the peer ID is for a vdev, then the peer pointer stored
1011 	 * in peer_id_to_obj_map will be NULL.
1012 	 */
1013 	if (!peer) {
1014 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1015 			"%s: Received unmap event for invalid peer_id"
1016 			" %u", __func__, peer_id);
1017 		return;
1018 	}
1019 
1020 	soc->peer_id_to_obj_map[peer_id] = NULL;
1021 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1022 		if (peer->peer_ids[i] == peer_id) {
1023 			peer->peer_ids[i] = HTT_INVALID_PEER;
1024 			break;
1025 		}
1026 	}
1027 
1028 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1029 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1030 				peer_id);
1031 	}
1032 
1033 	/*
1034 	 * Remove a reference to the peer.
1035 	 * If there are no more references, delete the peer object.
1036 	 */
1037 	dp_peer_unref_delete(peer);
1038 }
1039 
1040 void
1041 dp_peer_find_detach(struct dp_soc *soc)
1042 {
1043 	dp_peer_find_map_detach(soc);
1044 	dp_peer_find_hash_detach(soc);
1045 	dp_peer_ast_hash_detach(soc);
1046 }
1047 
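/*
 * dp_rx_tid_update_cb() - REO command callback for receive TID updates
 * @soc: SoC handle
 * @cb_ctxt: rx_tid that was updated
 * @reo_status: REO command status
 *
 * Logs an error if the HW queue descriptor update did not succeed.
 */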
1048 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1049 	union hal_reo_status *reo_status)
1050 {
1051 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1052 
1053 	if ((reo_status->rx_queue_status.header.status !=
1054 		HAL_REO_CMD_SUCCESS) &&
1055 		(reo_status->rx_queue_status.header.status !=
1056 		HAL_REO_CMD_DRAIN)) {
1057 		/* Should not happen normally. Just print error for now */
1058 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1059 			"%s: Rx tid HW desc update failed(%d): tid %d",
1060 			__func__,
1061 			reo_status->rx_queue_status.header.status,
1062 			rx_tid->tid);
1063 	}
1064 }
1065 
1066 /*
1067  * dp_find_peer_by_addr - find peer instance by mac address
1068  * @dev: physical device instance
1069  * @peer_mac_addr: peer mac address
1070  * @local_id: local id for the peer
1071  *
1072  * Return: peer instance pointer
1073  */
1074 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1075 		uint8_t *local_id)
1076 {
1077 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1078 	struct dp_peer *peer;
1079 
1080 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1081 
1082 	if (!peer)
1083 		return NULL;
1084 
1085 	/* A peer may have multiple peer ids; how do we know which one to report? */
1086 	*local_id = peer->local_id;
1087 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1088 
1089 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1090 	 * Decrement it here.
1091 	 */
1092 	qdf_atomic_dec(&peer->ref_cnt);
1093 
1094 	return peer;
1095 }
1096 
1097 /*
1098  * dp_rx_tid_update_wifi3() – Update receive TID state
1099  * @peer: Datapath peer handle
1100  * @tid: TID
1101  * @ba_window_size: BlockAck window size
1102  * @start_seq: Starting sequence number
1103  *
1104  * Return: 0 on success, error code on failure
1105  */
1106 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1107 				  ba_window_size, uint32_t start_seq)
1108 {
1109 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1110 	struct dp_soc *soc = peer->vdev->pdev->soc;
1111 	struct hal_reo_cmd_params params;
1112 
1113 	qdf_mem_zero(&params, sizeof(params));
1114 
1115 	params.std.need_status = 1;
1116 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1117 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1118 	params.u.upd_queue_params.update_ba_window_size = 1;
1119 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1120 
1121 	if (start_seq < IEEE80211_SEQ_MAX) {
1122 		params.u.upd_queue_params.update_ssn = 1;
1123 		params.u.upd_queue_params.ssn = start_seq;
1124 	}
1125 
1126 	dp_set_ssn_valid_flag(&params, 0);
1127 
1128 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1129 
1130 	rx_tid->ba_win_size = ba_window_size;
1131 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1132 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1133 			peer->vdev->pdev->ctrl_pdev,
1134 			peer->vdev->vdev_id, peer->mac_addr.raw,
1135 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1136 
1137 	}
1138 	return 0;
1139 }
1140 
1141 /*
1142  * dp_reo_desc_free() - Callback free reo descriptor memory after
1143  * HW cache flush
1144  *
1145  * @soc: DP SOC handle
1146  * @cb_ctxt: Callback context
1147  * @reo_status: REO command status
1148  */
1149 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1150 	union hal_reo_status *reo_status)
1151 {
1152 	struct reo_desc_list_node *freedesc =
1153 		(struct reo_desc_list_node *)cb_ctxt;
1154 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1155 
1156 	if ((reo_status->fl_cache_status.header.status !=
1157 		HAL_REO_CMD_SUCCESS) &&
1158 		(reo_status->fl_cache_status.header.status !=
1159 		HAL_REO_CMD_DRAIN)) {
1160 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1161 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1162 			__func__,
1163 			reo_status->rx_queue_status.header.status,
1164 			freedesc->rx_tid.tid);
1165 	}
1166 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1167 		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1168 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1169 	qdf_mem_unmap_nbytes_single(soc->osdev,
1170 		rx_tid->hw_qdesc_paddr,
1171 		QDF_DMA_BIDIRECTIONAL,
1172 		rx_tid->hw_qdesc_alloc_size);
1173 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1174 	qdf_mem_free(freedesc);
1175 }
1176 
1177 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1178 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1179 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1180 {
1181 	if (dma_addr < 0x50000000)
1182 		return QDF_STATUS_E_FAILURE;
1183 	else
1184 		return QDF_STATUS_SUCCESS;
1185 }
1186 #else
1187 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1188 {
1189 	return QDF_STATUS_SUCCESS;
1190 }
1191 #endif
1192 
1193 
1194 /*
1195  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1196  * @peer: Datapath peer handle
1197  * @tid: TID
1198  * @ba_window_size: BlockAck window size
1199  * @start_seq: Starting sequence number
1200  *
1201  * Return: 0 on success, error code on failure
1202  */
1203 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1204 	uint32_t ba_window_size, uint32_t start_seq)
1205 {
1206 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1207 	struct dp_vdev *vdev = peer->vdev;
1208 	struct dp_soc *soc = vdev->pdev->soc;
1209 	uint32_t hw_qdesc_size;
1210 	uint32_t hw_qdesc_align;
1211 	int hal_pn_type;
1212 	void *hw_qdesc_vaddr;
1213 	uint32_t alloc_tries = 0;
1214 
1215 	if (peer->delete_in_progress)
1216 		return QDF_STATUS_E_FAILURE;
1217 
1218 	rx_tid->ba_win_size = ba_window_size;
1219 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1220 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1221 			start_seq);
1222 	rx_tid->num_of_addba_req = 0;
1223 	rx_tid->num_of_delba_req = 0;
1224 	rx_tid->num_of_addba_resp = 0;
1225 	rx_tid->num_addba_rsp_failed = 0;
1226 	rx_tid->num_addba_rsp_success = 0;
1227 #ifdef notyet
1228 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1229 #else
1230 	/* TODO: Allocating HW queue descriptors based on max BA window size
1231 	 * for all QOS TIDs so that same descriptor can be used later when
1232 	 * ADDBA request is received. This should be changed to allocate HW
1233 	 * queue descriptors based on BA window size being negotiated (0 for
1234 	 * non BA cases), and reallocate when BA window size changes and also
1235 	 * send WMI message to FW to change the REO queue descriptor in Rx
1236 	 * peer entry as part of dp_rx_tid_update.
1237 	 */
1238 	if (tid != DP_NON_QOS_TID)
1239 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1240 			HAL_RX_MAX_BA_WINDOW);
1241 	else
1242 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1243 			ba_window_size);
1244 #endif
1245 
1246 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1247 	/* To avoid unnecessary extra allocation for alignment, try allocating
1248 	 * exact size and see if we already have aligned address.
1249 	 */
1250 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1251 
1252 try_desc_alloc:
1253 	rx_tid->hw_qdesc_vaddr_unaligned =
1254 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1255 
1256 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1257 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1258 			"%s: Rx tid HW desc alloc failed: tid %d",
1259 			__func__, tid);
1260 		return QDF_STATUS_E_NOMEM;
1261 	}
1262 
1263 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1264 		hw_qdesc_align) {
1265 		/* Address allocated above is not aligned. Allocate extra
1266 		 * memory for alignment
1267 		 */
1268 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1269 		rx_tid->hw_qdesc_vaddr_unaligned =
1270 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1271 					hw_qdesc_align - 1);
1272 
1273 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1274 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1275 				"%s: Rx tid HW desc alloc failed: tid %d",
1276 				__func__, tid);
1277 			return QDF_STATUS_E_NOMEM;
1278 		}
1279 
1280 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1281 			rx_tid->hw_qdesc_vaddr_unaligned,
1282 			hw_qdesc_align);
1283 
1284 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1285 			"%s: Total Size %d Aligned Addr %pK",
1286 			__func__, rx_tid->hw_qdesc_alloc_size,
1287 			hw_qdesc_vaddr);
1288 
1289 	} else {
1290 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1291 	}
1292 
1293 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1294 	 * Currently this is set based on htt indication
1295 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1296 	 */
1297 	switch (peer->security[dp_sec_ucast].sec_type) {
1298 	case cdp_sec_type_tkip_nomic:
1299 	case cdp_sec_type_aes_ccmp:
1300 	case cdp_sec_type_aes_ccmp_256:
1301 	case cdp_sec_type_aes_gcmp:
1302 	case cdp_sec_type_aes_gcmp_256:
1303 		hal_pn_type = HAL_PN_WPA;
1304 		break;
1305 	case cdp_sec_type_wapi:
1306 		if (vdev->opmode == wlan_op_mode_ap)
1307 			hal_pn_type = HAL_PN_WAPI_EVEN;
1308 		else
1309 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1310 		break;
1311 	default:
1312 		hal_pn_type = HAL_PN_NONE;
1313 		break;
1314 	}
1315 
1316 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1317 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1318 
1319 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1320 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1321 		&(rx_tid->hw_qdesc_paddr));
1322 
1323 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1324 			QDF_STATUS_SUCCESS) {
1325 		if (alloc_tries++ < 10)
1326 			goto try_desc_alloc;
1327 		else {
1328 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1329 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1330 			__func__, tid);
1331 			return QDF_STATUS_E_NOMEM;
1332 		}
1333 	}
1334 
1335 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1336 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1337 			vdev->pdev->ctrl_pdev,
1338 			peer->vdev->vdev_id, peer->mac_addr.raw,
1339 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1340 
1341 	}
1342 	return 0;
1343 }
1344 
1345 /*
1346  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1347  * after deleting the entries (i.e., setting valid=0)
1348  *
1349  * @soc: DP SOC handle
1350  * @cb_ctxt: Callback context
1351  * @reo_status: REO command status
1352  */
1353 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1354 	union hal_reo_status *reo_status)
1355 {
1356 	struct reo_desc_list_node *freedesc =
1357 		(struct reo_desc_list_node *)cb_ctxt;
1358 	uint32_t list_size;
1359 	struct reo_desc_list_node *desc;
1360 	unsigned long curr_ts = qdf_get_system_timestamp();
1361 	uint32_t desc_size, tot_desc_size;
1362 	struct hal_reo_cmd_params params;
1363 
1364 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1365 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1366 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1367 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1368 		return;
1369 	} else if (reo_status->rx_queue_status.header.status !=
1370 		HAL_REO_CMD_SUCCESS) {
1371 		/* Should not happen normally. Just print error for now */
1372 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1373 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1374 			__func__,
1375 			reo_status->rx_queue_status.header.status,
1376 			freedesc->rx_tid.tid);
1377 	}
1378 
1379 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1380 		"%s: rx_tid: %d status: %d", __func__,
1381 		freedesc->rx_tid.tid,
1382 		reo_status->rx_queue_status.header.status);
1383 
1384 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1385 	freedesc->free_ts = curr_ts;
1386 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1387 		(qdf_list_node_t *)freedesc, &list_size);
1388 
1389 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1390 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1391 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1392 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1393 		struct dp_rx_tid *rx_tid;
1394 
1395 		qdf_list_remove_front(&soc->reo_desc_freelist,
1396 				(qdf_list_node_t **)&desc);
1397 		list_size--;
1398 		rx_tid = &desc->rx_tid;
1399 
1400 		/* Flush and invalidate REO descriptor from HW cache: Base and
1401 		 * extension descriptors should be flushed separately */
1402 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1403 			rx_tid->ba_win_size);
1404 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1405 
1406 		/* Flush reo extension descriptors */
1407 		while ((tot_desc_size -= desc_size) > 0) {
1408 			qdf_mem_zero(&params, sizeof(params));
1409 			params.std.addr_lo =
1410 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1411 				tot_desc_size) & 0xffffffff;
1412 			params.std.addr_hi =
1413 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1414 
1415 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1416 							CMD_FLUSH_CACHE,
1417 							&params,
1418 							NULL,
1419 							NULL)) {
1420 				QDF_TRACE(QDF_MODULE_ID_DP,
1421 					QDF_TRACE_LEVEL_ERROR,
1422 					"%s: fail to send CMD_CACHE_FLUSH:"
1423 					"tid %d desc %pK", __func__,
1424 					rx_tid->tid,
1425 					(void *)(rx_tid->hw_qdesc_paddr));
1426 			}
1427 		}
1428 
1429 		/* Flush base descriptor */
1430 		qdf_mem_zero(&params, sizeof(params));
1431 		params.std.need_status = 1;
1432 		params.std.addr_lo =
1433 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1434 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1435 
1436 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1437 							  CMD_FLUSH_CACHE,
1438 							  &params,
1439 							  dp_reo_desc_free,
1440 							  (void *)desc)) {
1441 			union hal_reo_status reo_status;
1442 			/*
1443 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
1444 			 * should be unmapped. The local reo_desc, together with the
1445 			 * TID queue desc, also needs to be freed accordingly.
1446 			 *
1447 			 * Here invoke desc_free function directly to do clean up.
1448 			 */
1449 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1450 				"%s: fail to send REO cmd to flush cache: tid %d",
1451 				__func__, rx_tid->tid);
1452 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1453 			reo_status.fl_cache_status.header.status = 0;
1454 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1455 		}
1456 	}
1457 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1458 }
1459 
1460 /*
1461  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1462  * @peer: Datapath peer handle
1463  * @tid: TID
1464  *
1465  * Return: 0 on success, error code on failure
1466  */
1467 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1468 {
1469 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1470 	struct dp_soc *soc = peer->vdev->pdev->soc;
1471 	struct hal_reo_cmd_params params;
1472 	struct reo_desc_list_node *freedesc =
1473 		qdf_mem_malloc(sizeof(*freedesc));
1474 
1475 	if (!freedesc) {
1476 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1477 			"%s: malloc failed for freedesc: tid %d",
1478 			__func__, tid);
1479 		return -ENOMEM;
1480 	}
1481 
1482 	freedesc->rx_tid = *rx_tid;
1483 
1484 	qdf_mem_zero(&params, sizeof(params));
1485 
1486 	params.std.need_status = 0;
1487 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1488 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1489 	params.u.upd_queue_params.update_vld = 1;
1490 	params.u.upd_queue_params.vld = 0;
1491 
1492 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1493 		dp_rx_tid_delete_cb, (void *)freedesc);
1494 
1495 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1496 	rx_tid->hw_qdesc_alloc_size = 0;
1497 	rx_tid->hw_qdesc_paddr = 0;
1498 
1499 	return 0;
1500 }
1501 
1502 #ifdef DP_LFR
1503 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1504 {
1505 	int tid;
1506 
1507 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1508 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1509 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1510 			"Setting up TID %d for peer %pK peer->local_id %d",
1511 			tid, peer, peer->local_id);
1512 	}
1513 }
1514 #else
1515 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1516 #endif
1517 /*
1518  * dp_peer_rx_init() – Initialize receive TID state
1519  * @pdev: Datapath pdev
1520  * @peer: Datapath peer
1521  *
1522  */
1523 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1524 {
1525 	int tid;
1526 	struct dp_rx_tid *rx_tid;
1527 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1528 		rx_tid = &peer->rx_tid[tid];
1529 		rx_tid->array = &rx_tid->base;
1530 		rx_tid->base.head = rx_tid->base.tail = NULL;
1531 		rx_tid->tid = tid;
1532 		rx_tid->defrag_timeout_ms = 0;
1533 		rx_tid->ba_win_size = 0;
1534 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1535 
1536 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1537 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1538 
1539 #ifdef notyet /* TODO: See if this is required for exception handling */
1540 		/* invalid sequence number */
1541 		peer->tids_last_seq[tid] = 0xffff;
1542 #endif
1543 	}
1544 
1545 	/* Setup default (non-qos) rx tid queue */
1546 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1547 
1548 	/* Setup rx tid queue for TID 0.
1549 	 * Other queues will be setup on receiving the first packet, which
1550 	 * would otherwise cause a NULL REO queue error.
1551 	 */
1552 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1553 
1554 	/*
1555 	 * Setup the rest of TID's to handle LFR
1556 	 */
1557 	dp_peer_setup_remaining_tids(peer);
1558 
1559 	/*
1560 	 * Set security defaults: no PN check, no security. The target may
1561 	 * send a HTT SEC_IND message to overwrite these defaults.
1562 	 */
1563 	peer->security[dp_sec_ucast].sec_type =
1564 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1565 }
1566 
1567 /*
1568  * dp_peer_rx_cleanup() – Cleanup receive TID state
1569  * @vdev: Datapath vdev
1570  * @peer: Datapath peer
1571  *
1572  */
1573 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1574 {
1575 	int tid;
1576 	uint32_t tid_delete_mask = 0;
1577 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1578 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1579 
1580 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1581 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1582 			dp_rx_tid_delete_wifi3(peer, tid);
1583 
1584 			/* Cleanup defrag related resource */
1585 			dp_rx_defrag_waitlist_remove(peer, tid);
1586 			dp_rx_reorder_flush_frag(peer, tid);
1587 
1588 			tid_delete_mask |= (1 << tid);
1589 		}
1590 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1591 	}
1592 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1593 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1594 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
1595 			peer->vdev->vdev_id, peer->mac_addr.raw,
1596 			tid_delete_mask);
1597 	}
1598 #endif
1599 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
1600 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
1601 }
1602 
1603 /*
1604  * dp_peer_cleanup() – Cleanup peer information
1605  * @vdev: Datapath vdev
1606  * @peer: Datapath peer
1607  *
1608  */
1609 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1610 {
1611 	peer->last_assoc_rcvd = 0;
1612 	peer->last_disassoc_rcvd = 0;
1613 	peer->last_deauth_rcvd = 0;
1614 
1615 	/* cleanup the Rx reorder queues for this peer */
1616 	dp_peer_rx_cleanup(vdev, peer);
1617 }
1618 
1619 /*
1620 * dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
1621 *
1622 * @peer: Datapath peer handle
1623 * @tid: TID number
1624 * @status: tx completion status
1625 * Return: 0 on success, error code on failure
1626 */
1627 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
1628 				      uint8_t tid, int status)
1629 {
1630 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1631 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1632 
1633 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1634 	if (status) {
1635 		rx_tid->num_addba_rsp_failed++;
1636 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1637 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1638 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1639 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1640 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
1641 			 __func__, tid);
1642 		return 0;
1643 	}
1644 
1645 	rx_tid->num_addba_rsp_success++;
1646 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1647 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1648 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1649 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1650 			__func__, tid);
1651 		return QDF_STATUS_E_FAILURE;
1652 	}
1653 
1654 	if (dp_rx_tid_update_wifi3(peer, tid, rx_tid->ba_win_size,
1655 				   rx_tid->startseqnum)) {
1656 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1657 		return QDF_STATUS_E_FAILURE;
1658 	}
1659 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1660 		rx_tid->statuscode = rx_tid->userstatuscode;
1661 	else
1662 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1663 
1664 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1665 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1666 	return 0;
1667 }
1668 
1669 /*
1670 * dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
1671 *
1672 * @peer: Datapath peer handle
1673 * @tid: TID number
1674 * @dialogtoken: output dialogtoken
1675 * @statuscode: output status code
1676 * @buffersize: Output BA window size
1677 * @batimeout: Output BA timeout
1678 */
1679 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1680 	uint8_t *dialogtoken, uint16_t *statuscode,
1681 	uint16_t *buffersize, uint16_t *batimeout)
1682 {
1683 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1684 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1685 
1686 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1687 	rx_tid->num_of_addba_resp++;
1688 	/* setup ADDBA response parameters */
1689 	*dialogtoken = rx_tid->dialogtoken;
1690 	*statuscode = rx_tid->statuscode;
1691 	*buffersize = rx_tid->ba_win_size;
1692 	*batimeout  = 0;
1693 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1694 }
1695 
1696 /*
1697  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
1698  *
1699  * @peer: Datapath peer handle
1700  * @dialogtoken: dialogtoken from ADDBA frame
1701  * @tid: TID number
1702  * @batimeout: BA timeout
1703  * @buffersize: BA window size
1704  * @startseqnum: Start seq. number received in BA sequence control
1705  *
1706  * Return: 0 on success, error code on failure
1707  */
1708 int dp_addba_requestprocess_wifi3(void *peer_handle,
1709 				  uint8_t dialogtoken,
1710 				  uint16_t tid, uint16_t batimeout,
1711 				  uint16_t buffersize,
1712 				  uint16_t startseqnum)
1713 {
1714 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1715 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1716 
1717 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1718 	rx_tid->num_of_addba_req++;
1719 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1720 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
1721 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
1722 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1723 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1724 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1725 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1726 			  "%s: Rx Tid- %d hw qdesc is already setup",
1727 			__func__, tid);
1728 		return QDF_STATUS_E_FAILURE;
1729 	}
1730 
1731 	if (dp_rx_tid_setup_wifi3(peer, tid, 1, 0)) {
1732 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1733 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1734 		return QDF_STATUS_E_FAILURE;
1735 	}
1736 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1737 
1738 	rx_tid->ba_win_size = buffersize;
1739 	rx_tid->dialogtoken = dialogtoken;
1740 	rx_tid->startseqnum = startseqnum;
1741 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1742 	return 0;
1743 }
1744 
1745 /*
1746  * dp_set_addba_response() - Set a user defined ADDBA response status code
1747  *
1748  * @peer: Datapath peer handle
1749  * @tid: TID number
1750  * @statuscode: response status code to be set
1751  */
1752 void dp_set_addba_response(void *peer_handle, uint8_t tid,
1753 	uint16_t statuscode)
1754 {
1755 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1756 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1757 
1758 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1759 	rx_tid->userstatuscode = statuscode;
1760 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1761 }
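
/*
 * Usage note (sketch only; the status value is an assumed example): the
 * code set here is stored in rx_tid->userstatuscode and is later copied
 * into rx_tid->statuscode, in place of IEEE80211_STATUS_SUCCESS, by the
 * completion handling shown earlier in this file. For example, to decline
 * block ack on the voice TID with an assumed "refused" status of 37:
 *
 *	dp_set_addba_response(peer, 6, 37);
 */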
1762 
1763 /*
1764  * dp_delba_process_wifi3() - Process DELBA from peer
1765  * @peer: Datapath peer handle
1766  * @tid: TID number
1767  * @reasoncode: Reason code received in DELBA frame
1768  *
1769  * Return: 0 on success, error code on failure
1770  */
1771 int dp_delba_process_wifi3(void *peer_handle,
1772 	int tid, uint16_t reasoncode)
1773 {
1774 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1775 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1776 
1777 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1778 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1779 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1780 		return QDF_STATUS_E_FAILURE;
1781 	}
1782 	/* TODO: See if we can delete the existing REO queue descriptor and
1783 	 * replace with a new one without queue extension descriptor to save
1784 	 * memory
1785 	 */
1786 	rx_tid->num_of_delba_req++;
1787 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1788 
1789 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1790 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1791 	return 0;
1792 }
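
/*
 * DELBA handling sketch (hypothetical rx handler): the caller only passes
 * the TID and the received reason code; the datapath shrinks the REO queue
 * window back to 1 and marks the BA session inactive. A failure return
 * means no active BA session existed for that TID.
 *
 *	if (dp_delba_process_wifi3(peer, tid, reasoncode))
 *		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 *			  "DELBA for tid %d with no active BA session", tid);
 */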
1793 
1794 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
1795 	qdf_nbuf_t msdu_list)
1796 {
1797 	while (msdu_list) {
1798 		qdf_nbuf_t msdu = msdu_list;
1799 
1800 		msdu_list = qdf_nbuf_next(msdu_list);
1801 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1802 			"discard rx %pK from partly-deleted peer %pK "
1803 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
1804 			msdu, peer,
1805 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1806 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1807 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
1808 		qdf_nbuf_free(msdu);
1809 	}
1810 }
1811 
1812 
1813 /**
1814  * dp_set_pn_check_wifi3() - enable PN check in REO for security
1815  * @vdev: Datapath vdev handle
1816  * @peer: Datapath peer handle
1817  * @sec_type: security type
1818  * @rx_pn: Receive PN starting number
1819  *
1820  * Return: none
1821  */
1822 
1823 void
1824 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,  uint32_t *rx_pn)
1825 {
1826 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
1827 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1828 	struct dp_pdev *pdev;
1829 	struct dp_soc *soc;
1830 	int i;
1831 	uint8_t pn_size;
1832 	struct hal_reo_cmd_params params;
1833 
1834 	/* preconditions */
1835 	qdf_assert(vdev);
1836 
1837 	pdev = vdev->pdev;
1838 	soc = pdev->soc;
1839 
1840 
1841 	qdf_mem_zero(&params, sizeof(params));
1842 
1843 	params.std.need_status = 1;
1844 	params.u.upd_queue_params.update_pn_valid = 1;
1845 	params.u.upd_queue_params.update_pn_size = 1;
1846 	params.u.upd_queue_params.update_pn = 1;
1847 	params.u.upd_queue_params.update_pn_check_needed = 1;
1848 	params.u.upd_queue_params.update_svld = 1;
1849 	params.u.upd_queue_params.svld = 0;
1850 
1851 	peer->security[dp_sec_ucast].sec_type = sec_type;
1852 
1853 	switch (sec_type) {
1854 	case cdp_sec_type_tkip_nomic:
1855 	case cdp_sec_type_aes_ccmp:
1856 	case cdp_sec_type_aes_ccmp_256:
1857 	case cdp_sec_type_aes_gcmp:
1858 	case cdp_sec_type_aes_gcmp_256:
1859 		params.u.upd_queue_params.pn_check_needed = 1;
1860 		params.u.upd_queue_params.pn_size = 48;
1861 		pn_size = 48;
1862 		break;
1863 	case cdp_sec_type_wapi:
1864 		params.u.upd_queue_params.pn_check_needed = 1;
1865 		params.u.upd_queue_params.pn_size = 128;
1866 		pn_size = 128;
1867 		if (vdev->opmode == wlan_op_mode_ap) {
1868 			params.u.upd_queue_params.pn_even = 1;
1869 			params.u.upd_queue_params.update_pn_even = 1;
1870 		} else {
1871 			params.u.upd_queue_params.pn_uneven = 1;
1872 			params.u.upd_queue_params.update_pn_uneven = 1;
1873 		}
1874 		break;
1875 	default:
1876 		params.u.upd_queue_params.pn_check_needed = 0;
1877 		pn_size = 0;
1878 		break;
1879 	}
1880 
1881 
1882 	for (i = 0; i < DP_MAX_TIDS; i++) {
1883 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1884 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1885 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
1886 			params.std.addr_lo =
1887 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1888 			params.std.addr_hi =
1889 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1890 
1891 			if (sec_type != cdp_sec_type_wapi) {
1892 				params.u.upd_queue_params.update_pn_valid = 0;
1893 			} else {
1894 				/*
1895 				 * Setting PN valid bit for WAPI sec_type,
1896 				 * since WAPI PN has to be started with
1897 				 * predefined value
1898 				 */
1899 				params.u.upd_queue_params.update_pn_valid = 1;
1900 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1901 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1902 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1903 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1904 			}
1905 			rx_tid->pn_size = pn_size;
1906 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1907 				dp_rx_tid_update_cb, rx_tid);
1908 		} else {
1909 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1910 				"PN Check not setup for TID :%d ", i);
1911 		}
1912 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1913 	}
1914 }
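
/*
 * Usage sketch (the calling context is assumed, not taken from this file):
 * after a unicast key is installed, the control path enables the REO PN
 * check for the negotiated cipher. Only WAPI requires a starting PN, since
 * its PN must begin at a predefined value; for the other ciphers rx_pn is
 * not programmed into the queue descriptor.
 *
 *	uint32_t rx_pn[4] = { 0 };
 *
 *	dp_set_pn_check_wifi3((struct cdp_vdev *)vdev,
 *			      (struct cdp_peer *)peer,
 *			      cdp_sec_type_aes_ccmp, rx_pn);
 */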
1915 
1916 
1917 void
1918 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
1919 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
1920 	u_int32_t *rx_pn)
1921 {
1922 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1923 	struct dp_peer *peer;
1924 	int sec_index;
1925 
1926 	peer = dp_peer_find_by_id(soc, peer_id);
1927 	if (!peer) {
1928 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1929 			"Couldn't find peer from ID %d - skipping security inits",
1930 			peer_id);
1931 		return;
1932 	}
1933 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1934 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
1935 		"%s key of type %d",
1936 		peer,
1937 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1938 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1939 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
1940 		is_unicast ? "ucast" : "mcast",
1941 		sec_type);
1942 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
1943 	peer->security[sec_index].sec_type = sec_type;
1944 #ifdef notyet /* TODO: See if this is required for defrag support */
1945 	/* michael key only valid for TKIP, but for simplicity,
1946 	 * copy it anyway
1947 	 */
1948 	qdf_mem_copy(
1949 		&peer->security[sec_index].michael_key[0],
1950 		michael_key,
1951 		sizeof(peer->security[sec_index].michael_key));
1952 #ifdef BIG_ENDIAN_HOST
1953 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
1954 				 sizeof(peer->security[sec_index].michael_key));
1955 #endif /* BIG_ENDIAN_HOST */
1956 #endif
1957 
1958 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
1959 	if (sec_type != htt_sec_type_wapi) {
1960 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
1961 	} else {
1962 		for (i = 0; i < DP_MAX_TIDS; i++) {
1963 			/*
1964 			 * Setting PN valid bit for WAPI sec_type,
1965 			 * since WAPI PN has to be started with predefined value
1966 			 */
1967 			peer->tids_last_pn_valid[i] = 1;
1968 			qdf_mem_copy(
1969 				(u_int8_t *) &peer->tids_last_pn[i],
1970 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
1971 			peer->tids_last_pn[i].pn128[1] =
1972 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
1973 			peer->tids_last_pn[i].pn128[0] =
1974 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
1975 		}
1976 	}
1977 #endif
1978 	/* TODO: Update HW TID queue with PN check parameters (pn type for
1979 	 * all security types and last pn for WAPI) once REO command API
1980 	 * is available
1981 	 */
1982 }
1983 
1984 #ifndef CONFIG_WIN
1985 /**
1986  * dp_register_peer() - Register peer into physical device
1987  * @pdev - data path device instance
1988  * @sta_desc - peer description
1989  *
1990  * Register peer into physical device
1991  *
1992  * Return: QDF_STATUS_SUCCESS registration success
1993  *         QDF_STATUS_E_FAULT peer not found
1994  */
1995 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
1996 		struct ol_txrx_desc_type *sta_desc)
1997 {
1998 	struct dp_peer *peer;
1999 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2000 
2001 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2002 			sta_desc->sta_id);
2003 	if (!peer)
2004 		return QDF_STATUS_E_FAULT;
2005 
2006 	qdf_spin_lock_bh(&peer->peer_info_lock);
2007 	peer->state = OL_TXRX_PEER_STATE_CONN;
2008 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2009 
2010 	return QDF_STATUS_SUCCESS;
2011 }
2012 
2013 /**
2014  * dp_clear_peer() - remove peer from physical device
2015  * @pdev - data path device instance
2016  * @local_id - local peer id
2017  *
2018  * remove peer from physical device
2019  *
2020  * Return: QDF_STATUS_SUCCESS on success
2021  *         QDF_STATUS_E_FAULT peer not found
2022  */
2023 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2024 {
2025 	struct dp_peer *peer;
2026 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2027 
2028 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2029 	if (!peer)
2030 		return QDF_STATUS_E_FAULT;
2031 
2032 	qdf_spin_lock_bh(&peer->peer_info_lock);
2033 	peer->state = OL_TXRX_PEER_STATE_DISC;
2034 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2035 
2036 	return QDF_STATUS_SUCCESS;
2037 }
2038 
2039 /**
2040  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2041  * @pdev - data path device instance
2042  * @vdev - virtual interface instance
2043  * @peer_addr - peer mac address
2044  * @local_id - output argument for the local peer id of the found peer
2045  *
2046  * Find peer by peer mac address within vdev
2047  *
2048  * Return: peer instance void pointer
2049  *         NULL if the target peer cannot be found
2050  */
2051 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2052 		struct cdp_vdev *vdev_handle,
2053 		uint8_t *peer_addr, uint8_t *local_id)
2054 {
2055 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2056 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2057 	struct dp_peer *peer;
2058 
2059 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2060 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2061 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2062 
2063 	if (!peer)
2064 		return NULL;
2065 
2066 	if (peer->vdev != vdev) {
2067 		qdf_atomic_dec(&peer->ref_cnt);
2068 		return NULL;
2069 	}
2070 
2071 	*local_id = peer->local_id;
2072 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2073 
2074 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2075 	 * Decrement it here.
2076 	 */
2077 	qdf_atomic_dec(&peer->ref_cnt);
2078 
2079 	return peer;
2080 }
2081 
2082 /**
2083  * dp_local_peer_id() - Find local peer id within peer instance
2084  * @peer - peer instance
2085  *
2086  * Find local peer id within peer instance
2087  *
2088  * Return: local peer id
2089  */
2090 uint16_t dp_local_peer_id(void *peer)
2091 {
2092 	return ((struct dp_peer *)peer)->local_id;
2093 }
2094 
2095 /**
2096  * dp_peer_find_by_local_id() - Find peer by local peer id
2097  * @pdev - data path device instance
2098  * @local_peer_id - local peer id want to find
2099  *
2100  * Find peer by local peer id within physical device
2101  *
2102  * Return: peer instance void pointer
2103  *         NULL if the target peer cannot be found
2104  */
2105 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2106 {
2107 	struct dp_peer *peer;
2108 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2109 
2110 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2111 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2112 				   "Incorrect local id %u", local_id);
2113 		return NULL;
2114 	}
2115 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2116 	peer = pdev->local_peer_ids.map[local_id];
2117 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2118 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2119 	return peer;
2120 }
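
/*
 * Lookup sketch (hypothetical caller): translate a legacy sta_id/local id
 * into a peer and read its MAC address. Ids outside
 * [0, OL_TXRX_NUM_LOCAL_PEER_IDS) and unused map slots both yield NULL.
 *
 *	struct dp_peer *p;
 *
 *	p = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
 *	if (p)
 *		mac = dp_peer_get_peer_mac_addr(p);
 */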
2121 
2122 /**
2123  * dp_peer_state_update() - update peer local state
2124  * @pdev - data path device instance
2125  * @peer_addr - peer mac address
2126  * @state - new peer local state
2127  *
2128  * update peer local state
2129  *
2130  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if peer not found
2131  */
2132 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2133 		enum ol_txrx_peer_state state)
2134 {
2135 	struct dp_peer *peer;
2136 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2137 
2138 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2139 	if (NULL == peer) {
2140 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2141 		"Failed to find peer for: [%pM]", peer_mac);
2142 		return QDF_STATUS_E_FAILURE;
2143 	}
2144 	peer->state = state;
2145 
2146 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2147 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2148 	 * Decrement it here.
2149 	 */
2150 	qdf_atomic_dec(&peer->ref_cnt);
2151 
2152 	return QDF_STATUS_SUCCESS;
2153 }
2154 
2155 /**
2156  * dp_get_vdevid() - Get virtual interface id which peer registered
2157  * @peer - peer instance
2158  * @vdev_id - virtual interface id which peer registered
2159  *
2160  * Get virtual interface id which peer registered
2161  *
2162  * Return: QDF_STATUS_SUCCESS
2163  */
2164 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2165 {
2166 	struct dp_peer *peer = peer_handle;
2167 
2168 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2169 			peer, peer->vdev, peer->vdev->vdev_id);
2170 	*vdev_id = peer->vdev->vdev_id;
2171 	return QDF_STATUS_SUCCESS;
2172 }
2173 
2174 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2175 				       uint8_t sta_id)
2176 {
2177 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2178 	struct dp_peer *peer = NULL;
2179 
2180 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2181 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2182 			  "Invalid sta id passed");
2183 		return NULL;
2184 	}
2185 
2186 	if (!pdev) {
2187 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2188 			  "PDEV not found for sta_id [%d]", sta_id);
2189 		return NULL;
2190 	}
2191 
2192 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2193 	if (!peer) {
2194 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2195 			  "PEER [%d] not found", sta_id);
2196 		return NULL;
2197 	}
2198 
2199 	return (struct cdp_vdev *)peer->vdev;
2200 }
2201 
2202 /**
2203  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
2204  * @peer - peer instance
2205  *
2206  * Get virtual interface instance to which peer belongs
2207  *
2208  * Return: virtual interface instance pointer
2209  *         NULL in case it cannot be found
2210  */
2211 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2212 {
2213 	struct dp_peer *peer = peer_handle;
2214 
2215 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
2216 	return (struct cdp_vdev *)peer->vdev;
2217 }
2218 
2219 /**
2220  * dp_peer_get_peer_mac_addr() - Get peer mac address
2221  * @peer - peer instance
2222  *
2223  * Get peer mac address
2224  *
2225  * Return: peer mac address pointer
2226  *         NULL in case it cannot be found
2227  */
2228 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2229 {
2230 	struct dp_peer *peer = peer_handle;
2231 	uint8_t *mac;
2232 
2233 	mac = peer->mac_addr.raw;
2234 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2235 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2236 	return peer->mac_addr.raw;
2237 }
2238 
2239 /**
2240  * dp_get_peer_state() - Get local peer state
2241  * @peer - peer instance
2242  *
2243  * Get local peer state
2244  *
2245  * Return: peer status
2246  */
2247 int dp_get_peer_state(void *peer_handle)
2248 {
2249 	struct dp_peer *peer = peer_handle;
2250 
2251 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2252 	return peer->state;
2253 }
2254 
2255 /**
2256  * dp_get_last_mgmt_timestamp() - get timestamp of last mgmt frame
2257  * @ppdev: pdev handle
2258  * @peer_addr: peer mac addr
2259  * @subtype: management frame type
2260  * @timestamp: output argument for the last timestamp
2261  *
2262  * Return: true if timestamp is retrieved for valid peer else false
2263  */
2264 bool dp_get_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
2265 				u8 subtype, qdf_time_t *timestamp)
2266 {
2267 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
2268 	unsigned int index;
2269 	struct dp_peer *peer;
2270 	struct dp_soc *soc;
2271 
2272 	bool ret = false;
2273 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
2274 
2275 	soc = pdev->soc;
2276 	qdf_mem_copy(
2277 		&local_mac_addr_aligned.raw[0],
2278 		peer_addr, DP_MAC_ADDR_LEN);
2279 	mac_addr = &local_mac_addr_aligned;
2280 
2281 	index = dp_peer_find_hash_index(soc, mac_addr);
2282 
2283 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2284 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
2285 #if ATH_SUPPORT_WRAP
2286 		/* ProxySTA may have multiple BSS peer with same MAC address,
2287 		 * modified find will take care of finding the correct BSS peer.
2288 		 */
2289 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
2290 		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
2291 #else
2292 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
2293 #endif
2294 			/* found it */
2295 			switch (subtype) {
2296 			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
2297 				*timestamp = peer->last_assoc_rcvd;
2298 				ret = true;
2299 				break;
2300 			case IEEE80211_FC0_SUBTYPE_DISASSOC:
2301 			case IEEE80211_FC0_SUBTYPE_DEAUTH:
2302 				*timestamp = peer->last_disassoc_rcvd;
2303 				ret = true;
2304 				break;
2305 			default:
2306 				break;
2307 			}
2308 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2309 			return ret;
2310 		}
2311 	}
2312 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2313 	return false;		/*failure*/
2314 }
2315 
2316 /**
2317  * dp_update_last_mgmt_timestamp() - set timestamp of last mgmt frame
2318  * @ppdev: pdev handle
2319  * @peer_addr: peer mac addr
2320  * @timestamp: time to be set
2321  * @subtype: management frame type
2322  *
2323  * Return: true if timestamp is updated for valid peer else false
2324  */
2325 
2326 bool dp_update_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
2327 				   qdf_time_t timestamp, u8 subtype)
2328 {
2329 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
2330 	unsigned int index;
2331 	struct dp_peer *peer;
2332 	struct dp_soc *soc;
2333 
2334 	bool ret = false;
2335 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
2336 
2337 	soc = pdev->soc;
2338 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
2339 		     peer_addr, DP_MAC_ADDR_LEN);
2340 	mac_addr = &local_mac_addr_aligned;
2341 
2342 	index = dp_peer_find_hash_index(soc, mac_addr);
2343 
2344 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2345 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
2346 #if ATH_SUPPORT_WRAP
2347 		/* ProxySTA may have multiple BSS peer with same MAC address,
2348 		 * modified find will take care of finding the correct BSS peer.
2349 		 */
2350 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
2351 		    (peer->vdev->vdev_id == DP_VDEV_ALL)) {
2352 #else
2353 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
2354 #endif
2355 			/* found it */
2356 			switch (subtype) {
2357 			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
2358 				peer->last_assoc_rcvd = timestamp;
2359 				ret = true;
2360 				break;
2361 			case IEEE80211_FC0_SUBTYPE_DISASSOC:
2362 			case IEEE80211_FC0_SUBTYPE_DEAUTH:
2363 				peer->last_disassoc_rcvd = timestamp;
2364 				ret = true;
2365 				break;
2366 			default:
2367 				break;
2368 			}
2369 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2370 			return ret;
2371 		}
2372 	}
2373 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2374 	return false;		/*failure*/
2375 }
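
/*
 * Pairing sketch (hypothetical mgmt-rx path; qdf_get_system_timestamp() and
 * the frame header access are assumptions): record the time of each assoc
 * request and query it later, e.g. to rate-limit a peer that keeps
 * re-associating. Both helpers walk the same peer hash bin under
 * soc->peer_ref_mutex and return false if no matching peer is found.
 *
 *	qdf_time_t last = 0;
 *
 *	dp_update_last_mgmt_timestamp(ppdev, wh->i_addr2,
 *				      qdf_get_system_timestamp(),
 *				      IEEE80211_FC0_SUBTYPE_ASSOC_REQ);
 *	if (dp_get_last_mgmt_timestamp(ppdev, wh->i_addr2,
 *				       IEEE80211_FC0_SUBTYPE_ASSOC_REQ, &last)) {
 *		// "last" now holds the timestamp stored above
 *	}
 */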
2376 
2377 /**
2378  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2379  * @pdev - data path device instance
2380  *
2381  * local peer id pool alloc for physical device
2382  *
2383  * Return: none
2384  */
2385 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2386 {
2387 	int i;
2388 
2389 	/* point the freelist to the first ID */
2390 	pdev->local_peer_ids.freelist = 0;
2391 
2392 	/* link each ID to the next one */
2393 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2394 		pdev->local_peer_ids.pool[i] = i + 1;
2395 		pdev->local_peer_ids.map[i] = NULL;
2396 	}
2397 
2398 	/* link the last ID to itself, to mark the end of the list */
2399 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2400 	pdev->local_peer_ids.pool[i] = i;
2401 
2402 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2403 	DP_TRACE(INFO, "Peer pool init");
2404 }
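
/*
 * Worked example of the freelist layout (assuming, as the init code above
 * implies, that the pool array has OL_TXRX_NUM_LOCAL_PEER_IDS + 1 entries).
 * With 4 usable ids, right after dp_local_peer_id_pool_init():
 *
 *	pool[0] = 1, pool[1] = 2, pool[2] = 3, pool[3] = 4, pool[4] = 4
 *	freelist = 0
 *
 * Each entry holds the next free id; the extra last entry points to itself
 * and is the end-of-list marker that dp_local_peer_id_alloc() uses to detect
 * pool exhaustion, while dp_local_peer_id_free() pushes released ids back
 * onto the head of the list.
 */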
2405 
2406 /**
2407  * dp_local_peer_id_alloc() - allocate local peer id
2408  * @pdev - data path device instance
2409  * @peer - new peer instance
2410  *
2411  * allocate local peer id
2412  *
2413  * Return: none
2414  */
2415 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2416 {
2417 	int i;
2418 
2419 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2420 	i = pdev->local_peer_ids.freelist;
2421 	if (pdev->local_peer_ids.pool[i] == i) {
2422 		/* the list is empty, except for the list-end marker */
2423 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2424 	} else {
2425 		/* take the head ID and advance the freelist */
2426 		peer->local_id = i;
2427 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2428 		pdev->local_peer_ids.map[i] = peer;
2429 	}
2430 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2431 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2432 }
2433 
2434 /**
2435  * dp_local_peer_id_free() - remove local peer id
2436  * @pdev - data path device instance
2437  * @peer - peer instance should be removed
2438  *
2439  * remove local peer id
2440  *
2441  * Return: none
2442  */
2443 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2444 {
2445 	int i = peer->local_id;
2446 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2447 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2448 		return;
2449 	}
2450 
2451 	/* put this ID on the head of the freelist */
2452 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2453 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2454 	pdev->local_peer_ids.freelist = i;
2455 	pdev->local_peer_ids.map[i] = NULL;
2456 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2457 }
2458 #endif
2459 
2460 /**
2461  * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
2462  * @soc_handle: DP SOC handle
2463  * @peer_id: peer_id of the peer
2464  * @peer_mac: output buffer for the peer mac address
2465  * Return: vdev_id of the vap
2466  */
2467 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2468 		uint16_t peer_id, uint8_t *peer_mac)
2469 {
2470 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2471 	struct dp_peer *peer;
2472 
2473 	peer = dp_peer_find_by_id(soc, peer_id);
2474 
2475 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2476 			"soc %pK peer_id %d", soc, peer_id);
2477 
2478 	if (!peer) {
2479 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2480 				"peer not found ");
2481 		return CDP_INVALID_VDEV_ID;
2482 	}
2483 
2484 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, DP_MAC_ADDR_LEN);
2485 	return peer->vdev->vdev_id;
2486 }
2487 
2488 /**
2489  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
2490  * @peer: DP peer handle
2491  * @dp_stats_cmd_cb: REO command callback function
2492  * @cb_ctxt: Callback context
2493  *
2494  * Return: none
2495  */
2496 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2497 			void *cb_ctxt)
2498 {
2499 	struct dp_soc *soc = peer->vdev->pdev->soc;
2500 	struct hal_reo_cmd_params params;
2501 	int i;
2502 
2503 	if (!dp_stats_cmd_cb)
2504 		return;
2505 
2506 	qdf_mem_zero(&params, sizeof(params));
2507 	for (i = 0; i < DP_MAX_TIDS; i++) {
2508 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2509 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2510 			params.std.need_status = 1;
2511 			params.std.addr_lo =
2512 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2513 			params.std.addr_hi =
2514 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2515 
2516 			if (cb_ctxt) {
2517 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2518 					&params, dp_stats_cmd_cb, cb_ctxt);
2519 			} else {
2520 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2521 					&params, dp_stats_cmd_cb, rx_tid);
2522 			}
2523 
2524 			/* Flush REO descriptor from HW cache to update stats
2525 			 * in descriptor memory. This is to help debugging */
2526 			qdf_mem_zero(&params, sizeof(params));
2527 			params.std.need_status = 0;
2528 			params.std.addr_lo =
2529 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2530 			params.std.addr_hi =
2531 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2532 			params.u.fl_cache_params.flush_no_inval = 1;
2533 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2534 				NULL);
2535 		}
2536 	}
2537 }
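
/*
 * Usage sketch (the callback below and the exact REO completion callback
 * signature are assumptions, not definitions from this file): the callback
 * supplied here is invoked from the REO command completion path once per
 * TID that has a hardware queue descriptor, with either cb_ctxt or the
 * rx_tid as its context argument.
 *
 *	static void print_rx_tid_stats(struct dp_soc *soc, void *cb_ctxt,
 *				       union hal_reo_status *reo_status)
 *	{
 *		// parse reo_status->queue_status and log the counters
 *	}
 *
 *	dp_peer_rxtid_stats(peer, print_rx_tid_stats, NULL);
 */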
2538 
2539 void dp_set_michael_key(struct cdp_peer *peer_handle,
2540 			bool is_unicast, uint32_t *key)
2541 {
2542 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2543 	uint8_t sec_index = is_unicast ? 1 : 0;
2544 
2545 	if (!peer) {
2546 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2547 			  "peer not found ");
2548 		return;
2549 	}
2550 
2551 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
2552 		     key, IEEE80211_WEP_MICLEN);
2553 }
2554