xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <hal_hw_headers.h>
22 #include "dp_htt.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_peer.h"
26 #include "dp_rx_defrag.h"
27 #include <hal_api.h>
28 #include <hal_reo.h>
29 #ifdef CONFIG_MCL
30 #include <cds_ieee80211_common.h>
31 #include <cds_api.h>
32 #endif
33 #include <cdp_txrx_handle.h>
34 #include <wlan_cfg.h>
35 
36 #ifdef DP_LFR
37 static inline void
38 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 					uint8_t valid)
40 {
41 	params->u.upd_queue_params.update_svld = 1;
42 	params->u.upd_queue_params.svld = valid;
43 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
44 		"%s: Setting SSN valid bit to %d",
45 				__func__, valid);
46 }
47 #else
48 static inline void
49 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 					uint8_t valid) {};
51 #endif
52 
53 static inline int dp_peer_find_mac_addr_cmp(
54 	union dp_align_mac_addr *mac_addr1,
55 	union dp_align_mac_addr *mac_addr2)
56 {
57 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 		/*
59 		 * Intentionally use & rather than &&.
60 		 * Because the operands are binary rather than generic boolean,
61 		 * the functionality is equivalent.
62 		 * Using && has the advantage of short-circuited evaluation,
63 		 * but using & has the advantage of no conditional branching,
64 		 * which is a more significant benefit.
65 		 */
66 		&
67 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68 }
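/*
 * Illustrative note (added annotation, not in the original source): the
 * comparison is memcmp-style, i.e. it returns 0 when the two addresses
 * match and non-zero otherwise, so callers test
 * dp_peer_find_mac_addr_cmp(a, b) == 0 to detect a match.
 */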
69 
70 static int dp_peer_find_map_attach(struct dp_soc *soc)
71 {
72 	uint32_t max_peers, peer_map_size;
73 
74 	max_peers = soc->max_peers;
75 	/* allocate the peer ID -> peer object map */
76 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
77 		"\n<=== cfg max peer id %d ====>", max_peers);
78 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
79 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
80 	if (!soc->peer_id_to_obj_map) {
81 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
82 			"%s: peer map memory allocation failed", __func__);
83 		return QDF_STATUS_E_NOMEM;
84 	}
85 
86 	/*
87 	 * The peer_id_to_obj_map doesn't really need to be initialized,
88 	 * since elements are only used after they have been individually
89 	 * initialized.
90 	 * However, it is convenient for debugging to have all elements
91 	 * that are not in use set to 0.
92 	 */
93 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
94 	return 0; /* success */
95 }
96 
97 static int dp_log2_ceil(unsigned value)
98 {
99 	unsigned tmp = value;
100 	int log2 = -1;
101 
102 	while (tmp) {
103 		log2++;
104 		tmp >>= 1;
105 	}
106 	if (1 << log2 != value)
107 		log2++;
108 	return log2;
109 }
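/*
 * Worked example (illustrative only, added annotation): dp_log2_ceil(33)
 * shifts tmp through 33, 16, 8, 4, 2, 1 giving log2 = 5; since
 * (1 << 5) == 32 != 33 the result is bumped to 6, i.e. the smallest power
 * of two >= 33 is 64.  dp_log2_ceil(32) stays at 5 because 32 is already
 * a power of two.
 */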
110 
111 static int dp_peer_find_add_id_to_obj(
112 	struct dp_peer *peer,
113 	uint16_t peer_id)
114 {
115 	int i;
116 
117 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
118 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
119 			peer->peer_ids[i] = peer_id;
120 			return 0; /* success */
121 		}
122 	}
123 	return QDF_STATUS_E_FAILURE; /* failure */
124 }
125 
126 #define DP_PEER_HASH_LOAD_MULT  2
127 #define DP_PEER_HASH_LOAD_SHIFT 0
128 
129 #define DP_AST_HASH_LOAD_MULT  2
130 #define DP_AST_HASH_LOAD_SHIFT 0
131 
132 static int dp_peer_find_hash_attach(struct dp_soc *soc)
133 {
134 	int i, hash_elems, log2;
135 
136 	/* allocate the peer MAC address -> peer object hash table */
137 	hash_elems = soc->max_peers;
138 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
139 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
140 	log2 = dp_log2_ceil(hash_elems);
141 	hash_elems = 1 << log2;
142 
143 	soc->peer_hash.mask = hash_elems - 1;
144 	soc->peer_hash.idx_bits = log2;
145 	/* allocate an array of TAILQ peer object lists */
146 	soc->peer_hash.bins = qdf_mem_malloc(
147 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
148 	if (!soc->peer_hash.bins)
149 		return QDF_STATUS_E_NOMEM;
150 
151 	for (i = 0; i < hash_elems; i++)
152 		TAILQ_INIT(&soc->peer_hash.bins[i]);
153 
154 	return 0;
155 }
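/*
 * Sizing sketch with hypothetical numbers (added annotation): for
 * max_peers = 33, DP_PEER_HASH_LOAD_MULT = 2 and
 * DP_PEER_HASH_LOAD_SHIFT = 0, hash_elems becomes 66,
 * dp_log2_ceil(66) = 7, so 128 bins are allocated with
 * peer_hash.mask = 127 and peer_hash.idx_bits = 7.
 */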
156 
157 static void dp_peer_find_hash_detach(struct dp_soc *soc)
158 {
159 	qdf_mem_free(soc->peer_hash.bins);
160 }
161 
162 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
163 	union dp_align_mac_addr *mac_addr)
164 {
165 	unsigned index;
166 
167 	index =
168 		mac_addr->align2.bytes_ab ^
169 		mac_addr->align2.bytes_cd ^
170 		mac_addr->align2.bytes_ef;
171 	index ^= index >> soc->peer_hash.idx_bits;
172 	index &= soc->peer_hash.mask;
173 	return index;
174 }
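/*
 * Hashing sketch (hypothetical values, added annotation; assumes the
 * align2 halves hold the MAC bytes little-endian): for 00:11:22:33:44:55,
 * bytes_ab = 0x1100, bytes_cd = 0x3322, bytes_ef = 0x5544, so the XOR is
 * 0x7766; with idx_bits = 7 the fold gives 0x7766 ^ (0x7766 >> 7) = 0x7788,
 * and masking with 127 yields bin index 8.
 */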
175 
176 
177 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
178 {
179 	unsigned index;
180 
181 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
182 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
183 	/*
184 	 * It is important to add the new peer at the tail of the peer list
185 	 * with the bin index.  Together with having the hash_find function
186 	 * search from head to tail, this ensures that if two entries with
187 	 * the same MAC address are stored, the one added first will be
188 	 * found first.
189 	 */
190 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
191 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
192 }
193 
194 #ifdef FEATURE_AST
195 /*
196  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
197  * @soc: SoC handle
198  *
199  * Return: 0 on success, QDF_STATUS_E_NOMEM on failure
200  */
201 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
202 {
203 	int i, hash_elems, log2;
204 
205 	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
206 		DP_AST_HASH_LOAD_SHIFT);
207 
208 	log2 = dp_log2_ceil(hash_elems);
209 	hash_elems = 1 << log2;
210 
211 	soc->ast_hash.mask = hash_elems - 1;
212 	soc->ast_hash.idx_bits = log2;
213 
214 	/* allocate an array of TAILQ peer object lists */
215 	soc->ast_hash.bins = qdf_mem_malloc(
216 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
217 				dp_ast_entry)));
218 
219 	if (!soc->ast_hash.bins)
220 		return QDF_STATUS_E_NOMEM;
221 
222 	for (i = 0; i < hash_elems; i++)
223 		TAILQ_INIT(&soc->ast_hash.bins[i]);
224 
225 	return 0;
226 }
227 
228 /*
229  * dp_peer_ast_hash_detach() - Free AST Hash table
230  * @soc: SoC handle
231  *
232  * Return: None
233  */
234 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
235 {
236 	qdf_mem_free(soc->ast_hash.bins);
237 }
238 
239 /*
240  * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
241  * @soc: SoC handle
242  *
243  * Return: AST hash
244  */
245 static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
246 	union dp_align_mac_addr *mac_addr)
247 {
248 	uint32_t index;
249 
250 	index =
251 		mac_addr->align2.bytes_ab ^
252 		mac_addr->align2.bytes_cd ^
253 		mac_addr->align2.bytes_ef;
254 	index ^= index >> soc->ast_hash.idx_bits;
255 	index &= soc->ast_hash.mask;
256 	return index;
257 }
258 
259 /*
260  * dp_peer_ast_hash_add() - Add AST entry into hash table
261  * @soc: SoC handle
262  *
263  * This function adds the AST entry into SoC AST hash table
264  * It assumes caller has taken the ast lock to protect the access to this table
265  *
266  * Return: None
267  */
268 static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
269 		struct dp_ast_entry *ase)
270 {
271 	uint32_t index;
272 
273 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
274 	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
275 }
276 
277 /*
278  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
279  * @soc: SoC handle
280  *
281  * This function removes the AST entry from soc AST hash table
282  * It assumes caller has taken the ast lock to protect the access to this table
283  *
284  * Return: None
285  */
286 static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
287 		struct dp_ast_entry *ase)
288 {
289 	unsigned index;
290 	struct dp_ast_entry *tmpase;
291 	int found = 0;
292 
293 	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
294 	/* Check if tail is not empty before delete */
295 	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
296 
297 	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
298 		if (tmpase == ase) {
299 			found = 1;
300 			break;
301 		}
302 	}
303 
304 	QDF_ASSERT(found);
305 	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
306 }
307 
308 /*
309  * dp_peer_ast_hash_find() - Find AST entry by MAC address
310  * @soc: SoC handle
311  *
312  * It assumes caller has taken the ast lock to protect the access to
313  * AST hash table
314  *
315  * Return: AST entry
316  */
317 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
318 						uint8_t *ast_mac_addr)
319 {
320 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
321 	unsigned index;
322 	struct dp_ast_entry *ase;
323 
324 	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
325 			ast_mac_addr, DP_MAC_ADDR_LEN);
326 	mac_addr = &local_mac_addr_aligned;
327 
328 	index = dp_peer_ast_hash_index(soc, mac_addr);
329 	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
330 		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
331 			return ase;
332 		}
333 	}
334 
335 	return NULL;
336 }
337 
338 /*
339  * dp_peer_map_ast() - Map the ast entry with HW AST Index
340  * @soc: SoC handle
341  * @peer: peer to which ast node belongs
342  * @mac_addr: MAC address of ast node
343  * @hw_peer_id: HW AST Index returned by target in peer map event
344  * @vdev_id: vdev id for VAP to which the peer belongs to
345  *
346  * Return: None
347  */
348 static inline void dp_peer_map_ast(struct dp_soc *soc,
349 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
350 	uint8_t vdev_id)
351 {
352 	struct dp_ast_entry *ast_entry;
353 	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
354 	bool ast_entry_found = FALSE;
355 
356 	if (!peer) {
357 		return;
358 	}
359 
360 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
361 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
362 		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
363 		mac_addr[1], mac_addr[2], mac_addr[3],
364 		mac_addr[4], mac_addr[5]);
365 
366 	qdf_spin_lock_bh(&soc->ast_lock);
367 	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
368 		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
369 				DP_MAC_ADDR_LEN))) {
370 			ast_entry->ast_idx = hw_peer_id;
371 			soc->ast_table[hw_peer_id] = ast_entry;
372 			ast_entry->is_active = TRUE;
373 			peer_type = ast_entry->type;
374 			ast_entry_found = TRUE;
375 		}
376 	}
377 
378 	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
379 		if (soc->cdp_soc.ol_ops->peer_map_event) {
380 			soc->cdp_soc.ol_ops->peer_map_event(
381 			soc->ctrl_psoc, peer->peer_ids[0],
382 			hw_peer_id, vdev_id,
383 			mac_addr, peer_type);
384 		}
385 	} else {
386 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
387 			"AST entry not found");
388 	}
389 
390 	qdf_spin_unlock_bh(&soc->ast_lock);
391 	return;
392 }
393 
394 /*
395  * dp_peer_add_ast() - Allocate and add AST entry into peer list
396  * @soc: SoC handle
397  * @peer: peer to which ast node belongs
398  * @mac_addr: MAC address of ast node
399  * @type: AST entry type
 * @flags: wds or hmwds
400  *
401  * This API is used by WDS source port learning function to
402  * add a new AST entry into peer AST list
403  *
404  * Return: 0 if new entry is allocated,
405  *        -1 if entry add failed
406  */
407 int dp_peer_add_ast(struct dp_soc *soc,
408 			struct dp_peer *peer,
409 			uint8_t *mac_addr,
410 			enum cdp_txrx_ast_entry_type type,
411 			uint32_t flags)
412 {
413 	struct dp_ast_entry *ast_entry;
414 	struct dp_vdev *vdev = peer->vdev;
415 	uint8_t next_node_mac[6];
416 	int  ret = -1;
417 
418 	if (!vdev) {
419 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
420 			FL("Peers vdev is NULL"));
421 		QDF_ASSERT(0);
422 		return ret;
423 	}
424 
425 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
426 		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
427 		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
428 		mac_addr[3], mac_addr[4], mac_addr[5]);
429 
430 	qdf_spin_lock_bh(&soc->ast_lock);
431 
432 	/* If AST entry already exists, just return from here */
433 	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);
434 
435 	if (ast_entry) {
436 		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) {
437 			ast_entry->is_active = TRUE;
438 			qdf_spin_unlock_bh(&soc->ast_lock);
439 			return 0;
440 		}
441 
442 		/*
443 		 * WAR for HK 1.x AST issue
444 		 * If an AST entry with same mac address already exists and is
445 		 * mapped to a different radio, and if the current radio is
446 	 * primary radio, delete the existing AST entry and return.
447 		 *
448 		 * New AST entry will be created again on next SA_invalid
449 		 * frame
450 		 */
451 		if ((ast_entry->pdev_id != vdev->pdev->pdev_id) &&
452 		    vdev->pdev->is_primary) {
453 			qdf_print("Deleting ast_pdev=%d pdev=%d addr=%pM\n",
454 				  ast_entry->pdev_id,
455 				  vdev->pdev->pdev_id, mac_addr);
456 			dp_peer_del_ast(soc, ast_entry);
457 		}
458 
459 		qdf_spin_unlock_bh(&soc->ast_lock);
460 		return 0;
461 	}
462 
463 	ast_entry = (struct dp_ast_entry *)
464 			qdf_mem_malloc(sizeof(struct dp_ast_entry));
465 
466 	if (!ast_entry) {
467 		qdf_spin_unlock_bh(&soc->ast_lock);
468 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
469 			FL("fail to allocate ast_entry"));
470 		QDF_ASSERT(0);
471 		return ret;
472 	}
473 
474 	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
475 	ast_entry->peer = peer;
476 	ast_entry->pdev_id = vdev->pdev->pdev_id;
477 	ast_entry->vdev_id = vdev->vdev_id;
478 
479 	switch (type) {
480 	case CDP_TXRX_AST_TYPE_STATIC:
481 		peer->self_ast_entry = ast_entry;
482 		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
483 		break;
484 	case CDP_TXRX_AST_TYPE_SELF:
485 		peer->self_ast_entry = ast_entry;
486 		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
487 		break;
488 	case CDP_TXRX_AST_TYPE_WDS:
489 		ast_entry->next_hop = 1;
490 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
491 		break;
492 	case CDP_TXRX_AST_TYPE_WDS_HM:
493 		ast_entry->next_hop = 1;
494 		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
495 		break;
496 	case CDP_TXRX_AST_TYPE_MEC:
497 		ast_entry->next_hop = 1;
498 		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
499 		break;
500 	default:
501 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
502 			FL("Incorrect AST entry type"));
503 	}
504 
505 	ast_entry->is_active = TRUE;
506 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
507 	DP_STATS_INC(soc, ast.added, 1);
508 	dp_peer_ast_hash_add(soc, ast_entry);
509 	qdf_spin_unlock_bh(&soc->ast_lock);
510 
511 	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
512 		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
513 	else
514 		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
515 
516 	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
517 	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
518 		if (QDF_STATUS_SUCCESS ==
519 				soc->cdp_soc.ol_ops->peer_add_wds_entry(
520 				peer->vdev->osif_vdev,
521 				mac_addr,
522 				next_node_mac,
523 				flags))
524 			return 0;
525 	}
526 
527 	return ret;
528 }
529 
530 /*
531  * dp_peer_del_ast() - Delete and free AST entry
532  * @soc: SoC handle
533  * @ast_entry: AST entry of the node
534  *
535  * This function removes the AST entry from peer and soc tables
536  * It assumes caller has taken the ast lock to protect the access to these
537  * tables
538  *
539  * Return: None
540  */
541 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
542 {
543 	struct dp_peer *peer = ast_entry->peer;
544 
545 	if (ast_entry->next_hop)
546 		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
547 						ast_entry->mac_addr.raw);
548 
549 	soc->ast_table[ast_entry->ast_idx] = NULL;
550 	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
551 
552 	if (ast_entry == peer->self_ast_entry)
553 		peer->self_ast_entry = NULL;
554 
555 	DP_STATS_INC(soc, ast.deleted, 1);
556 	dp_peer_ast_hash_remove(soc, ast_entry);
557 	qdf_mem_free(ast_entry);
558 }
559 
560 /*
561  * dp_peer_update_ast() - Update the AST entry to the roamed peer
562  * @soc: SoC handle
563  * @peer: peer to which ast node belongs
564  * @ast_entry: AST entry of the node
565  * @flags: wds or hmwds
566  *
567  * This function updates the AST entry to the roamed peer in the peer and soc tables
568  * It assumes caller has taken the ast lock to protect the access to these
569  * tables
570  *
571  * Return: 0 if ast entry is updated successfully
572  *         -1 failure
573  */
574 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
575 		       struct dp_ast_entry *ast_entry, uint32_t flags)
576 {
577 	int ret = -1;
578 	struct dp_peer *old_peer;
579 
580 	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
581 	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF))
582 		return 0;
583 
584 	old_peer = ast_entry->peer;
585 	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
586 
587 	ast_entry->peer = peer;
588 	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
589 	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
590 	ast_entry->vdev_id = peer->vdev->vdev_id;
591 	ast_entry->is_active = TRUE;
592 	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
593 
594 	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
595 				peer->vdev->osif_vdev,
596 				ast_entry->mac_addr.raw,
597 				peer->mac_addr.raw,
598 				flags);
599 
600 	return ret;
601 }
602 
603 /*
604  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
605  * @soc: SoC handle
606  * @ast_entry: AST entry of the node
607  *
608  * This function gets the pdev_id from the ast entry.
609  *
610  * Return: (uint8_t) pdev_id
611  */
612 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
613 				struct dp_ast_entry *ast_entry)
614 {
615 	return ast_entry->pdev_id;
616 }
617 
618 /*
619  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
620  * @soc: SoC handle
621  * @ast_entry: AST entry of the node
622  *
623  * This function gets the next hop from the ast entry.
624  *
625  * Return: (uint8_t) next_hop
626  */
627 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
628 				struct dp_ast_entry *ast_entry)
629 {
630 	return ast_entry->next_hop;
631 }
632 
633 /*
634  * dp_peer_ast_set_type() - set type in the ast entry
635  * @soc: SoC handle
636  * @ast_entry: AST entry of the node
637  *
638  * This function sets the type in the ast entry.
639  *
640  * Return: None
641  */
642 void dp_peer_ast_set_type(struct dp_soc *soc,
643 				struct dp_ast_entry *ast_entry,
644 				enum cdp_txrx_ast_entry_type type)
645 {
646 	ast_entry->type = type;
647 }
648 
649 #else
650 int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
651 		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
652 		uint32_t flags)
653 {
654 	return 1;
655 }
656 
657 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
658 {
659 }
660 
661 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
662 			struct dp_ast_entry *ast_entry, uint32_t flags)
663 {
664 	return 1;
665 }
666 
667 struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
668 						uint8_t *ast_mac_addr)
669 {
670 	return NULL;
671 }
672 
673 static int dp_peer_ast_hash_attach(struct dp_soc *soc)
674 {
675 	return 0;
676 }
677 
678 static inline void dp_peer_map_ast(struct dp_soc *soc,
679 	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
680 	uint8_t vdev_id)
681 {
682 	return;
683 }
684 
685 static void dp_peer_ast_hash_detach(struct dp_soc *soc)
686 {
687 }
688 
689 void dp_peer_ast_set_type(struct dp_soc *soc,
690 				struct dp_ast_entry *ast_entry,
691 				enum cdp_txrx_ast_entry_type type)
692 {
693 }
694 
695 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
696 				struct dp_ast_entry *ast_entry)
697 {
698 	return 0xff;
699 }
700 
701 
702 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
703 				struct dp_ast_entry *ast_entry)
704 {
705 	return 0xff;
706 }
707 #endif
708 
709 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
710 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
711 {
712 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
713 	unsigned index;
714 	struct dp_peer *peer;
715 
716 	if (mac_addr_is_aligned) {
717 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
718 	} else {
719 		qdf_mem_copy(
720 			&local_mac_addr_aligned.raw[0],
721 			peer_mac_addr, DP_MAC_ADDR_LEN);
722 		mac_addr = &local_mac_addr_aligned;
723 	}
724 	index = dp_peer_find_hash_index(soc, mac_addr);
725 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
726 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
727 #if ATH_SUPPORT_WRAP
728 		/* ProxySTA may have multiple BSS peers with the same MAC address;
729 		 * the modified find takes care of locating the correct BSS peer.
730 		 */
731 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
732 			((peer->vdev->vdev_id == vdev_id) ||
733 			 (vdev_id == DP_VDEV_ALL))) {
734 #else
735 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
736 #endif
737 			/* found it - increment the ref count before releasing
738 			 * the lock
739 			 */
740 			qdf_atomic_inc(&peer->ref_cnt);
741 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
742 			return peer;
743 		}
744 	}
745 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
746 	return NULL; /* failure */
747 }
748 
749 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
750 {
751 	unsigned index;
752 	struct dp_peer *tmppeer = NULL;
753 	int found = 0;
754 
755 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
756 	/* Check if tail is not empty before delete */
757 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
758 	/*
759 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
760 	 * by the caller.
761 	 * The caller needs to hold the lock from the time the peer object's
762 	 * reference count is decremented and tested up through the time the
763 	 * reference to the peer object is removed from the hash table, by
764 	 * this function.
765 	 * Holding the lock only while removing the peer object reference
766 	 * from the hash table keeps the hash table consistent, but does not
767 	 * protect against a new HL tx context starting to use the peer object
768 	 * if it looks up the peer object from its MAC address just after the
769 	 * peer ref count is decremented to zero, but just before the peer
770 	 * object reference is removed from the hash table.
771 	 */
772 	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
773 		if (tmppeer == peer) {
774 			found = 1;
775 			break;
776 		}
777 	}
778 	QDF_ASSERT(found);
779 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
780 }
781 
782 void dp_peer_find_hash_erase(struct dp_soc *soc)
783 {
784 	int i;
785 
786 	/*
787 	 * Not really necessary to take peer_ref_mutex lock - by this point,
788 	 * it's known that the soc is no longer in use.
789 	 */
790 	for (i = 0; i <= soc->peer_hash.mask; i++) {
791 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
792 			struct dp_peer *peer, *peer_next;
793 
794 			/*
795 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
796 			 * memory access violation after peer is freed
797 			 */
798 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
799 				hash_list_elem, peer_next) {
800 				/*
801 				 * Don't remove the peer from the hash table -
802 				 * that would modify the list we are currently
803 				 * traversing, and it's not necessary anyway.
804 				 */
805 				/*
806 				 * Artificially adjust the peer's ref count to
807 				 * 1, so it will get deleted by
808 				 * dp_peer_unref_delete.
809 				 */
810 				/* set to zero */
811 				qdf_atomic_init(&peer->ref_cnt);
812 				/* incr to one */
813 				qdf_atomic_inc(&peer->ref_cnt);
814 				dp_peer_unref_delete(peer);
815 			}
816 		}
817 	}
818 }
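/*
 * Note on the ref count adjustment above (added annotation):
 * qdf_atomic_init() zeroes ref_cnt and the following qdf_atomic_inc()
 * raises it to exactly 1, so dp_peer_unref_delete() drops it to 0 and
 * frees the peer regardless of how many stale references were outstanding
 * at detach time.
 */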
819 
820 static void dp_peer_find_map_detach(struct dp_soc *soc)
821 {
822 	qdf_mem_free(soc->peer_id_to_obj_map);
823 }
824 
825 int dp_peer_find_attach(struct dp_soc *soc)
826 {
827 	if (dp_peer_find_map_attach(soc))
828 		return 1;
829 
830 	if (dp_peer_find_hash_attach(soc)) {
831 		dp_peer_find_map_detach(soc);
832 		return 1;
833 	}
834 
835 	if (dp_peer_ast_hash_attach(soc)) {
836 		dp_peer_find_hash_detach(soc);
837 		dp_peer_find_map_detach(soc);
838 		return 1;
839 	}
840 	return 0; /* success */
841 }
842 
843 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
844 	union hal_reo_status *reo_status)
845 {
846 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
847 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
848 
849 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
850 		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
851 			queue_status->header.status, rx_tid->tid);
852 		return;
853 	}
854 
855 	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
856 		"ssn: %d\n"
857 		"curr_idx  : %d\n"
858 		"pn_31_0   : %08x\n"
859 		"pn_63_32  : %08x\n"
860 		"pn_95_64  : %08x\n"
861 		"pn_127_96 : %08x\n"
862 		"last_rx_enq_tstamp : %08x\n"
863 		"last_rx_deq_tstamp : %08x\n"
864 		"rx_bitmap_31_0     : %08x\n"
865 		"rx_bitmap_63_32    : %08x\n"
866 		"rx_bitmap_95_64    : %08x\n"
867 		"rx_bitmap_127_96   : %08x\n"
868 		"rx_bitmap_159_128  : %08x\n"
869 		"rx_bitmap_191_160  : %08x\n"
870 		"rx_bitmap_223_192  : %08x\n"
871 		"rx_bitmap_255_224  : %08x\n",
872 		rx_tid->tid,
873 		queue_status->ssn, queue_status->curr_idx,
874 		queue_status->pn_31_0, queue_status->pn_63_32,
875 		queue_status->pn_95_64, queue_status->pn_127_96,
876 		queue_status->last_rx_enq_tstamp,
877 		queue_status->last_rx_deq_tstamp,
878 		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
879 		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
880 		queue_status->rx_bitmap_159_128,
881 		queue_status->rx_bitmap_191_160,
882 		queue_status->rx_bitmap_223_192,
883 		queue_status->rx_bitmap_255_224);
884 
885 	DP_TRACE_STATS(FATAL,
886 		"curr_mpdu_cnt      : %d\n"
887 		"curr_msdu_cnt      : %d\n"
888 		"fwd_timeout_cnt    : %d\n"
889 		"fwd_bar_cnt        : %d\n"
890 		"dup_cnt            : %d\n"
891 		"frms_in_order_cnt  : %d\n"
892 		"bar_rcvd_cnt       : %d\n"
893 		"mpdu_frms_cnt      : %d\n"
894 		"msdu_frms_cnt      : %d\n"
895 		"total_byte_cnt     : %d\n"
896 		"late_recv_mpdu_cnt : %d\n"
897 		"win_jump_2k 	    : %d\n"
898 		"hole_cnt 	    : %d\n",
899 		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
900 		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
901 		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
902 		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
903 		queue_status->msdu_frms_cnt, queue_status->total_cnt,
904 		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
905 		queue_status->hole_cnt);
906 
907 	DP_PRINT_STATS("Addba Req          : %d\n"
908 			"Addba Resp         : %d\n"
909 			"Addba Resp success : %d\n"
910 			"Addba Resp failed  : %d\n"
911 			"Delba Req received : %d\n"
912 			"Delba Tx success   : %d\n"
913 			"Delba Tx Fail      : %d\n"
914 			"BA window size     : %d\n"
915 			"Pn size            : %d\n",
916 			rx_tid->num_of_addba_req,
917 			rx_tid->num_of_addba_resp,
918 			rx_tid->num_addba_rsp_success,
919 			rx_tid->num_addba_rsp_failed,
920 			rx_tid->num_of_delba_req,
921 			rx_tid->delba_tx_success_cnt,
922 			rx_tid->delba_tx_fail_cnt,
923 			rx_tid->ba_win_size,
924 			rx_tid->pn_size);
925 }
926 
927 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
928 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
929 	uint8_t vdev_id)
930 {
931 	struct dp_peer *peer;
932 
933 	QDF_ASSERT(peer_id <= soc->max_peers);
934 	/* check if there's already a peer object with this MAC address */
935 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
936 		0 /* is aligned */, vdev_id);
937 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
938 		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
939 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
940 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
941 		peer_mac_addr[4], peer_mac_addr[5]);
942 
943 	if (peer) {
944 		/* peer's ref count was already incremented by
945 		 * peer_find_hash_find
946 		 */
947 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
948 			  "%s: ref_cnt: %d", __func__,
949 			   qdf_atomic_read(&peer->ref_cnt));
950 		soc->peer_id_to_obj_map[peer_id] = peer;
951 
952 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
953 			/* TBDXXX: assert for now */
954 			QDF_ASSERT(0);
955 		}
956 
957 		return peer;
958 	}
959 
960 	return NULL;
961 }
962 
963 /**
964  * dp_rx_peer_map_handler() - handle peer map event from firmware
965  * @soc_handle - generic soc handle
966  * @peer_id - peer_id from firmware
967  * @hw_peer_id - ast index for this peer
968  * @vdev_id - vdev ID
969  * @peer_mac_addr - mac address of the peer
970  *
971  * Associate the peer_id that firmware provided with the peer entry
972  * and update the ast table in the host with the hw_peer_id.
973  *
974  * Return: none
975  */
976 
977 void
978 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
979 			uint8_t vdev_id, uint8_t *peer_mac_addr)
980 {
981 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
982 	struct dp_peer *peer = NULL;
983 
984 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
985 		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
986 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
987 		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
988 		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
989 		peer_mac_addr[5], vdev_id);
990 
991 	peer = soc->peer_id_to_obj_map[peer_id];
992 
993 	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
994 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
995 			"invalid hw_peer_id: %d", hw_peer_id);
996 		qdf_assert_always(0);
997 	}
998 
999 	/*
1000 	 * check if peer already exists for this peer_id, if so
1001 	 * this peer map event is in response for a wds peer add
1002 	 * wmi command sent during wds source port learning.
1003 	 * in this case just add the ast entry to the existing
1004 	 * peer ast_list.
1005 	 */
1006 	if (!peer)
1007 		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
1008 					hw_peer_id, vdev_id);
1009 
1010 	if (peer) {
1011 		qdf_assert_always(peer->vdev);
1012 		/*
1013 		 * For every peer map message, check and set bss_peer if applicable
1014 		 */
1015 		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
1016 				 DP_MAC_ADDR_LEN))) {
1017 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1018 				"vdev bss_peer!!!!");
1019 			peer->bss_peer = 1;
1020 			peer->vdev->vap_bss_peer = peer;
1021 		}
1022 	}
1023 
1024 	dp_peer_map_ast(soc, peer, peer_mac_addr,
1025 			hw_peer_id, vdev_id);
1026 }
1027 
1028 void
1029 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
1030 {
1031 	struct dp_peer *peer;
1032 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1033 	uint8_t i;
1034 
1035 	peer = __dp_peer_find_by_id(soc, peer_id);
1036 
1037 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1038 		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
1039 		soc, peer_id, peer);
1040 
1041 	/*
1042 	 * Currently peer IDs are assigned for vdevs as well as peers.
1043 	 * If the peer ID is for a vdev, then the peer pointer stored
1044 	 * in peer_id_to_obj_map will be NULL.
1045 	 */
1046 	if (!peer) {
1047 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1048 			"%s: Received unmap event for invalid peer_id"
1049 			" %u", __func__, peer_id);
1050 		return;
1051 	}
1052 
1053 	soc->peer_id_to_obj_map[peer_id] = NULL;
1054 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1055 		if (peer->peer_ids[i] == peer_id) {
1056 			peer->peer_ids[i] = HTT_INVALID_PEER;
1057 			break;
1058 		}
1059 	}
1060 
1061 	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
1062 		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
1063 				peer_id);
1064 	}
1065 
1066 	/*
1067 	 * Remove a reference to the peer.
1068 	 * If there are no more references, delete the peer object.
1069 	 */
1070 	dp_peer_unref_delete(peer);
1071 }
1072 
1073 void
1074 dp_peer_find_detach(struct dp_soc *soc)
1075 {
1076 	dp_peer_find_map_detach(soc);
1077 	dp_peer_find_hash_detach(soc);
1078 	dp_peer_ast_hash_detach(soc);
1079 }
1080 
1081 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1082 	union hal_reo_status *reo_status)
1083 {
1084 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1085 
1086 	if ((reo_status->rx_queue_status.header.status !=
1087 		HAL_REO_CMD_SUCCESS) &&
1088 		(reo_status->rx_queue_status.header.status !=
1089 		HAL_REO_CMD_DRAIN)) {
1090 		/* Should not happen normally. Just print error for now */
1091 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1092 			"%s: Rx tid HW desc update failed(%d): tid %d",
1093 			__func__,
1094 			reo_status->rx_queue_status.header.status,
1095 			rx_tid->tid);
1096 	}
1097 }
1098 
1099 /*
1100  * dp_find_peer_by_addr - find peer instance by mac address
1101  * @dev: physical device instance
1102  * @peer_mac_addr: peer mac address
1103  * @local_id: local id for the peer
1104  *
1105  * Return: peer instance pointer
1106  */
1107 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
1108 		uint8_t *local_id)
1109 {
1110 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
1111 	struct dp_peer *peer;
1112 
1113 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
1114 
1115 	if (!peer)
1116 		return NULL;
1117 
1118 	/* Multiple peer ids? How can we know the right peer id? */
1119 	*local_id = peer->local_id;
1120 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
1121 
1122 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1123 	 * Decrement it here.
1124 	 */
1125 	qdf_atomic_dec(&peer->ref_cnt);
1126 
1127 	return peer;
1128 }
1129 
1130 /*
1131  * dp_rx_tid_update_wifi3() – Update receive TID state
1132  * @peer: Datapath peer handle
1133  * @tid: TID
1134  * @ba_window_size: BlockAck window size
1135  * @start_seq: Starting sequence number
1136  *
1137  * Return: 0 on success, error code on failure
1138  */
1139 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1140 				  ba_window_size, uint32_t start_seq)
1141 {
1142 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1143 	struct dp_soc *soc = peer->vdev->pdev->soc;
1144 	struct hal_reo_cmd_params params;
1145 
1146 	qdf_mem_zero(&params, sizeof(params));
1147 
1148 	params.std.need_status = 1;
1149 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1150 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1151 	params.u.upd_queue_params.update_ba_window_size = 1;
1152 	params.u.upd_queue_params.ba_window_size = ba_window_size;
1153 
1154 	if (start_seq < IEEE80211_SEQ_MAX) {
1155 		params.u.upd_queue_params.update_ssn = 1;
1156 		params.u.upd_queue_params.ssn = start_seq;
1157 	}
1158 
1159 	dp_set_ssn_valid_flag(&params, 0);
1160 
1161 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
1162 
1163 	rx_tid->ba_win_size = ba_window_size;
1164 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1165 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1166 			peer->vdev->pdev->ctrl_pdev,
1167 			peer->vdev->vdev_id, peer->mac_addr.raw,
1168 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1169 
1170 	}
1171 	return 0;
1172 }
1173 
1174 /*
1175  * dp_reo_desc_free() - Callback to free reo descriptor memory after
1176  * HW cache flush
1177  *
1178  * @soc: DP SOC handle
1179  * @cb_ctxt: Callback context
1180  * @reo_status: REO command status
1181  */
1182 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1183 	union hal_reo_status *reo_status)
1184 {
1185 	struct reo_desc_list_node *freedesc =
1186 		(struct reo_desc_list_node *)cb_ctxt;
1187 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
1188 
1189 	if ((reo_status->fl_cache_status.header.status !=
1190 		HAL_REO_CMD_SUCCESS) &&
1191 		(reo_status->fl_cache_status.header.status !=
1192 		HAL_REO_CMD_DRAIN)) {
1193 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1194 			"%s: Rx tid HW desc flush failed(%d): tid %d",
1195 			__func__,
1196 			reo_status->rx_queue_status.header.status,
1197 			freedesc->rx_tid.tid);
1198 	}
1199 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1200 		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1201 		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1202 	qdf_mem_unmap_nbytes_single(soc->osdev,
1203 		rx_tid->hw_qdesc_paddr,
1204 		QDF_DMA_BIDIRECTIONAL,
1205 		rx_tid->hw_qdesc_alloc_size);
1206 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1207 	qdf_mem_free(freedesc);
1208 }
1209 
1210 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1211 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
1212 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1213 {
1214 	if (dma_addr < 0x50000000)
1215 		return QDF_STATUS_E_FAILURE;
1216 	else
1217 		return QDF_STATUS_SUCCESS;
1218 }
1219 #else
1220 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1221 {
1222 	return QDF_STATUS_SUCCESS;
1223 }
1224 #endif
1225 
1226 
1227 /*
1228  * dp_rx_tid_setup_wifi3() – Setup receive TID state
1229  * @peer: Datapath peer handle
1230  * @tid: TID
1231  * @ba_window_size: BlockAck window size
1232  * @start_seq: Starting sequence number
1233  *
1234  * Return: 0 on success, error code on failure
1235  */
1236 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1237 	uint32_t ba_window_size, uint32_t start_seq)
1238 {
1239 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1240 	struct dp_vdev *vdev = peer->vdev;
1241 	struct dp_soc *soc = vdev->pdev->soc;
1242 	uint32_t hw_qdesc_size;
1243 	uint32_t hw_qdesc_align;
1244 	int hal_pn_type;
1245 	void *hw_qdesc_vaddr;
1246 	uint32_t alloc_tries = 0;
1247 
1248 	if (peer->delete_in_progress)
1249 		return QDF_STATUS_E_FAILURE;
1250 
1251 	rx_tid->ba_win_size = ba_window_size;
1252 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1253 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1254 			start_seq);
1255 	rx_tid->delba_tx_status = 0;
1256 	rx_tid->ppdu_id_2k = 0;
1257 	rx_tid->num_of_addba_req = 0;
1258 	rx_tid->num_of_delba_req = 0;
1259 	rx_tid->num_of_addba_resp = 0;
1260 	rx_tid->num_addba_rsp_failed = 0;
1261 	rx_tid->num_addba_rsp_success = 0;
1262 	rx_tid->delba_tx_success_cnt = 0;
1263 	rx_tid->delba_tx_fail_cnt = 0;
1264 	rx_tid->statuscode = 0;
1265 #ifdef notyet
1266 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1267 #else
1268 	/* TODO: Allocating HW queue descriptors based on max BA window size
1269 	 * for all QOS TIDs so that same descriptor can be used later when
1270 	 * ADDBA request is received. This should be changed to allocate HW
1271 	 * queue descriptors based on BA window size being negotiated (0 for
1272 	 * non BA cases), and reallocate when BA window size changes and also
1273 	 * send WMI message to FW to change the REO queue descriptor in Rx
1274 	 * peer entry as part of dp_rx_tid_update.
1275 	 */
1276 	if (tid != DP_NON_QOS_TID)
1277 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1278 			HAL_RX_MAX_BA_WINDOW);
1279 	else
1280 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1281 			ba_window_size);
1282 #endif
1283 
1284 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1285 	/* To avoid unnecessary extra allocation for alignment, try allocating
1286 	 * the exact size and see if we already have an aligned address.
1287 	 */
1288 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
1289 
1290 try_desc_alloc:
1291 	rx_tid->hw_qdesc_vaddr_unaligned =
1292 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
1293 
1294 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1295 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1296 			"%s: Rx tid HW desc alloc failed: tid %d",
1297 			__func__, tid);
1298 		return QDF_STATUS_E_NOMEM;
1299 	}
1300 
1301 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1302 		hw_qdesc_align) {
1303 		/* Address allocated above is not aligned. Allocate extra
1304 		 * memory for alignment
1305 		 */
1306 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1307 		rx_tid->hw_qdesc_vaddr_unaligned =
1308 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1309 					hw_qdesc_align - 1);
1310 
1311 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1312 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1313 				"%s: Rx tid HW desc alloc failed: tid %d",
1314 				__func__, tid);
1315 			return QDF_STATUS_E_NOMEM;
1316 		}
1317 
1318 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1319 			rx_tid->hw_qdesc_vaddr_unaligned,
1320 			hw_qdesc_align);
1321 
1322 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1323 			"%s: Total Size %d Aligned Addr %pK",
1324 			__func__, rx_tid->hw_qdesc_alloc_size,
1325 			hw_qdesc_vaddr);
1326 
1327 	} else {
1328 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
1329 	}
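	/*
	 * Alignment sketch (hypothetical values, added annotation): if
	 * hw_qdesc_align is 128 and the unaligned allocation returns a
	 * pointer ending in 0x1040, qdf_align() rounds it up to 0x1080;
	 * allocating (size + align - 1) bytes above guarantees the aligned
	 * start still leaves hw_qdesc_size usable bytes.
	 */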
1330 
1331 	/* TODO: Ensure that sec_type is set before ADDBA is received.
1332 	 * Currently this is set based on htt indication
1333 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
1334 	 */
1335 	switch (peer->security[dp_sec_ucast].sec_type) {
1336 	case cdp_sec_type_tkip_nomic:
1337 	case cdp_sec_type_aes_ccmp:
1338 	case cdp_sec_type_aes_ccmp_256:
1339 	case cdp_sec_type_aes_gcmp:
1340 	case cdp_sec_type_aes_gcmp_256:
1341 		hal_pn_type = HAL_PN_WPA;
1342 		break;
1343 	case cdp_sec_type_wapi:
1344 		if (vdev->opmode == wlan_op_mode_ap)
1345 			hal_pn_type = HAL_PN_WAPI_EVEN;
1346 		else
1347 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
1348 		break;
1349 	default:
1350 		hal_pn_type = HAL_PN_NONE;
1351 		break;
1352 	}
1353 
1354 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1355 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1356 
1357 	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
1358 		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
1359 		&(rx_tid->hw_qdesc_paddr));
1360 
1361 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
1362 			QDF_STATUS_SUCCESS) {
1363 		if (alloc_tries++ < 10)
1364 			goto try_desc_alloc;
1365 		else {
1366 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1367 			"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
1368 			__func__, tid);
1369 			return QDF_STATUS_E_NOMEM;
1370 		}
1371 	}
1372 
1373 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1374 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1375 			vdev->pdev->ctrl_pdev,
1376 			peer->vdev->vdev_id, peer->mac_addr.raw,
1377 			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1378 
1379 	}
1380 	return 0;
1381 }
1382 
1383 /*
1384  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1385  * after deleting the entries (i.e., setting valid=0)
1386  *
1387  * @soc: DP SOC handle
1388  * @cb_ctxt: Callback context
1389  * @reo_status: REO command status
1390  */
1391 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1392 	union hal_reo_status *reo_status)
1393 {
1394 	struct reo_desc_list_node *freedesc =
1395 		(struct reo_desc_list_node *)cb_ctxt;
1396 	uint32_t list_size;
1397 	struct reo_desc_list_node *desc;
1398 	unsigned long curr_ts = qdf_get_system_timestamp();
1399 	uint32_t desc_size, tot_desc_size;
1400 	struct hal_reo_cmd_params params;
1401 
1402 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1403 		qdf_mem_zero(reo_status, sizeof(*reo_status));
1404 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1405 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1406 		return;
1407 	} else if (reo_status->rx_queue_status.header.status !=
1408 		HAL_REO_CMD_SUCCESS) {
1409 		/* Should not happen normally. Just print error for now */
1410 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1411 			"%s: Rx tid HW desc deletion failed(%d): tid %d",
1412 			__func__,
1413 			reo_status->rx_queue_status.header.status,
1414 			freedesc->rx_tid.tid);
1415 	}
1416 
1417 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1418 		"%s: rx_tid: %d status: %d", __func__,
1419 		freedesc->rx_tid.tid,
1420 		reo_status->rx_queue_status.header.status);
1421 
1422 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1423 	freedesc->free_ts = curr_ts;
1424 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1425 		(qdf_list_node_t *)freedesc, &list_size);
1426 
1427 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1428 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1429 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1430 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1431 		struct dp_rx_tid *rx_tid;
1432 
1433 		qdf_list_remove_front(&soc->reo_desc_freelist,
1434 				(qdf_list_node_t **)&desc);
1435 		list_size--;
1436 		rx_tid = &desc->rx_tid;
1437 
1438 		/* Flush and invalidate REO descriptor from HW cache: Base and
1439 		 * extension descriptors should be flushed separately */
1440 		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1441 			rx_tid->ba_win_size);
1442 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);
1443 
1444 		/* Flush reo extension descriptors */
1445 		while ((tot_desc_size -= desc_size) > 0) {
1446 			qdf_mem_zero(&params, sizeof(params));
1447 			params.std.addr_lo =
1448 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1449 				tot_desc_size) & 0xffffffff;
1450 			params.std.addr_hi =
1451 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1452 
1453 			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1454 							CMD_FLUSH_CACHE,
1455 							&params,
1456 							NULL,
1457 							NULL)) {
1458 				QDF_TRACE(QDF_MODULE_ID_DP,
1459 					QDF_TRACE_LEVEL_ERROR,
1460 					"%s: fail to send CMD_CACHE_FLUSH:"
1461 					"tid %d desc %pK", __func__,
1462 					rx_tid->tid,
1463 					(void *)(rx_tid->hw_qdesc_paddr));
1464 			}
1465 		}
1466 
1467 		/* Flush base descriptor */
1468 		qdf_mem_zero(&params, sizeof(params));
1469 		params.std.need_status = 1;
1470 		params.std.addr_lo =
1471 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1472 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1473 
1474 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1475 							  CMD_FLUSH_CACHE,
1476 							  &params,
1477 							  dp_reo_desc_free,
1478 							  (void *)desc)) {
1479 			union hal_reo_status reo_status;
1480 			/*
1481 			 * If dp_reo_send_cmd returns failure, the related TID queue desc
1482 			 * should be unmapped. The local reo_desc, together with the
1483 			 * TID queue desc, also needs to be freed accordingly.
1484 			 *
1485 			 * Here invoke desc_free function directly to do clean up.
1486 			 */
1487 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1488 				"%s: fail to send REO cmd to flush cache: tid %d",
1489 				__func__, rx_tid->tid);
1490 			qdf_mem_zero(&reo_status, sizeof(reo_status));
1491 			reo_status.fl_cache_status.header.status = 0;
1492 			dp_reo_desc_free(soc, (void *)desc, &reo_status);
1493 		}
1494 	}
1495 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1496 }
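/*
 * Deferred-free note (added annotation): descriptors are parked on
 * soc->reo_desc_freelist and only cache-flushed and freed once the list
 * holds at least REO_DESC_FREELIST_SIZE entries or the entry at the head
 * has aged more than REO_DESC_FREE_DEFER_MS, giving the REO hardware time
 * to complete any in-flight access to the queue descriptor.
 */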
1497 
1498 /*
1499  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
1500  * @peer: Datapath peer handle
1501  * @tid: TID
1502  *
1503  * Return: 0 on success, error code on failure
1504  */
1505 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1506 {
1507 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
1508 	struct dp_soc *soc = peer->vdev->pdev->soc;
1509 	struct hal_reo_cmd_params params;
1510 	struct reo_desc_list_node *freedesc =
1511 		qdf_mem_malloc(sizeof(*freedesc));
1512 
1513 	if (!freedesc) {
1514 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1515 			"%s: malloc failed for freedesc: tid %d",
1516 			__func__, tid);
1517 		return -ENOMEM;
1518 	}
1519 
1520 	freedesc->rx_tid = *rx_tid;
1521 
1522 	qdf_mem_zero(&params, sizeof(params));
1523 
1524 	params.std.need_status = 1;
1525 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1526 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1527 	params.u.upd_queue_params.update_vld = 1;
1528 	params.u.upd_queue_params.vld = 0;
1529 
1530 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1531 		dp_rx_tid_delete_cb, (void *)freedesc);
1532 
1533 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1534 	rx_tid->hw_qdesc_alloc_size = 0;
1535 	rx_tid->hw_qdesc_paddr = 0;
1536 
1537 	return 0;
1538 }
1539 
1540 #ifdef DP_LFR
1541 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1542 {
1543 	int tid;
1544 
1545 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
1546 		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
1547 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1548 			"Setting up TID %d for peer %pK peer->local_id %d",
1549 			tid, peer, peer->local_id);
1550 	}
1551 }
1552 #else
1553 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1554 #endif
1555 /*
1556  * dp_peer_rx_init() – Initialize receive TID state
1557  * @pdev: Datapath pdev
1558  * @peer: Datapath peer
1559  *
1560  */
1561 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1562 {
1563 	int tid;
1564 	struct dp_rx_tid *rx_tid;
1565 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1566 		rx_tid = &peer->rx_tid[tid];
1567 		rx_tid->array = &rx_tid->base;
1568 		rx_tid->base.head = rx_tid->base.tail = NULL;
1569 		rx_tid->tid = tid;
1570 		rx_tid->defrag_timeout_ms = 0;
1571 		rx_tid->ba_win_size = 0;
1572 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1573 
1574 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1575 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1576 
1577 #ifdef notyet /* TODO: See if this is required for exception handling */
1578 		/* invalid sequence number */
1579 		peer->tids_last_seq[tid] = 0xffff;
1580 #endif
1581 	}
1582 
1583 	peer->active_ba_session_cnt = 0;
1584 	peer->hw_buffer_size = 0;
1585 	peer->kill_256_sessions = 0;
1586 
1587 	/* Setup default (non-qos) rx tid queue */
1588 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
1589 
1590 	/* Setup rx tid queue for TID 0.
1591 	 * Other queues will be set up lazily when the first packet for that TID
1592 	 * arrives, which will initially cause a NULL REO queue error.
1593 	 */
1594 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1595 
1596 	/*
1597 	 * Setup the rest of the TIDs to handle LFR
1598 	 */
1599 	dp_peer_setup_remaining_tids(peer);
1600 
1601 	/*
1602 	 * Set security defaults: no PN check, no security. The target may
1603 	 * send a HTT SEC_IND message to overwrite these defaults.
1604 	 */
1605 	peer->security[dp_sec_ucast].sec_type =
1606 		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
1607 }
1608 
1609 /*
1610  * dp_peer_rx_cleanup() – Cleanup receive TID state
1611  * @vdev: Datapath vdev
1612  * @peer: Datapath peer
1613  *
1614  */
1615 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1616 {
1617 	int tid;
1618 	uint32_t tid_delete_mask = 0;
1619 
1620 	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
1621 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1622 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1623 
1624 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1625 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
1626 			dp_rx_tid_delete_wifi3(peer, tid);
1627 
1628 			/* Cleanup defrag related resource */
1629 			dp_rx_defrag_waitlist_remove(peer, tid);
1630 			dp_rx_reorder_flush_frag(peer, tid);
1631 
1632 			tid_delete_mask |= (1 << tid);
1633 		}
1634 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1635 	}
1636 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1637 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1638 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
1639 			peer->vdev->vdev_id, peer->mac_addr.raw,
1640 			tid_delete_mask);
1641 	}
1642 #endif
1643 	for (tid = 0; tid < DP_MAX_TIDS; tid++)
1644 		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
1645 }
1646 
1647 /*
1648  * dp_peer_cleanup() – Cleanup peer information
1649  * @vdev: Datapath vdev
1650  * @peer: Datapath peer
1651  *
1652  */
1653 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1654 {
1655 	peer->last_assoc_rcvd = 0;
1656 	peer->last_disassoc_rcvd = 0;
1657 	peer->last_deauth_rcvd = 0;
1658 
1659 	/* cleanup the Rx reorder queues for this peer */
1660 	dp_peer_rx_cleanup(vdev, peer);
1661 }
1662 
1663 /* dp_teardown_256_ba_sessions() - Teardown sessions using 256
1664  *                                window size when a request with
1665  *                                64 window size is received.
1666  *                                This is done as a WAR since HW can
1667  *                                have only one setting per peer (64 or 256).
1668  * @peer: Datapath peer
1669  *
1670  * Return: void
1671  */
1672 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1673 {
1674 	uint8_t delba_rcode = 0;
1675 	int tid;
1676 	struct dp_rx_tid *rx_tid = NULL;
1677 
1678 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1679 		rx_tid = &peer->rx_tid[tid];
1680 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1681 
1682 		if (rx_tid->ba_win_size <= 64) {
1683 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1684 			continue;
1685 		} else {
1686 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1687 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1688 				/* send delba */
1689 				if (!rx_tid->delba_tx_status) {
1690 					rx_tid->delba_tx_retry++;
1691 					rx_tid->delba_tx_status = 1;
1692 					rx_tid->delba_rcode =
1693 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1694 					delba_rcode = rx_tid->delba_rcode;
1695 
1696 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1697 					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1698 							peer->vdev->pdev->ctrl_pdev,
1699 							peer->ctrl_peer,
1700 							peer->mac_addr.raw,
1701 							tid, peer->vdev->ctrl_vdev,
1702 							delba_rcode);
1703 				} else {
1704 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1705 				}
1706 			} else {
1707 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1708 			}
1709 		}
1710 	}
1711 }
1712 
1713 /*
1714 * dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
1715 *
1716 * @peer: Datapath peer handle
1717 * @tid: TID number
1718 * @status: tx completion status
1719 * Return: 0 on success, error code on failure
1720 */
1721 int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
1722 				      uint8_t tid, int status)
1723 {
1724 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1725 	struct dp_rx_tid *rx_tid = NULL;
1726 
1727 	if (!peer || peer->delete_in_progress) {
1728 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1729 			  "%s: Peer is NULL!\n", __func__);
1730 		return QDF_STATUS_E_FAILURE;
1731 	}
1732 	rx_tid = &peer->rx_tid[tid];
1733 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1734 	if (status) {
1735 		rx_tid->num_addba_rsp_failed++;
1736 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1737 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1738 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1739 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1740 			  "%s: Rx Tid- %d addba rsp tx completion failed!",
1741 			 __func__, tid);
1742 		return QDF_STATUS_SUCCESS;
1743 	}
1744 
1745 	rx_tid->num_addba_rsp_success++;
1746 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1747 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1748 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1749 			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1750 			__func__, tid);
1751 		return QDF_STATUS_E_FAILURE;
1752 	}
1753 
1754 	/* First Session */
1755 	if (peer->active_ba_session_cnt == 0) {
1756 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1757 			peer->hw_buffer_size = 256;
1758 		else
1759 			peer->hw_buffer_size = 64;
1760 	}
1761 
1762 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1763 
1764 	peer->active_ba_session_cnt++;
1765 
1766 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1767 
1768 	/* Kill any session having 256 buffer size
1769 	 * when 64 buffer size request is received.
1770 	 * Also, latch on to 64 as new buffer size.
1771 	 */
1772 	if (peer->kill_256_sessions) {
1773 		dp_teardown_256_ba_sessions(peer);
1774 		peer->kill_256_sessions = 0;
1775 	}
1776 	return QDF_STATUS_SUCCESS;
1777 }
1778 
1779 /*
1780  * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
1781  *                                  for the given TID
1782  * @peer: Datapath peer handle
1783  * @tid: TID number
1784  * @dialogtoken: output dialog token
1785  * @statuscode: output ADDBA response status code
1786  * @buffersize: output BA window size
1787  * @batimeout: output BA timeout
1788  */
1789 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1790 	uint8_t *dialogtoken, uint16_t *statuscode,
1791 	uint16_t *buffersize, uint16_t *batimeout)
1792 {
1793 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1794 	struct dp_rx_tid *rx_tid = NULL;
1795 
1796 	if (!peer || peer->delete_in_progress) {
1797 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1798 			  "%s: Peer is NULL or delete in progress", __func__);
1799 		return;
1800 	}
1801 	rx_tid = &peer->rx_tid[tid];
1802 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1803 	rx_tid->num_of_addba_resp++;
1804 	/* setup ADDBA response parameters */
1805 	*dialogtoken = rx_tid->dialogtoken;
1806 	*statuscode = rx_tid->statuscode;
1807 	*buffersize = rx_tid->ba_win_size;
1808 	*batimeout  = 0;
1809 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1810 }
1811 
1812 /* dp_check_ba_buffersize() - Check the buffer size in the request and
1813  *                            latch on to the size used by the first active
1814  *                            session, since HW allows one setting per peer.
1815  * @peer: Datapath peer
1816  * @tid: Tid
1817  * @buffersize: Block ack window size
1818  *
1819  * Return: void
1820  */
1821 static void dp_check_ba_buffersize(struct dp_peer *peer,
1822 				   uint16_t tid,
1823 				   uint16_t buffersize)
1824 {
1825 	struct dp_rx_tid *rx_tid = NULL;
1826 
1827 	rx_tid = &peer->rx_tid[tid];
1828 
1829 	if (peer->active_ba_session_cnt == 0) {
1830 		rx_tid->ba_win_size = buffersize;
1831 	} else {
1832 		if (peer->hw_buffer_size == 64) {
1833 			if (buffersize <= 64)
1834 				rx_tid->ba_win_size = buffersize;
1835 			else
1836 				rx_tid->ba_win_size = peer->hw_buffer_size;
1837 		} else if (peer->hw_buffer_size == 256) {
1838 			if (buffersize > 64) {
1839 				rx_tid->ba_win_size = buffersize;
1840 			} else {
1841 				rx_tid->ba_win_size = buffersize;
1842 				peer->hw_buffer_size = 64;
1843 				peer->kill_256_sessions = 1;
1844 			}
1845 		}
1846 	}
1847 }
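/*
 * Worked example of the latching behaviour above (illustrative summary,
 * not part of the original sources):
 *
 *   1. The first ADDBA request on the peer asks for a 256 window: with
 *      active_ba_session_cnt == 0, ba_win_size is set to 256 and, once
 *      the session goes active, dp_addba_resp_tx_completion_wifi3()
 *      latches hw_buffer_size to 256.
 *   2. A later ADDBA request on another TID asks for 64: hw_buffer_size
 *      is 256 and buffersize <= 64, so that TID gets ba_win_size = 64,
 *      hw_buffer_size drops to 64 and kill_256_sessions is set, which
 *      makes dp_addba_resp_tx_completion_wifi3() call
 *      dp_teardown_256_ba_sessions() to delba the older 256 sessions.
 */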
1848 
1849 /*
1850  * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
1851  *
1852  * @peer: Datapath peer handle
1853  * @dialogtoken: dialogtoken from ADDBA frame
1854  * @tid: TID number
1855  * @batimeout: BA timeout
1856  * @buffersize: BA window size
1857  * @startseqnum: Start seq. number received in BA sequence control
1858  *
1859  * Return: 0 on success, error code on failure
1860  */
1861 int dp_addba_requestprocess_wifi3(void *peer_handle,
1862 				  uint8_t dialogtoken,
1863 				  uint16_t tid, uint16_t batimeout,
1864 				  uint16_t buffersize,
1865 				  uint16_t startseqnum)
1866 {
1867 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1868 	struct dp_rx_tid *rx_tid = NULL;
1869 
1870 	if (!peer || peer->delete_in_progress) {
1871 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1872 			  "%s: Peer is NULL or delete in progress", __func__);
1873 		return QDF_STATUS_E_FAILURE;
1874 	}
1875 	rx_tid = &peer->rx_tid[tid];
1876 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1877 	rx_tid->num_of_addba_req++;
1878 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1879 	     rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
1880 	    (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
1881 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1882 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1883 		peer->active_ba_session_cnt--;
1884 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1885 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1886 			  "%s: Rx Tid- %d hw qdesc is already setup",
1887 			__func__, tid);
1888 		return QDF_STATUS_E_FAILURE;
1889 	}
1890 
1891 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1892 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1893 		return QDF_STATUS_E_FAILURE;
1894 	}
1895 
1896 	dp_check_ba_buffersize(peer, tid, buffersize);
1897 
1898 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, startseqnum)) {
1899 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1900 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1901 		return QDF_STATUS_E_FAILURE;
1902 	}
1903 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1904 
1905 	rx_tid->ba_win_size = buffersize;
1906 	rx_tid->dialogtoken = dialogtoken;
1907 	rx_tid->startseqnum = startseqnum;
1908 
1909 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1910 		rx_tid->statuscode = rx_tid->userstatuscode;
1911 	else
1912 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1913 
1914 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1915 
1916 	return QDF_STATUS_SUCCESS;
1917 }
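/*
 * Sketch of the ADDBA handshake as seen from these entry points
 * (illustrative summary, not part of the original sources):
 *
 *   1. An ADDBA request frame arrives and the control path calls
 *      dp_addba_requestprocess_wifi3(), which sets up the Rx TID REO
 *      queue and moves the TID to DP_RX_BA_IN_PROGRESS.
 *   2. The control path fetches the response parameters with
 *      dp_addba_responsesetup_wifi3(); a non-success status can be
 *      pre-configured via dp_set_addba_response() before the request
 *      is processed.
 *   3. When the ADDBA response frame tx completes,
 *      dp_addba_resp_tx_completion_wifi3() moves the TID to
 *      DP_RX_BA_ACTIVE (or back to DP_RX_BA_INACTIVE on failure) and
 *      tears down stale 256-window sessions if required.
 */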
1918 
1919 /*
1920  * dp_set_addba_response() - Set a user defined ADDBA response status code
1921  *
1922  * @peer: Datapath peer handle
1923  * @tid: TID number
1924  * @statuscode: response status code to be set
1925  */
1926 void dp_set_addba_response(void *peer_handle, uint8_t tid,
1927 	uint16_t statuscode)
1928 {
1929 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1930 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1931 
1932 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1933 	rx_tid->userstatuscode = statuscode;
1934 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1935 }
1936 
1937 /*
1938  * dp_delba_process_wifi3() - Process DELBA from peer
1939  * @peer: Datapath peer handle
1940  * @tid: TID number
1941  * @reasoncode: Reason code received in DELBA frame
1942  *
1943  * Return: 0 on success, error code on failure
1944  */
1945 int dp_delba_process_wifi3(void *peer_handle,
1946 	int tid, uint16_t reasoncode)
1947 {
1948 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1949 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1950 
1951 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1952 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
1953 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1954 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1955 		return QDF_STATUS_E_FAILURE;
1956 	}
1957 	/* TODO: See if we can delete the existing REO queue descriptor and
1958 	 * replace with a new one without queue extension descriptor to save
1959 	 * memory
1960 	 */
1961 	rx_tid->delba_rcode = reasoncode;
1962 	rx_tid->num_of_delba_req++;
1963 	dp_rx_tid_update_wifi3(peer, tid, 1, 0);
1964 
1965 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1966 	peer->active_ba_session_cnt--;
1967 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1968 	return 0;
1969 }
1970 
1971 /*
1972  * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion; retransmit
1973  *                                  on failure, tear down BA on success
1974  * @peer: Datapath peer handle
1975  * @tid: TID number
1976  * @status: tx completion status
1977  * Return: 0 on success, error code on failure
1978  */
1979 
1980 int dp_delba_tx_completion_wifi3(void *peer_handle,
1981 				 uint8_t tid, int status)
1982 {
1983 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
1984 	struct dp_rx_tid *rx_tid = NULL;
1985 
1986 	if (!peer || peer->delete_in_progress) {
1987 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1988 			  "%s: Peer is NULL or delete in progress", __func__);
1989 		return QDF_STATUS_E_FAILURE;
1990 	}
1991 	rx_tid = &peer->rx_tid[tid];
1992 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1993 	if (status) {
1994 		rx_tid->delba_tx_fail_cnt++;
1995 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
1996 			rx_tid->delba_tx_retry = 0;
1997 			rx_tid->delba_tx_status = 0;
1998 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1999 		} else {
2000 			rx_tid->delba_tx_retry++;
2001 			rx_tid->delba_tx_status = 1;
2002 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2003 			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2004 				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
2005 				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
2006 				rx_tid->delba_rcode);
2007 		}
2008 		return QDF_STATUS_SUCCESS;
2009 	} else {
2010 		rx_tid->delba_tx_success_cnt++;
2011 		rx_tid->delba_tx_retry = 0;
2012 		rx_tid->delba_tx_status = 0;
2013 	}
2014 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
2015 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2016 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2017 		peer->active_ba_session_cnt--;
2018 	}
2019 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2020 		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
2021 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
2022 	}
2023 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2024 
2025 	return QDF_STATUS_SUCCESS;
2026 }
2027 
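/**
 * dp_rx_discard() - discard an msdu list received from a partly-deleted peer
 * @vdev: Datapath vdev handle
 * @peer: Datapath peer the frames were received from
 * @tid: TID of the received frames
 * @msdu_list: chain of msdus to be dropped
 *
 * Return: void
 */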
2028 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2029 	qdf_nbuf_t msdu_list)
2030 {
2031 	while (msdu_list) {
2032 		qdf_nbuf_t msdu = msdu_list;
2033 
2034 		msdu_list = qdf_nbuf_next(msdu_list);
2035 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2036 			"discard rx %pK from partly-deleted peer %pK "
2037 			"(%02x:%02x:%02x:%02x:%02x:%02x)",
2038 			msdu, peer,
2039 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2040 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2041 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2042 		qdf_nbuf_free(msdu);
2043 	}
2044 }
2045 
2046 
2047 /**
2048  * dp_set_pn_check_wifi3() - enable PN check in REO for security
2049  * @vdev: Datapath vdev handle
2050  * @peer: Datapath peer handle
2051  * @sec_type: security type
2052  * @rx_pn: Receive pn starting number
2053  *
2054  * Return: None
2055  */
2056 
2057 void
2058 dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
2059 {
2060 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2061 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2062 	struct dp_pdev *pdev;
2063 	struct dp_soc *soc;
2064 	int i;
2065 	uint8_t pn_size;
2066 	struct hal_reo_cmd_params params;
2067 
2068 	/* preconditions */
2069 	qdf_assert(vdev);
2070 
2071 	pdev = vdev->pdev;
2072 	soc = pdev->soc;
2073 
2074 
2075 	qdf_mem_zero(&params, sizeof(params));
2076 
2077 	params.std.need_status = 1;
2078 	params.u.upd_queue_params.update_pn_valid = 1;
2079 	params.u.upd_queue_params.update_pn_size = 1;
2080 	params.u.upd_queue_params.update_pn = 1;
2081 	params.u.upd_queue_params.update_pn_check_needed = 1;
2082 	params.u.upd_queue_params.update_svld = 1;
2083 	params.u.upd_queue_params.svld = 0;
2084 
2085 	peer->security[dp_sec_ucast].sec_type = sec_type;
2086 
2087 	switch (sec_type) {
2088 	case cdp_sec_type_tkip_nomic:
2089 	case cdp_sec_type_aes_ccmp:
2090 	case cdp_sec_type_aes_ccmp_256:
2091 	case cdp_sec_type_aes_gcmp:
2092 	case cdp_sec_type_aes_gcmp_256:
2093 		params.u.upd_queue_params.pn_check_needed = 1;
2094 		params.u.upd_queue_params.pn_size = 48;
2095 		pn_size = 48;
2096 		break;
2097 	case cdp_sec_type_wapi:
2098 		params.u.upd_queue_params.pn_check_needed = 1;
2099 		params.u.upd_queue_params.pn_size = 128;
2100 		pn_size = 128;
2101 		if (vdev->opmode == wlan_op_mode_ap) {
2102 			params.u.upd_queue_params.pn_even = 1;
2103 			params.u.upd_queue_params.update_pn_even = 1;
2104 		} else {
2105 			params.u.upd_queue_params.pn_uneven = 1;
2106 			params.u.upd_queue_params.update_pn_uneven = 1;
2107 		}
2108 		break;
2109 	default:
2110 		params.u.upd_queue_params.pn_check_needed = 0;
2111 		pn_size = 0;
2112 		break;
2113 	}
2114 
2115 
2116 	for (i = 0; i < DP_MAX_TIDS; i++) {
2117 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2118 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2119 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2120 			params.std.addr_lo =
2121 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2122 			params.std.addr_hi =
2123 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2124 
2125 			if (sec_type != cdp_sec_type_wapi) {
2126 				params.u.upd_queue_params.update_pn_valid = 0;
2127 			} else {
2128 				/*
2129 				 * Setting PN valid bit for WAPI sec_type,
2130 				 * since WAPI PN has to be started with
2131 				 * predefined value
2132 				 */
2133 				params.u.upd_queue_params.update_pn_valid = 1;
2134 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
2135 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
2136 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
2137 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
2138 			}
2139 			rx_tid->pn_size = pn_size;
2140 			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2141 				dp_rx_tid_update_cb, rx_tid);
2142 		} else {
2143 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2144 				"PN Check not setup for TID :%d ", i);
2145 		}
2146 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2147 	}
2148 }
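/*
 * Minimal usage sketch (an assumption for illustration, not part of the
 * original sources): a key-install path could program the REO PN check
 * for a peer once its unicast CCMP key is plumbed; the zeroed rx_pn is
 * illustrative and is ignored for non-WAPI security types anyway.
 *
 *	uint32_t rx_pn[4] = { 0 };
 *
 *	dp_set_pn_check_wifi3((struct cdp_vdev *)vdev,
 *			      (struct cdp_peer *)peer,
 *			      cdp_sec_type_aes_ccmp, rx_pn);
 */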
2149 
2150 
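/**
 * dp_rx_sec_ind_handler() - handle a security type indication from target
 * @soc_handle: Datapath soc handle
 * @peer_id: peer id the indication refers to
 * @sec_type: security (cipher) type being configured
 * @is_unicast: non-zero for the unicast key, zero for the multicast key
 * @michael_key: TKIP michael key (only used by the 'notyet' blocks below)
 * @rx_pn: initial receive PN (only used by the 'notyet' blocks below)
 *
 * Records the negotiated security type in the peer so the rx path can apply
 * the matching PN check and defrag handling.
 *
 * Return: void
 */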
2151 void
2152 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
2153 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
2154 	u_int32_t *rx_pn)
2155 {
2156 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2157 	struct dp_peer *peer;
2158 	int sec_index;
2159 
2160 	peer = dp_peer_find_by_id(soc, peer_id);
2161 	if (!peer) {
2162 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2163 			"Couldn't find peer from ID %d - skipping security inits",
2164 			peer_id);
2165 		return;
2166 	}
2167 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2168 		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
2169 		"%s key of type %d",
2170 		peer,
2171 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2172 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2173 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
2174 		is_unicast ? "ucast" : "mcast",
2175 		sec_type);
2176 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
2177 	peer->security[sec_index].sec_type = sec_type;
2178 #ifdef notyet /* TODO: See if this is required for defrag support */
2179 	/* michael key only valid for TKIP, but for simplicity,
2180 	 * copy it anyway
2181 	 */
2182 	qdf_mem_copy(
2183 		&peer->security[sec_index].michael_key[0],
2184 		michael_key,
2185 		sizeof(peer->security[sec_index].michael_key));
2186 #ifdef BIG_ENDIAN_HOST
2187 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
2188 				 sizeof(peer->security[sec_index].michael_key));
2189 #endif /* BIG_ENDIAN_HOST */
2190 #endif
2191 
2192 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
2193 	if (sec_type != htt_sec_type_wapi) {
2194 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
2195 	} else {
2196 		for (i = 0; i < DP_MAX_TIDS; i++) {
2197 			/*
2198 			 * Setting PN valid bit for WAPI sec_type,
2199 			 * since WAPI PN has to be started with predefined value
2200 			 */
2201 			peer->tids_last_pn_valid[i] = 1;
2202 			qdf_mem_copy(
2203 				(u_int8_t *) &peer->tids_last_pn[i],
2204 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
2205 			peer->tids_last_pn[i].pn128[1] =
2206 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
2207 			peer->tids_last_pn[i].pn128[0] =
2208 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
2209 		}
2210 	}
2211 #endif
2212 	/* TODO: Update HW TID queue with PN check parameters (pn type for
2213 	 * all security types and last pn for WAPI) once REO command API
2214 	 * is available
2215 	 */
2216 }
2217 
2218 #ifndef CONFIG_WIN
2219 /**
2220  * dp_register_peer() - Register peer into physical device
2221  * @pdev - data path device instance
2222  * @sta_desc - peer description
2223  *
2224  * Register peer into physical device
2225  *
2226  * Return: QDF_STATUS_SUCCESS registration success
2227  *         QDF_STATUS_E_FAULT peer not found
2228  */
2229 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
2230 		struct ol_txrx_desc_type *sta_desc)
2231 {
2232 	struct dp_peer *peer;
2233 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2234 
2235 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2236 			sta_desc->sta_id);
2237 	if (!peer)
2238 		return QDF_STATUS_E_FAULT;
2239 
2240 	qdf_spin_lock_bh(&peer->peer_info_lock);
2241 	peer->state = OL_TXRX_PEER_STATE_CONN;
2242 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2243 
2244 	return QDF_STATUS_SUCCESS;
2245 }
2246 
2247 /**
2248  * dp_clear_peer() - remove peer from physical device
2249  * @pdev - data path device instance
2250  * @sta_id - local peer id
2251  *
2252  * remove peer from physical device
2253  *
2254  * Return: QDF_STATUS_SUCCESS clear success
2255  *         QDF_STATUS_E_FAULT peer not found
2256  */
2257 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
2258 {
2259 	struct dp_peer *peer;
2260 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2261 
2262 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
2263 	if (!peer)
2264 		return QDF_STATUS_E_FAULT;
2265 
2266 	qdf_spin_lock_bh(&peer->peer_info_lock);
2267 	peer->state = OL_TXRX_PEER_STATE_DISC;
2268 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2269 
2270 	return QDF_STATUS_SUCCESS;
2271 }
2272 
2273 /**
2274  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2275  * @pdev - data path device instance
2276  * @vdev - virtual interface instance
2277  * @peer_addr - peer mac address
2278  * @local_id - local peer id of the matched peer (output)
2279  *
2280  * Find peer by peer mac address within vdev
2281  *
2282  * Return: peer instance void pointer
2283  *         NULL cannot find target peer
2284  */
2285 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
2286 		struct cdp_vdev *vdev_handle,
2287 		uint8_t *peer_addr, uint8_t *local_id)
2288 {
2289 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2290 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
2291 	struct dp_peer *peer;
2292 
2293 	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
2294 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
2295 	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
2296 
2297 	if (!peer)
2298 		return NULL;
2299 
2300 	if (peer->vdev != vdev) {
2301 		qdf_atomic_dec(&peer->ref_cnt);
2302 		return NULL;
2303 	}
2304 
2305 	*local_id = peer->local_id;
2306 	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
2307 
2308 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2309 	 * Decrement it here.
2310 	 */
2311 	qdf_atomic_dec(&peer->ref_cnt);
2312 
2313 	return peer;
2314 }
2315 
2316 /**
2317  * dp_local_peer_id() - Find local peer id within peer instance
2318  * @peer - peer instance
2319  *
2320  * Find local peer id within peer instance
2321  *
2322  * Return: local peer id
2323  */
2324 uint16_t dp_local_peer_id(void *peer)
2325 {
2326 	return ((struct dp_peer *)peer)->local_id;
2327 }
2328 
2329 /**
2330  * dp_peer_find_by_local_id() - Find peer by local peer id
2331  * @pdev - data path device instance
2332  * @local_peer_id - local peer id want to find
2333  *
2334  * Find peer by local peer id within physical device
2335  *
2336  * Return: peer instance void pointer
2337  *         NULL cannot find target peer
2338  */
2339 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
2340 {
2341 	struct dp_peer *peer;
2342 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2343 
2344 	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
2345 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2346 				   "Incorrect local id %u", local_id);
2347 		return NULL;
2348 	}
2349 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2350 	peer = pdev->local_peer_ids.map[local_id];
2351 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2352 	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
2353 	return peer;
2354 }
2355 
2356 /**
2357  * dp_peer_state_update() - update peer local state
2358  * @pdev - data path device instance
2359  * @peer_addr - peer mac address
2360  * @state - new peer local state
2361  *
2362  * update peer local state
2363  *
2364  * Return: QDF_STATUS_SUCCESS registration success
2365  * Return: QDF_STATUS_SUCCESS on state update, QDF_STATUS_E_FAILURE if peer not found
2366 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
2367 		enum ol_txrx_peer_state state)
2368 {
2369 	struct dp_peer *peer;
2370 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2371 
2372 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
2373 	if (NULL == peer) {
2374 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2375 		"Failed to find peer for: [%pM]", peer_mac);
2376 		return QDF_STATUS_E_FAILURE;
2377 	}
2378 	peer->state = state;
2379 
2380 	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
2381 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
2382 	 * Decrement it here.
2383 	 */
2384 	qdf_atomic_dec(&peer->ref_cnt);
2385 
2386 	return QDF_STATUS_SUCCESS;
2387 }
2388 
2389 /**
2390  * dp_get_vdevid() - Get virtual interface id which peer registered
2391  * @peer - peer instance
2392  * @vdev_id - virtual interface id which peer registered
2393  *
2394  * Get virtual interface id which peer registered
2395  *
2396  * Return: QDF_STATUS_SUCCESS registration success
2397  * Return: QDF_STATUS_SUCCESS
2398 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2399 {
2400 	struct dp_peer *peer = peer_handle;
2401 
2402 	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
2403 			peer, peer->vdev, peer->vdev->vdev_id);
2404 	*vdev_id = peer->vdev->vdev_id;
2405 	return QDF_STATUS_SUCCESS;
2406 }
2407 
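/**
 * dp_get_vdev_by_sta_id() - Get vdev handle from the given local peer id
 * @pdev_handle - data path device instance
 * @sta_id - local peer id
 *
 * Return: virtual interface instance pointer
 *         NULL if the sta_id, pdev or peer cannot be found
 */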
2408 struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2409 				       uint8_t sta_id)
2410 {
2411 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
2412 	struct dp_peer *peer = NULL;
2413 
2414 	if (sta_id >= WLAN_MAX_STA_COUNT) {
2415 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2416 			  "Invalid sta id passed");
2417 		return NULL;
2418 	}
2419 
2420 	if (!pdev) {
2421 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2422 			  "PDEV not found for sta_id [%d]", sta_id);
2423 		return NULL;
2424 	}
2425 
2426 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2427 	if (!peer) {
2428 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2429 			  "PEER [%d] not found", sta_id);
2430 		return NULL;
2431 	}
2432 
2433 	return (struct cdp_vdev *)peer->vdev;
2434 }
2435 
2436 /**
2437  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2438  * @peer - peer instance
2439  *
2440  * Get virtual interface instance which peer belongs
2441  *
2442  * Return: virtual interface instance pointer
2443  *         NULL in case cannot find
2444  */
2445 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
2446 {
2447 	struct dp_peer *peer = peer_handle;
2448 
2449 	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
2450 	return (struct cdp_vdev *)peer->vdev;
2451 }
2452 
2453 /**
2454  * dp_peer_get_peer_mac_addr() - Get peer mac address
2455  * @peer - peer instance
2456  *
2457  * Get peer mac address
2458  *
2459  * Return: peer mac address pointer
2460  *         NULL in case cannot find
2461  */
2462 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2463 {
2464 	struct dp_peer *peer = peer_handle;
2465 	uint8_t *mac;
2466 
2467 	mac = peer->mac_addr.raw;
2468 	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2469 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2470 	return peer->mac_addr.raw;
2471 }
2472 
2473 /**
2474  * dp_get_peer_state() - Get local peer state
2475  * @peer - peer instance
2476  *
2477  * Get local peer state
2478  *
2479  * Return: peer status
2480  */
2481 int dp_get_peer_state(void *peer_handle)
2482 {
2483 	struct dp_peer *peer = peer_handle;
2484 
2485 	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
2486 	return peer->state;
2487 }
2488 
2489 /**
2490  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
2491  * @pdev - data path device instance
2492  *
2493  * local peer id pool alloc for physical device
2494  *
2495  * Return: none
2496  */
2497 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
2498 {
2499 	int i;
2500 
2501 	/* point the freelist to the first ID */
2502 	pdev->local_peer_ids.freelist = 0;
2503 
2504 	/* link each ID to the next one */
2505 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
2506 		pdev->local_peer_ids.pool[i] = i + 1;
2507 		pdev->local_peer_ids.map[i] = NULL;
2508 	}
2509 
2510 	/* link the last ID to itself, to mark the end of the list */
2511 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
2512 	pdev->local_peer_ids.pool[i] = i;
2513 
2514 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
2515 	DP_TRACE(INFO, "Peer pool init");
2516 }
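/*
 * Illustrative layout after dp_local_peer_id_pool_init() (summary, not part
 * of the original sources). With N == OL_TXRX_NUM_LOCAL_PEER_IDS:
 *
 *   freelist = 0
 *   pool[0] = 1, pool[1] = 2, ..., pool[N - 1] = N, pool[N] = N
 *   map[0 .. N - 1] = NULL
 *
 * pool[] forms a singly linked freelist of IDs; pool[N] pointing to itself
 * is the end-of-list marker that dp_local_peer_id_alloc() checks for.
 */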
2517 
2518 /**
2519  * dp_local_peer_id_alloc() - allocate local peer id
2520  * @pdev - data path device instance
2521  * @peer - new peer instance
2522  *
2523  * allocate local peer id
2524  *
2525  * Return: none
2526  */
2527 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
2528 {
2529 	int i;
2530 
2531 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2532 	i = pdev->local_peer_ids.freelist;
2533 	if (pdev->local_peer_ids.pool[i] == i) {
2534 		/* the list is empty, except for the list-end marker */
2535 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2536 	} else {
2537 		/* take the head ID and advance the freelist */
2538 		peer->local_id = i;
2539 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
2540 		pdev->local_peer_ids.map[i] = peer;
2541 	}
2542 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2543 	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
2544 }
2545 
2546 /**
2547  * dp_local_peer_id_free() - remove local peer id
2548  * @pdev - data path device instance
2549  * @peer - peer instance should be removed
2550  *
2551  * remove local peer id
2552  *
2553  * Return: none
2554  */
2555 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2556 {
2557 	int i = peer->local_id;
2558 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2559 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2560 		return;
2561 	}
2562 
2563 	/* put this ID on the head of the freelist */
2564 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2565 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2566 	pdev->local_peer_ids.freelist = i;
2567 	pdev->local_peer_ids.map[i] = NULL;
2568 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2569 }
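/*
 * Example allocation sequence (illustrative only, not part of the original
 * sources), starting from a freshly initialized pool:
 *
 *   dp_local_peer_id_alloc(pdev, peerA): local_id = 0, freelist -> 1
 *   dp_local_peer_id_alloc(pdev, peerB): local_id = 1, freelist -> 2
 *   dp_local_peer_id_free(pdev, peerA):  pool[0] = 2, freelist -> 0
 *
 * so ID 0 is handed out again by the next allocation.
 */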
2570 #endif
2571 
2572 /**
2573  * dp_get_peer_mac_addr_frm_id() - get mac address of the peer
2574  * @soc_handle: DP SOC handle
2575  * @peer_id: peer id of the peer
2576  * @peer_mac: output buffer for the peer mac address
2577  * Return: vdev_id of the vap, CDP_INVALID_VDEV_ID if peer is not found
2578  */
2579 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2580 		uint16_t peer_id, uint8_t *peer_mac)
2581 {
2582 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
2583 	struct dp_peer *peer;
2584 
2585 	peer = dp_peer_find_by_id(soc, peer_id);
2586 
2587 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2588 			"soc %pK peer_id %d", soc, peer_id);
2589 
2590 	if (!peer) {
2591 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2592 				"peer not found ");
2593 		return CDP_INVALID_VDEV_ID;
2594 	}
2595 
2596 	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
2597 	return peer->vdev->vdev_id;
2598 }
2599 
2600 /**
2601  * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
2602  * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
2603  * @dp_stats_cmd_cb: REO command callback function
2604  * @cb_ctxt: Callback context
2605  *
2606  * Return: none
2607  */
2608 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
2609 			void *cb_ctxt)
2610 {
2611 	struct dp_soc *soc = peer->vdev->pdev->soc;
2612 	struct hal_reo_cmd_params params;
2613 	int i;
2614 
2615 	if (!dp_stats_cmd_cb)
2616 		return;
2617 
2618 	qdf_mem_zero(&params, sizeof(params));
2619 	for (i = 0; i < DP_MAX_TIDS; i++) {
2620 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
2621 		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
2622 			params.std.need_status = 1;
2623 			params.std.addr_lo =
2624 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2625 			params.std.addr_hi =
2626 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2627 
2628 			if (cb_ctxt) {
2629 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2630 					&params, dp_stats_cmd_cb, cb_ctxt);
2631 			} else {
2632 				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2633 					&params, dp_stats_cmd_cb, rx_tid);
2634 			}
2635 
2636 			/* Flush REO descriptor from HW cache to update stats
2637 			 * in descriptor memory. This is to help debugging */
2638 			qdf_mem_zero(&params, sizeof(params));
2639 			params.std.need_status = 0;
2640 			params.std.addr_lo =
2641 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2642 			params.std.addr_hi =
2643 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2644 			params.u.fl_cache_params.flush_no_inval = 1;
2645 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2646 				NULL);
2647 		}
2648 	}
2649 }
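/*
 * Usage sketch (an assumption for illustration, not part of the original
 * sources): dumping REO queue stats for every TID of a peer. The callback
 * signature follows the dp_reo_send_cmd() callbacks used elsewhere in this
 * file, and the helper name is invented.
 *
 *	static void print_rx_tid_stats(struct dp_soc *soc, void *cb_ctxt,
 *				       union hal_reo_status *reo_status)
 *	{
 *		... parse reo_status->queue_status and log the counters ...
 *	}
 *
 *	dp_peer_rxtid_stats(peer, print_rx_tid_stats, NULL);
 */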
2650 
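/**
 * dp_set_michael_key() - set michael key for the peer
 * @peer_handle: Datapath peer handle
 * @is_unicast: true for the unicast key, false for the multicast key
 * @key: michael key to be copied into the peer security context
 *
 * Return: void
 */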
2651 void dp_set_michael_key(struct cdp_peer *peer_handle,
2652 			bool is_unicast, uint32_t *key)
2653 {
2654 	struct dp_peer *peer =  (struct dp_peer *)peer_handle;
2655 	uint8_t sec_index = is_unicast ? 1 : 0;
2656 
2657 	if (!peer) {
2658 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2659 			  "peer not found ");
2660 		return;
2661 	}
2662 
2663 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
2664 		     key, IEEE80211_WEP_MICLEN);
2665 }
2666