/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include <hal_api.h>
#include <hal_reo.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

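/*
 * dp_set_ssn_valid_flag() - set the SSN valid (SVLD) flag in a REO queue
 * update command
 * @params: HAL REO command parameters
 * @valid: value to program into the SVLD bit
 *
 * Compiled out to a no-op unless DP_LFR is defined.
 */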
#ifdef DP_LFR
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		"%s: Setting SSN valid bit to %d\n",
		__func__, valid);
}
#else
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
}
#endif

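/*
 * dp_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the addresses are equal, non-zero otherwise
 */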
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&:
		 * because the operands are binary rather than generic boolean,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		&
		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}

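/*
 * dp_peer_find_map_attach() - allocate the peer ID -> peer object map
 * @soc: SOC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */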
static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	/* allocate the peer ID -> peer object map */
	max_peers = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
	soc->max_peers = max_peers;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"\n<=== cfg max peer id %d ====>\n", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: peer map memory allocation failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}

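/*
 * dp_log2_ceil() - compute ceil(log2(value))
 * @value: input value (expected to be non-zero)
 *
 * For example, dp_log2_ceil(5) returns 3, since 2^3 = 8 is the smallest
 * power of two >= 5.
 *
 * Return: ceiling of the base-2 logarithm of value
 */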
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if ((1 << log2) != value)
		log2++;
	return log2;
}

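/*
 * dp_peer_find_add_id_to_obj() - record a peer ID in the peer object
 * @peer: peer handle
 * @peer_id: peer ID to store in the first free peer_ids[] slot
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all slots are in use
 */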
static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}

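/*
 * dp_peer_find_hash_detach() - free the MAC address -> peer hash table
 * @soc: SOC handle
 */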
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}

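/*
 * dp_peer_find_hash_index() - compute the hash bin index for a MAC address
 * @soc: SOC handle
 * @mac_addr: aligned MAC address
 *
 * XORs the three 16-bit halves of the MAC address, folds the upper bits
 * down into the low idx_bits, and masks the result to the table size.
 *
 * Return: hash bin index
 */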
static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}

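/*
 * dp_peer_find_hash_add() - add a peer to the MAC address hash table
 * @soc: SOC handle
 * @peer: peer handle
 */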
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index.  Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

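/*
 * dp_peer_find_hash_find() - look up a peer by MAC address
 * @soc: SOC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: set if peer_mac_addr is already 2-byte aligned
 * @vdev_id: vdev to match (only in the ATH_SUPPORT_WRAP build)
 *
 * On success the peer's reference count is incremented before the hash
 * lock is released; the caller is responsible for releasing the reference.
 *
 * Return: peer handle on success, NULL if no match is found
 */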
#if ATH_SUPPORT_WRAP
static struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
#else
static struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned)
#endif
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; this modified find takes care of locating the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			(peer->vdev->vdev_id == vdev_id)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}

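/*
 * dp_peer_find_hash_remove() - remove a peer from the MAC address hash table
 * @soc: SOC handle
 * @peer: peer handle
 *
 * The caller must hold peer_ref_mutex; see the comment in the function body
 * for the reasoning.
 */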
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the bin is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}

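/*
 * dp_peer_find_hash_erase() - delete all peers left in the hash table
 * @soc: SOC handle
 *
 * Used during SOC teardown, when no other references to the peers remain.
 */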
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}

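/*
 * dp_peer_find_map_detach() - free the peer ID -> peer object map
 * @soc: SOC handle
 */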
static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}

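/*
 * dp_peer_find_attach() - allocate the peer ID map and MAC address hash table
 * @soc: SOC handle
 *
 * Return: 0 on success, 1 on failure
 */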
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}
	return 0; /* success */
}

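/*
 * dp_peer_find_add_id() - map a firmware-assigned peer ID to a peer object
 * @soc: SOC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: AST index for this peer
 * @vdev_id: vdev ID
 */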
static inline void dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id < soc->max_peers);
	/* check if there's already a peer object with this MAC address */
#if ATH_SUPPORT_WRAP
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
		0 /* mac addr is not aligned */, vdev_id);
#else
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
		0 /* mac addr is not aligned */);
#endif
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %p ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ref_cnt: %d", __func__,
			  qdf_atomic_read(&peer->ref_cnt));
		soc->peer_id_to_obj_map[peer_id] = peer;
		peer->self_ast_entry.ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = &peer->self_ast_entry;

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return;
	}
}

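/*
 * dp_peer_add_ast() - add or update an AST entry for a peer
 * @soc: SOC handle
 * @peer: peer handle
 * @peer_mac_addr: MAC address for the AST entry
 * @hw_peer_id: AST index assigned by hardware
 * @vdev_id: vdev ID
 *
 * If an AST entry with this MAC address already exists on the peer, it is
 * moved to the new AST index; otherwise a new entry is allocated.
 */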
static inline void dp_peer_add_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *peer_mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %p ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, peer, hw_peer_id, vdev_id, peer_mac_addr[0],
		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		peer_mac_addr[4], peer_mac_addr[5]);

	/* if an entry for this MAC address already exists, just move it to
	 * the new AST index
	 */
	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ast_entry_elem) {
		if (!(qdf_mem_cmp(peer_mac_addr, ast_entry->mac_addr,
				DP_MAC_ADDR_LEN))) {
			soc->ast_table[ast_entry->ast_idx] = NULL;
			ast_entry->ast_idx = hw_peer_id;
			soc->ast_table[hw_peer_id] = ast_entry;
			return;
		}
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("fail to allocate ast_entry for: %d"), hw_peer_id);
		QDF_ASSERT(0);
		/* bail out so a NULL ast_entry is never dereferenced when
		 * assertions are compiled out
		 */
		return;
	}

	qdf_mem_copy(&ast_entry->mac_addr, peer_mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->peer = peer;
	ast_entry->next_hop = 1;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ast_entry_elem);
	soc->ast_table[hw_peer_id] = ast_entry;
}

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle: generic soc handle
 * @peer_id: peer_id from firmware
 * @hw_peer_id: ast index for this peer
 * @vdev_id: vdev ID
 * @peer_mac_addr: MAC address of the peer
 *
 * Associate the peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */
void
dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
			uint8_t vdev_id, uint8_t *peer_mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_map_event (soc:%p): peer_id %d, hw_peer_id %d, peer_mac "
		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
		peer_mac_addr[5], vdev_id);

	peer = soc->peer_id_to_obj_map[peer_id];

	/* hw_peer_id is unsigned, so only the upper bound needs checking */
	if (hw_peer_id > WLAN_UMAC_PSOC_MAX_PEERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"invalid hw_peer_id: %d", hw_peer_id);
		QDF_ASSERT(0);
	}

	/*
	 * Check if a peer already exists for this peer_id. If so, this
	 * peer map event is in response to a WDS peer add WMI command sent
	 * during WDS source port learning. In that case just add the AST
	 * entry to the existing peer ast_list.
	 */
	if (!peer) {
		dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
				hw_peer_id, vdev_id);
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(soc->osif_soc,
				peer_id, hw_peer_id, vdev_id, peer_mac_addr);
		}
	} else {
		dp_peer_add_ast(soc, peer, peer_mac_addr,
				hw_peer_id, vdev_id);
	}
}

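/*
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle: generic soc handle
 * @peer_id: peer ID to be unmapped
 *
 * Clears the peer ID -> peer object mapping and releases the reference
 * that the mapping held on the peer object.
 */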
void
dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
{
	struct dp_peer *peer;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint8_t i;

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_unmap_event (soc:%p) peer_id %d peer %p\n",
		soc, peer_id, peer);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer)
		return;

	soc->peer_id_to_obj_map[peer_id] = NULL;
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->osif_soc,
				peer_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}

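/*
 * dp_peer_find_detach() - free the peer ID map and MAC address hash table
 * @soc: SOC handle
 */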
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
}

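/*
 * dp_rx_tid_update_cb() - REO command callback for Rx TID queue updates;
 * logs an error if the HW descriptor update failed
 */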
static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if (reo_status->rx_queue_status.header.status) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc update failed(%d): tid %d\n",
			__func__,
			reo_status->rx_queue_status.header.status,
			rx_tid->tid);
	}
}

/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 *
 * Return: peer instance pointer
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
		uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

#if ATH_SUPPORT_WRAP
	/* WAR: pass vdev_id 0 for now; the correct vdev ID is TBD */
	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, 0);
#else
	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0);
#endif
	if (!peer)
		return NULL;

	/* TODO: a peer may have multiple peer IDs; which one to report? */
	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %p id %d", __func__, peer, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/*
 * dp_rx_tid_update_wifi3() – Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
				  ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	}

	dp_set_ssn_valid_flag(&params, 0);

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_update_cb, rx_tid);
	return 0;
}

/*
 * dp_reo_desc_free() - Add reo descriptor to deferred freelist and free any
 * aged out descriptors
 *
 * @soc: DP SOC handle
 * @freedesc: REO descriptor to be freed
 */
static void dp_reo_desc_free(struct dp_soc *soc,
	struct reo_desc_list_node *freedesc)
{
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
				(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* log before freeing, so we don't reference a freed pointer */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Freed: %p\n",
			__func__, desc);

		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}

#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_tid_setup_wifi3() – Setup receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;

	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);

#ifdef notyet
	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
#else
	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size);
#endif

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed: tid %d\n",
			__func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed: tid %d\n",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Total Size %d Aligned Addr %p\n",
			__func__, rx_tid->hw_qdesc_alloc_size,
			hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case htt_sec_type_tkip_nomic:
	case htt_sec_type_aes_ccmp:
	case htt_sec_type_aes_ccmp_256:
	case htt_sec_type_aes_gcmp:
	case htt_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case htt_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		/* release this attempt's buffer so the retry (or the
		 * failure path) does not leak memory
		 */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr, QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned = NULL;

		if (alloc_tries++ < 10) {
			goto try_desc_alloc;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed (lowmem): tid %d\n",
			__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			vdev->pdev->osif_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid);

	}
	return 0;
}

/*
 * Rx TID deletion callback to free memory allocated for HW queue descriptor
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;

	if (reo_status->rx_queue_status.header.status) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc deletion failed(%d): tid %d\n",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: rx_tid: %d status: %d\n", __func__,
		freedesc->rx_tid.tid,
		reo_status->rx_queue_status.header.status);

	dp_reo_desc_free(soc, freedesc);
}

/*
 * dp_rx_tid_delete_wifi3() – Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d\n",
			__func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 0;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, NULL, NULL);

	/* Flush and invalidate the REO descriptor from HW cache */
	qdf_mem_zero(&params, sizeof(params));
	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, dp_rx_tid_delete_cb,
		(void *)freedesc);

	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}

#ifdef DP_LFR
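/*
 * dp_peer_setup_remaining_tids() - set up Rx TID queues for all remaining
 * QOS TIDs, so that an LFR-roamed peer has queues ready before its first
 * packet arrives on those TIDs
 */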
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %p peer->local_id %d\n",
			tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
}
#endif

/*
 * dp_peer_rx_init() – Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;

#ifdef notyet /* TODO: See if this is required for exception handling */
		/* invalid sequence number */
		peer->tids_last_seq[tid] = 0xffff;
#endif
	}

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Setup rx tid queue for TID 0 now; other queues are set up on
	 * receiving the first packet, which would otherwise hit a NULL REO
	 * queue error
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Setup the rest of TID's to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
	 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = htt_sec_type_none;
}

/*
 * dp_peer_rx_cleanup() – Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);
			tid_delete_mask |= (1 << tid);
		}
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->osif_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
}

/*
 * dp_peer_cleanup() – Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer);
}

975 * dp_rx_addba_requestprocess_wifi3() – Process ADDBA request from peer
976 *
977 * @peer: Datapath peer handle
978 * @dialogtoken: dialogtoken from ADDBA frame
979 * @tid: TID number
980 * @startseqnum: Start seq. number received in BA sequence control
981 * in ADDBA frame
982 *
983 * Return: 0 on success, error code on failure
984 */
985 int dp_addba_requestprocess_wifi3(void *peer_handle,
986 	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
987 	uint16_t buffersize, uint16_t startseqnum)
988 {
989 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
990 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
991 
992 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
993 			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
994 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
995 
996 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize,
997 		startseqnum)) {
998 		/* TODO: Should we send addba reject in this case */
999 		return QDF_STATUS_E_FAILURE;
1000 	}
1001 
1002 	rx_tid->ba_win_size = buffersize;
1003 	rx_tid->dialogtoken = dialogtoken;
1004 	rx_tid->statuscode = QDF_STATUS_SUCCESS;
1005 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1006 
1007 	return 0;
1008 }
1009 
/*
 * dp_addba_responsesetup_wifi3() – Populate the ADDBA response parameters
 *
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: output BA window size
 * @batimeout: output BA timeout
 */
void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout  = 0;
}

/*
 * dp_delba_process_wifi3() – Process DELBA from peer
 * @peer_handle: Datapath peer handle
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
		return QDF_STATUS_E_FAILURE;

	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extension descriptor to save
	 * memory
	 */
	dp_rx_tid_update_wifi3(peer, tid, 1, 0);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;

	return 0;
}

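/*
 * dp_rx_discard() - free a list of MSDUs received from a partly-deleted peer
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @tid: TID number
 * @msdu_list: head of the nbuf list to free
 */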
void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
	qdf_nbuf_t msdu_list)
{
	while (msdu_list) {
		qdf_nbuf_t msdu = msdu_list;

		msdu_list = qdf_nbuf_next(msdu_list);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"discard rx %p from partly-deleted peer %p "
			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
			msdu, peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
		qdf_nbuf_free(msdu);
	}
}

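/*
 * dp_rx_sec_ind_handler() - handle HTT SEC_IND security indication
 * @soc_handle: generic soc handle
 * @peer_id: peer ID the indication applies to
 * @sec_type: security type configured by the target
 * @is_unicast: set for the unicast key, clear for the multicast key
 * @michael_key: TKIP Michael key (currently unused; see notyet block)
 * @rx_pn: initial PN value (currently unused; see notyet block)
 */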
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Couldn't find peer from ID %d - skipping security inits\n",
			peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"sec spec for peer %p (%02x:%02x:%02x:%02x:%02x:%02x): "
		"%s key of type %d\n",
		peer,
		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		is_unicast ? "ucast" : "mcast",
		sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
				 sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *) &peer->tids_last_pn[i],
				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */
}

#ifndef CONFIG_WIN
/**
 * dp_register_peer() - Register peer into physical device
 * @pdev - data path device instance
 * @sta_desc - peer description
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
		struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
			sta_desc->sta_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_clear_peer() - remove peer from physical device
 * @pdev - data path device instance
 * @local_id - local peer id
 *
 * remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS peer was found and its state cleared
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev - data path device instance
 * @vdev - virtual interface instance
 * @peer_addr - peer mac address
 * @local_id - local id for the peer with the target mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev_handle,
		uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	DP_TRACE(INFO, "vdev %p peer_addr %p", vdev, peer_addr);
	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0);
	DP_TRACE(INFO, "peer %p vdev %p", peer, vdev);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		/* drop the reference taken by dp_peer_find_hash_find() */
		qdf_atomic_dec(&peer->ref_cnt);
		return NULL;
	}

	*local_id = peer->local_id;
	DP_TRACE(INFO, "peer %p vdev %p local id %d", peer, vdev, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/**
 * dp_local_peer_id() - Find local peer id within peer instance
 * @peer - peer instance
 *
 * Find local peer id within peer instance
 *
 * Return: local peer id
 */
uint16_t dp_local_peer_id(void *peer)
{
	return ((struct dp_peer *)peer)->local_id;
}

/**
 * dp_peer_find_by_local_id() - Find peer by local peer id
 * @pdev - data path device instance
 * @local_id - local peer id to find
 *
 * Find peer by local peer id within physical device
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %p local id %d",
			peer, local_id);
	return peer;
}

/**
 * dp_peer_state_update() - update peer local state
 * @pdev - data path device instance
 * @peer_mac - peer mac address
 * @state - new peer local state
 *
 * update peer local state
 *
 * Return: QDF_STATUS_SUCCESS state update success
 */
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
		enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0);
	if (NULL == peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	DP_TRACE(INFO, "peer %p state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @peer - peer instance
 * @vdev_id - virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p vdev %p vdev id %d",
			peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdev_for_peer() - Get virtual interface instance the peer belongs to
 * @peer - peer instance
 *
 * Get the virtual interface instance to which the peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p vdev %p", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer - peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	DP_TRACE(INFO, "peer %p mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

/**
 * dp_get_peer_state() - Get local peer state
 * @peer - peer instance
 *
 * Get local peer state
 *
 * Return: peer status
 */
int dp_get_peer_state(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p state %d", peer, peer->state);
	return peer->state;
}

/**
 * dp_get_last_assoc_received() - get time of last assoc received
 * @peer_handle: peer handle
 *
 * Return: pointer for the time of last assoc received
 */
qdf_time_t *dp_get_last_assoc_received(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p last_assoc_rcvd: %lu", peer,
		peer->last_assoc_rcvd);
	return &peer->last_assoc_rcvd;
}

/**
 * dp_get_last_disassoc_received() - get time of last disassoc received
 * @peer_handle: peer handle
 *
 * Return: pointer for the time of last disassoc received
 */
qdf_time_t *dp_get_last_disassoc_received(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p last_disassoc_rcvd: %lu", peer,
		peer->last_disassoc_rcvd);
	return &peer->last_disassoc_rcvd;
}

/**
 * dp_get_last_deauth_received() - get time of last deauth received
 * @peer_handle: peer handle
 *
 * Return: pointer for the time of last deauth received
 */
qdf_time_t *dp_get_last_deauth_received(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %p last_deauth_rcvd: %lu", peer,
		peer->last_deauth_rcvd);
	return &peer->last_deauth_rcvd;
}

/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev - data path device instance
 *
 * local peer id pool alloc for physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}

/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev - data path device instance
 * @peer - new peer instance
 *
 * allocate local peer id
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %p, local id %d", peer, peer->local_id);
}

/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev - data path device instance
 * @peer - peer instance should be removed
 *
 * remove local peer id
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
#endif

/**
 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer_id of the peer
 * @peer_mac: output buffer for the peer's mac address
 *
 * Return: vdev_id of the vap
 */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
		uint16_t peer_id, uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"soc %p peer_id %d", soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"peer not found");
		return CDP_INVALID_VDEV_ID;
	}

	qdf_mem_copy(peer_mac, peer->mac_addr.raw, DP_MAC_ADDR_LEN);
	return peer->vdev->vdev_id;
}