xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c (revision 890dbd2774e25e00ca31154aa4c2ba6a62c2c708)
1 /*
2  * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include "dp_htt.h"
22 #include "dp_types.h"
23 #include "dp_internal.h"
24 #include "dp_peer.h"
25 #include <hal_api.h>
26 #include <hal_reo.h>
27 #ifdef CONFIG_MCL
28 #include <cds_ieee80211_common.h>
29 #endif
30 #include <cdp_txrx_handle.h>
31 /* Temporary definitions to be moved to wlan_cfg */
32 static inline uint32_t wlan_cfg_max_peer_id(void *wlan_cfg_ctx)
33 {
34 	/* TODO: This should be calculated based on target capabilities */
35 	return 2048;
36 }
37 
38 static inline int dp_peer_find_mac_addr_cmp(
39 	union dp_align_mac_addr *mac_addr1,
40 	union dp_align_mac_addr *mac_addr2)
41 {
42 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
43 		/*
44 		 * Intentionally use & rather than &&;
45 		 * because the operands are binary rather than generic boolean,
46 		 * the functionality is equivalent.
47 		 * Using && has the advantage of short-circuited evaluation,
48 		 * but using & has the advantage of no conditional branching,
49 		 * which is a more significant benefit.
50 		 */
51 		&
52 		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
53 }
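
/*
 * Layout note (illustrative, not from the original source): union
 * dp_align_mac_addr is assumed to overlay the 6-byte MAC address so it
 * can be compared as one aligned 4-byte word plus one 2-byte word
 * instead of six separate byte compares:
 *
 *	union dp_align_mac_addr a, b;
 *
 *	qdf_mem_copy(a.raw, addr1, DP_MAC_ADDR_LEN);
 *	qdf_mem_copy(b.raw, addr2, DP_MAC_ADDR_LEN);
 *	if (dp_peer_find_mac_addr_cmp(&a, &b) == 0)
 *		...  addresses match (memcmp-style: 0 means equal)  ...
 *
 * Here addr1/addr2 are hypothetical uint8_t MAC address buffers.
 */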
54 
55 static int dp_peer_find_map_attach(struct dp_soc *soc)
56 {
57 	uint32_t max_peers, peer_map_size;
58 
59 	/* allocate the peer ID -> peer object map */
60 	max_peers = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
61 	soc->max_peers = max_peers;
62 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
63 		"\n<=== cfg max peer id %d ===>\n", max_peers);
64 	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
65 	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
66 	if (!soc->peer_id_to_obj_map) {
67 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
68 			"%s: peer map memory allocation failed\n", __func__);
69 		return QDF_STATUS_E_NOMEM;
70 	}
71 
72 	/*
73 	 * The peer_id_to_obj_map doesn't really need to be initialized,
74 	 * since elements are only used after they have been individually
75 	 * initialized.
76 	 * However, it is convenient for debugging to have all elements
77 	 * that are not in use set to 0.
78 	 */
79 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
80 #ifdef notyet /* ATH_BAND_STEERING */
81 		OS_INIT_TIMER(soc->osdev, &(soc->bs_inact_timer),
82 			dp_peer_find_inact_timeout_handler, (void *)soc,
83 			QDF_TIMER_TYPE_WAKE_APPS);
84 #endif
85 	return 0; /* success */
86 }
87 
88 static int dp_log2_ceil(unsigned value)
89 {
90 	unsigned tmp = value;
91 	int log2 = -1;
92 
93 	while (tmp) {
94 		log2++;
95 		tmp >>= 1;
96 	}
97 	if (1 << log2 != value)
98 		log2++;
99 	return log2;
100 }
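
/*
 * Worked examples for dp_log2_ceil(), which computes ceil(log2(value)):
 *	dp_log2_ceil(1) -> 0	(1 == 1 << 0, no round-up needed)
 *	dp_log2_ceil(5) -> 3	(the loop yields 2, but 1 << 2 != 5, so +1)
 *	dp_log2_ceil(8) -> 3	(8 == 1 << 3 exactly)
 * Callers here always pass a non-zero element count; a value of 0 would
 * leave log2 at -1 and make the (1 << log2) check shift by a negative
 * amount.
 */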
101 
102 static int dp_peer_find_add_id_to_obj(
103 	struct dp_peer *peer,
104 	uint16_t peer_id)
105 {
106 	int i;
107 
108 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
109 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
110 			peer->peer_ids[i] = peer_id;
111 			return 0; /* success */
112 		}
113 	}
114 	return QDF_STATUS_E_FAILURE; /* failure */
115 }
116 
117 #define DP_PEER_HASH_LOAD_MULT  2
118 #define DP_PEER_HASH_LOAD_SHIFT 0
119 
120 static int dp_peer_find_hash_attach(struct dp_soc *soc)
121 {
122 	int i, hash_elems, log2;
123 
124 	/* allocate the peer MAC address -> peer object hash table */
125 	hash_elems = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
126 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
127 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
128 	log2 = dp_log2_ceil(hash_elems);
129 	hash_elems = 1 << log2;
130 
131 	soc->peer_hash.mask = hash_elems - 1;
132 	soc->peer_hash.idx_bits = log2;
133 	/* allocate an array of TAILQ peer object lists */
134 	soc->peer_hash.bins = qdf_mem_malloc(
135 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
136 	if (!soc->peer_hash.bins)
137 		return QDF_STATUS_E_NOMEM;
138 
139 	for (i = 0; i < hash_elems; i++)
140 		TAILQ_INIT(&soc->peer_hash.bins[i]);
141 
142 	return 0;
143 }
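
/*
 * Sizing example with the temporary wlan_cfg_max_peer_id() value above:
 * hash_elems = (2048 + 1) * DP_PEER_HASH_LOAD_MULT = 4098, unchanged by
 * the DP_PEER_HASH_LOAD_SHIFT (0) right shift, and
 * dp_log2_ceil(4098) = 13, so the table rounds up to 1 << 13 = 8192
 * bins with mask 0x1fff. A power-of-two bin count lets
 * dp_peer_find_hash_index() reduce the hash with a cheap AND rather
 * than a modulo.
 */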
144 
145 static void dp_peer_find_hash_detach(struct dp_soc *soc)
146 {
147 	qdf_mem_free(soc->peer_hash.bins);
148 }
149 
150 static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
151 	union dp_align_mac_addr *mac_addr)
152 {
153 	unsigned index;
154 
155 	index =
156 		mac_addr->align2.bytes_ab ^
157 		mac_addr->align2.bytes_cd ^
158 		mac_addr->align2.bytes_ef;
159 	index ^= index >> soc->peer_hash.idx_bits;
160 	index &= soc->peer_hash.mask;
161 	return index;
162 }
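
/*
 * Worked example (assuming little-endian 16-bit packing of align2): for
 * MAC 00:03:7f:12:34:56, bytes_ab = 0x0300, bytes_cd = 0x127f and
 * bytes_ef = 0x5634, so index = 0x0300 ^ 0x127f ^ 0x5634 = 0x474b
 * before the fold and mask. The XOR fold by idx_bits mixes the high
 * bits of the hash into the low bits, so masking down to the bin count
 * does not simply discard them.
 */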
163 
164 
165 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
166 {
167 	unsigned index;
168 
169 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
170 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
171 	/*
172 	 * It is important to add the new peer at the tail of the peer list
173 	 * with the bin index.  Together with having the hash_find function
174 	 * search from head to tail, this ensures that if two entries with
175 	 * the same MAC address are stored, the one added first will be
176 	 * found first.
177 	 */
178 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
179 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
180 }
181 
182 #if ATH_SUPPORT_WRAP
183 static struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
184 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
185 #else
186 static struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
187 	uint8_t *peer_mac_addr, int mac_addr_is_aligned)
188 #endif
189 {
190 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
191 	unsigned index;
192 	struct dp_peer *peer;
193 
194 	if (mac_addr_is_aligned) {
195 		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
196 	} else {
197 		qdf_mem_copy(
198 			&local_mac_addr_aligned.raw[0],
199 			peer_mac_addr, DP_MAC_ADDR_LEN);
200 		mac_addr = &local_mac_addr_aligned;
201 	}
202 	index = dp_peer_find_hash_index(soc, mac_addr);
203 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
204 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
205 #if ATH_SUPPORT_WRAP
206 		/* ProxySTA may have multiple BSS peers with the same MAC address;
207 		 * the modified find takes care of locating the correct BSS peer.
208 		 */
209 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
210 			(peer->vdev->vdev_id == vdev_id)) {
211 #else
212 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
213 #endif
214 			/* found it - increment the ref count before releasing
215 			 * the lock
216 			 */
217 			qdf_atomic_inc(&peer->ref_cnt);
218 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
219 			return peer;
220 		}
221 	}
222 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
223 	return NULL; /* failure */
224 }
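
/*
 * Usage sketch (illustrative, not part of the original file): the
 * lookup returns with peer->ref_cnt already incremented, so every
 * successful call must be balanced by a release:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0);
 *	if (peer) {
 *		...  use peer under the reference taken above  ...
 *		dp_peer_unref_delete(peer);
 *	}
 *
 * (With ATH_SUPPORT_WRAP the call takes an extra vdev_id argument; some
 * callers below instead drop the reference with a bare
 * qdf_atomic_dec(&peer->ref_cnt).)
 */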
225 
226 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
227 {
228 	unsigned index;
229 	struct dp_peer *tmppeer = NULL;
230 	int found = 0;
231 
232 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
233 	/* Check that the list is not empty before delete */
234 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
235 	/*
236 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
237 	 * by the caller.
238 	 * The caller needs to hold the lock from the time the peer object's
239 	 * reference count is decremented and tested up through the time the
240 	 * reference to the peer object is removed from the hash table, by
241 	 * this function.
242 	 * Holding the lock only while removing the peer object reference
243 	 * from the hash table keeps the hash table consistent, but does not
244 	 * protect against a new HL tx context starting to use the peer object
245 	 * if it looks up the peer object from its MAC address just after the
246 	 * peer ref count is decremented to zero, but just before the peer
247 	 * object reference is removed from the hash table.
248 	 */
249 	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
250 		if (tmppeer == peer) {
251 			found = 1;
252 			break;
253 		}
254 	}
255 	QDF_ASSERT(found);
256 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
257 }
258 
259 void dp_peer_find_hash_erase(struct dp_soc *soc)
260 {
261 	int i;
262 
263 	/*
264 	 * Not really necessary to take peer_ref_mutex lock - by this point,
265 	 * it's known that the soc is no longer in use.
266 	 */
267 	for (i = 0; i <= soc->peer_hash.mask; i++) {
268 		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
269 			struct dp_peer *peer, *peer_next;
270 
271 			/*
272 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
273 			 * memory access violation after peer is freed
274 			 */
275 			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
276 				hash_list_elem, peer_next) {
277 				/*
278 				 * Don't remove the peer from the hash table -
279 				 * that would modify the list we are currently
280 				 * traversing, and it's not necessary anyway.
281 				 */
282 				/*
283 				 * Artificially adjust the peer's ref count to
284 				 * 1, so it will get deleted by
285 				 * dp_peer_unref_delete.
286 				 */
287 				/* set to zero */
288 				qdf_atomic_init(&peer->ref_cnt);
289 				/* incr to one */
290 				qdf_atomic_inc(&peer->ref_cnt);
291 				dp_peer_unref_delete(peer);
292 			}
293 		}
294 	}
295 }
296 
297 static void dp_peer_find_map_detach(struct dp_soc *soc)
298 {
299 #ifdef notyet /* ATH_BAND_STEERING */
300 	OS_FREE_TIMER(&(soc->bs_inact_timer));
301 #endif
302 	qdf_mem_free(soc->peer_id_to_obj_map);
303 }
304 
305 int dp_peer_find_attach(struct dp_soc *soc)
306 {
307 	if (dp_peer_find_map_attach(soc))
308 		return 1;
309 
310 	if (dp_peer_find_hash_attach(soc)) {
311 		dp_peer_find_map_detach(soc);
312 		return 1;
313 	}
314 	return 0; /* success */
315 }
316 
317 static inline void dp_peer_find_add_id(struct dp_soc *soc,
318 	uint8_t *peer_mac_addr, uint16_t peer_id, uint8_t vdev_id)
319 {
320 	struct dp_peer *peer;
321 
322 	QDF_ASSERT(peer_id <= wlan_cfg_max_peer_id(soc->wlan_cfg_ctx));
323 	/* check if there's already a peer object with this MAC address */
324 #if ATH_SUPPORT_WRAP
325 	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
326 		0 /* mac_addr_is_aligned */, vdev_id);
327 #else
328 	peer = dp_peer_find_hash_find(soc, peer_mac_addr, 0 /* mac_addr_is_aligned */);
329 #endif
330 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
331 		"%s: peer %p ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
332 		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
333 		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
334 		peer_mac_addr[4], peer_mac_addr[5]);
335 
336 	if (peer) {
337 		/* peer's ref count was already incremented by
338 		 * peer_find_hash_find
339 		 */
340 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
341 			  "%s: ref_cnt: %d", __func__,
342 			   qdf_atomic_read(&peer->ref_cnt));
343 		soc->peer_id_to_obj_map[peer_id] = peer;
344 
345 		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
346 			/* TBDXXX: assert for now */
347 			QDF_ASSERT(0);
348 		}
349 
350 		return;
351 	}
352 }
353 
354 void
355 dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint8_t vdev_id,
356 	uint8_t *peer_mac_addr)
357 {
358 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
359 
360 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
361 		"peer_map_event (soc:%p): peer_id %d, peer_mac "
362 		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
363 		peer_mac_addr[0], peer_mac_addr[1], peer_mac_addr[2],
364 		peer_mac_addr[3], peer_mac_addr[4], peer_mac_addr[5], vdev_id);
365 
366 	dp_peer_find_add_id(soc, peer_mac_addr, peer_id, vdev_id);
367 }
368 
369 void
370 dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
371 {
372 	struct dp_peer *peer;
373 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
374 	uint8_t i;
375 	peer = dp_peer_find_by_id(soc, peer_id);
376 
377 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
378 		"peer_unmap_event (soc:%p) peer_id %d peer %p\n",
379 		soc, peer_id, peer);
380 
381 	/*
382 	 * Currently peer IDs are assigned for vdevs as well as peers.
383 	 * If the peer ID is for a vdev, then the peer pointer stored
384 	 * in peer_id_to_obj_map will be NULL.
385 	 */
386 	if (!peer)
387 		return;
388 
389 	soc->peer_id_to_obj_map[peer_id] = NULL;
390 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
391 		if (peer->peer_ids[i] == peer_id) {
392 			peer->peer_ids[i] = HTT_INVALID_PEER;
393 			break;
394 		}
395 	}
396 
397 	/*
398 	 * Remove a reference to the peer.
399 	 * If there are no more references, delete the peer object.
400 	 */
401 	dp_peer_unref_delete(peer);
402 }
403 
404 void
405 dp_peer_find_detach(struct dp_soc *soc)
406 {
407 	dp_peer_find_map_detach(soc);
408 	dp_peer_find_hash_detach(soc);
409 }
410 
411 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
412 	union hal_reo_status *reo_status)
413 {
414 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
415 
416 	if (reo_status->rx_queue_status.header.status) {
417 		/* Should not happen normally. Just print error for now */
418 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
419 			"%s: Rx tid HW desc update failed (%d): tid %d\n",
420 			__func__,
421 			reo_status->rx_queue_status.header.status,
422 			rx_tid->tid);
423 	}
424 }
425 
426 /*
427  * dp_find_peer_by_addr - find peer instance by mac address
428  * @dev: physical device instance
429  * @peer_mac_addr: peer mac address
430  * @local_id: local id for the peer
431  *
432  * Return: peer instance pointer
433  */
434 void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
435 		uint8_t *local_id)
436 {
437 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
438 	struct dp_peer *peer;
439 
440 #if ATH_SUPPORT_WRAP
441 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, 0);
442 	/* WAR: vdev_id is not known here; temporarily pass 0 */
443 #else
444 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0);
445 #endif
446 	if (!peer)
447 		return NULL;
448 
449 	/* Multiple peer IDs? How do we know which peer ID to use? */
450 	*local_id = peer->local_id;
451 	DP_TRACE(INFO, "%s: peer %p id %d", __func__, peer, *local_id);
452 
453 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
454 	 * Decrement it here.
455 	 */
456 	qdf_atomic_dec(&peer->ref_cnt);
457 
458 	return peer;
459 }
460 
461 /*
462  * dp_rx_tid_update_wifi3() – Update receive TID state
463  * @peer: Datapath peer handle
464  * @tid: TID
465  * @ba_window_size: BlockAck window size
466  * @start_seq: Starting sequence number
467  *
468  * Return: 0 on success, error code on failure
469  */
470 static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid,
471 				  uint32_t ba_window_size, uint32_t start_seq)
472 {
473 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
474 	struct dp_soc *soc = peer->vdev->pdev->soc;
475 	struct hal_reo_cmd_params params;
476 
477 	qdf_mem_zero(&params, sizeof(params));
478 
479 	params.std.need_status = 1;
480 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
481 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
482 	params.u.upd_queue_params.update_ba_window_size = 1;
483 	params.u.upd_queue_params.ba_window_size = ba_window_size;
484 
485 	if (start_seq < IEEE80211_SEQ_MAX) {
486 		params.u.upd_queue_params.update_ssn = 1;
487 		params.u.upd_queue_params.ssn = start_seq;
488 	}
489 
490 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
491 	return 0;
492 }
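
/*
 * Worked example of the address split used by the REO commands in this
 * file: for a queue descriptor physical address of 0x123456780,
 *	params.std.addr_lo = 0x23456780		(paddr & 0xffffffff)
 *	params.std.addr_hi = 0x1		((uint64_t)paddr >> 32)
 * so the full 64-bit address is carried in two 32-bit command fields.
 */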
493 
494 /*
495  * dp_reo_desc_free() - Add reo descriptor to deferred freelist and free any
496  * aged out descriptors
497  *
498  * @soc: DP SOC handle
499  * @freedesc: REO descriptor to be freed
500  */
501 static void dp_reo_desc_free(struct dp_soc *soc,
502 	struct reo_desc_list_node *freedesc)
503 {
504 	uint32_t list_size;
505 	struct reo_desc_list_node *desc;
506 	unsigned long curr_ts = qdf_get_system_timestamp();
507 
508 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
509 	freedesc->free_ts = curr_ts;
510 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
511 		(qdf_list_node_t *)freedesc, &list_size);
512 
513 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
514 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
515 		((list_size >= REO_DESC_FREELIST_SIZE) ||
516 		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
517 		struct dp_rx_tid *rx_tid;
518 
519 		qdf_list_remove_front(&soc->reo_desc_freelist,
520 				(qdf_list_node_t **)&desc);
521 		list_size--;
522 		rx_tid = &desc->rx_tid;
523 	/* Calling qdf_mem_free_consistent() in MCL is resulting in kernel BUG.
524 	 * Disable this temporarily.
525 	 */
526 #ifndef QCA_WIFI_NAPIER_EMULATION
527 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
528 			rx_tid->hw_qdesc_alloc_size,
529 			rx_tid->hw_qdesc_vaddr_unaligned,
530 			rx_tid->hw_qdesc_paddr_unaligned, 0);
531 #endif
532 		qdf_mem_free(desc);
533 
534 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
535 			"%s: Freed: %p\n",
536 			__func__, desc);
537 	}
538 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
539 }
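
/*
 * Aging illustration (the constants live in a header; the values here
 * are only assumed for the example): with REO_DESC_FREELIST_SIZE = 64
 * and REO_DESC_FREE_DEFER_MS = 1000, a descriptor queued at
 * t = 5000 ms is freed either once the list holds 64 entries or on the
 * first dp_reo_desc_free() call after t = 6000 ms, whichever comes
 * first. The deferral gives HW time to finish any in-flight access to
 * the queue descriptor memory before it is returned to the allocator.
 */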
540 
541 /*
542  * dp_rx_tid_setup_wifi3() – Setup receive TID state
543  * @peer: Datapath peer handle
544  * @tid: TID
545  * @ba_window_size: BlockAck window size
546  * @start_seq: Starting sequence number
547  *
548  * Return: 0 on success, error code on failure
549  */
550 int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
551 				 uint32_t ba_window_size, uint32_t start_seq)
552 {
553 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
554 	struct dp_vdev *vdev = peer->vdev;
555 	struct dp_soc *soc = vdev->pdev->soc;
556 	uint32_t hw_qdesc_size;
557 	uint32_t hw_qdesc_align;
558 	int hal_pn_type;
559 	void *hw_qdesc_vaddr;
560 
561 	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
562 		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
563 			start_seq);
564 
565 #ifdef notyet
566 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
567 #else
568 	/* TODO: Allocating HW queue descriptors based on max BA window size
569 	 * for all QOS TIDs so that same descriptor can be used later when
570 	 * ADDBA request is received. This should be changed to allocate HW
571 	 * queue descriptors based on BA window size being negotiated (0 for
572 	 * non BA cases), and reallocate when BA window size changes and also
573 	 * send WMI message to FW to change the REO queue descriptor in Rx
574 	 * peer entry as part of dp_rx_tid_update.
575 	 */
576 	if (tid != DP_NON_QOS_TID)
577 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
578 			HAL_RX_MAX_BA_WINDOW);
579 	else
580 		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
581 			ba_window_size);
582 #endif
583 
584 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
585 	/* To avoid unnecessary extra allocation for alignment, try allocating
586 	 * the exact size and see whether the address is already aligned.
587 	 */
588 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
589 	rx_tid->hw_qdesc_vaddr_unaligned = qdf_mem_alloc_consistent(
590 		soc->osdev, soc->osdev->dev, rx_tid->hw_qdesc_alloc_size,
591 		&(rx_tid->hw_qdesc_paddr_unaligned));
592 
593 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
594 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
595 			"%s: Rx tid HW desc alloc failed: tid %d\n",
596 			__func__, tid);
597 		return QDF_STATUS_E_NOMEM;
598 	}
599 
600 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
601 		hw_qdesc_align) {
602 		/* Address allocated above is not aligned. Allocate extra
603 		 * memory for alignment
604 		 */
605 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
606 				rx_tid->hw_qdesc_alloc_size,
607 				rx_tid->hw_qdesc_vaddr_unaligned,
608 				rx_tid->hw_qdesc_paddr_unaligned, 0);
609 
610 		rx_tid->hw_qdesc_alloc_size =
611 			hw_qdesc_size + hw_qdesc_align - 1;
612 		rx_tid->hw_qdesc_vaddr_unaligned = qdf_mem_alloc_consistent(
613 			soc->osdev, soc->osdev->dev, rx_tid->hw_qdesc_alloc_size,
614 			&(rx_tid->hw_qdesc_paddr_unaligned));
615 
616 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
617 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
618 				"%s: Rx tid HW desc alloc failed: tid %d\n",
619 				__func__, tid);
620 			return QDF_STATUS_E_NOMEM;
621 		}
622 
623 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned +
624 			((hw_qdesc_align - ((unsigned long)
625 			(rx_tid->hw_qdesc_vaddr_unaligned) % hw_qdesc_align)) % hw_qdesc_align);
626 
627 		rx_tid->hw_qdesc_paddr = rx_tid->hw_qdesc_paddr_unaligned +
628 			((unsigned long)hw_qdesc_vaddr -
629 			(unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned));
630 	} else {
631 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
632 		rx_tid->hw_qdesc_paddr = rx_tid->hw_qdesc_paddr_unaligned;
633 	}
634 
635 	/* TODO: Ensure that sec_type is set before ADDBA is received.
636 	 * Currently this is set based on htt indication
637 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
638 	 */
639 	switch (peer->security[dp_sec_ucast].sec_type) {
640 	case htt_sec_type_tkip_nomic:
641 	case htt_sec_type_aes_ccmp:
642 	case htt_sec_type_aes_ccmp_256:
643 	case htt_sec_type_aes_gcmp:
644 	case htt_sec_type_aes_gcmp_256:
645 		hal_pn_type = HAL_PN_WPA;
646 		break;
647 	case htt_sec_type_wapi:
648 		if (vdev->opmode == wlan_op_mode_ap)
649 			hal_pn_type = HAL_PN_WAPI_EVEN;
650 		else
651 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
652 		break;
653 	default:
654 		hal_pn_type = HAL_PN_NONE;
655 		break;
656 	}
657 
658 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
659 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
660 
661 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
662 		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
663 			vdev->pdev->osif_pdev,
664 			peer->vdev->vdev_id, peer->mac_addr.raw,
665 			rx_tid->hw_qdesc_paddr, tid, tid);
666 
667 	}
668 	return 0;
669 }
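
/*
 * Alignment arithmetic example for the fallback path above (addresses
 * are illustrative): with hw_qdesc_size = 200 and hw_qdesc_align = 128,
 * the retry allocates 200 + 128 - 1 = 327 bytes. If that returns
 * virtual address 0x2010 (0x2010 % 128 = 0x10), the aligned address is
 * 0x2010 + (128 - 0x10) = 0x2080, and hw_qdesc_paddr is advanced by the
 * same 0x70-byte delta so the virtual and physical views stay in step.
 */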
670 
671 /*
672  * Rx TID deletion callback to free memory allocated for HW queue descriptor
673  */
674 static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
675 	union hal_reo_status *reo_status)
676 {
677 	struct reo_desc_list_node *freedesc =
678 		(struct reo_desc_list_node *)cb_ctxt;
679 
680 	if (reo_status->rx_queue_status.header.status) {
681 		/* Should not happen normally. Just print error for now */
682 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
683 			"%s: Rx tid HW desc deletion failed (%d): tid %d\n",
684 			__func__,
685 			reo_status->rx_queue_status.header.status,
686 			freedesc->rx_tid.tid);
687 	}
688 
689 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
690 		"%s: rx_tid: %d status: %d\n", __func__,
691 		freedesc->rx_tid.tid,
692 		reo_status->rx_queue_status.header.status);
693 
694 	dp_reo_desc_free(soc, freedesc);
695 }
696 
697 /*
698  * dp_rx_tid_delete_wifi3() – Delete receive TID queue
699  * @peer: Datapath peer handle
700  * @tid: TID
701  *
702  * Return: 0 on success, error code on failure
703  */
704 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
705 {
706 	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
707 	struct dp_soc *soc = peer->vdev->pdev->soc;
708 	struct hal_reo_cmd_params params;
709 	struct reo_desc_list_node *freedesc =
710 		qdf_mem_malloc(sizeof(*freedesc));
711 	if (!freedesc) {
712 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
713 			"%s: malloc failed for freedesc: tid %d\n",
714 			__func__, tid);
715 		return -ENOMEM;
716 	}
717 
718 	freedesc->rx_tid = *rx_tid;
719 
720 	qdf_mem_zero(&params, sizeof(params));
721 
722 	params.std.need_status = 0;
723 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
724 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
725 	params.u.upd_queue_params.update_vld = 1;
726 	params.u.upd_queue_params.vld = 0;
727 
728 	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, NULL, NULL);
729 
730 	/* Flush and invalidate the REO descriptor from HW cache */
731 	qdf_mem_zero(&params, sizeof(params));
732 	params.std.need_status = 1;
733 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
734 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
735 
736 	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, dp_rx_tid_delete_cb,
737 		(void *)freedesc);
738 
739 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
740 	rx_tid->hw_qdesc_alloc_size = 0;
741 	rx_tid->hw_qdesc_paddr = 0;
742 
743 	return 0;
744 }
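
/*
 * Note on the delete sequence above: two REO commands are issued back
 * to back - an UPDATE_RX_REO_QUEUE that clears the VLD bit so HW stops
 * using the queue, then a FLUSH_CACHE whose completion callback
 * (dp_rx_tid_delete_cb) finally releases the descriptor memory through
 * dp_reo_desc_free(). The rx_tid state is copied into freedesc first
 * because the rx_tid fields are reset before that callback runs.
 */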
745 
746 /*
747  * dp_peer_rx_init() – Initialize receive TID state
748  * @pdev: Datapath pdev
749  * @peer: Datapath peer
750  *
751  */
752 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
753 {
754 	int tid;
755 	struct dp_rx_tid *rx_tid;
756 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
757 		rx_tid = &peer->rx_tid[tid];
758 		rx_tid->array = &rx_tid->base;
759 		rx_tid->base.head = rx_tid->base.tail = NULL;
760 		rx_tid->tid = tid;
761 		rx_tid->defrag_timeout_ms = 0;
762 		rx_tid->ba_win_size = 0;
763 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
764 
765 		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
766 		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
767 
768 #ifdef notyet /* TODO: See if this is required for exception handling */
769 		/* invalid sequence number */
770 		peer->tids_last_seq[tid] = 0xffff;
771 #endif
772 	}
773 
774 	/* Setup default (non-qos) rx tid queue */
775 	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
776 
777 	/* Setup rx tid queue for TID 0.
778 	 * Other queues will be set up on receiving the first packet, which
779 	 * will cause a NULL REO queue error for that packet
780 	 */
781 	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
782 
783 	/*
784 	 * Set security defaults: no PN check, no security. The target may
785 	 * send a HTT SEC_IND message to overwrite these defaults.
786 	 */
787 	peer->security[dp_sec_ucast].sec_type =
788 		peer->security[dp_sec_mcast].sec_type = htt_sec_type_none;
789 }
790 
791 /*
792  * dp_peer_rx_cleanup() – Cleanup receive TID state
793  * @vdev: Datapath vdev
794  * @peer: Datapath peer
795  *
796  */
797 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
798 {
799 	int tid;
800 	uint32_t tid_delete_mask = 0;
801 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
802 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
803 			dp_rx_tid_delete_wifi3(peer, tid);
804 			tid_delete_mask |= (1 << tid);
805 		}
806 	}
807 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
808 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
809 		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->osif_pdev,
810 			peer->vdev->vdev_id, peer->mac_addr.raw,
811 			tid_delete_mask);
812 	}
813 #endif
814 }
815 
816 /*
817  * dp_peer_cleanup() – Cleanup peer information
818  * @vdev: Datapath vdev
819  * @peer: Datapath peer
820  *
821  */
822 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
823 {
824 	peer->last_assoc_rcvd = 0;
825 	peer->last_disassoc_rcvd = 0;
826 	peer->last_deauth_rcvd = 0;
827 
828 	/* cleanup the Rx reorder queues for this peer */
829 	dp_peer_rx_cleanup(vdev, peer);
830 }
831 
832 /*
833 * dp_addba_requestprocess_wifi3() – Process ADDBA request from peer
834 * @peer: Datapath peer handle
835 * @dialogtoken: dialogtoken from ADDBA frame
836 * @tid: TID number
837 * @batimeout: BA timeout from ADDBA frame
838 * @buffersize: BA window size from ADDBA frame
839 * @startseqnum: Start seq. number received in BA sequence control in ADDBA frame
840 *
841 * Return: 0 on success, error code on failure
842 */
843 int dp_addba_requestprocess_wifi3(void *peer_handle,
844 	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
845 	uint16_t buffersize, uint16_t startseqnum)
846 {
847 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
848 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
849 
850 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
851 			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
852 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
853 
854 	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize,
855 		startseqnum)) {
856 		/* TODO: Should we send addba reject in this case */
857 		return QDF_STATUS_E_FAILURE;
858 	}
859 
860 	rx_tid->ba_win_size = buffersize;
861 	rx_tid->dialogtoken = dialogtoken;
862 	rx_tid->statuscode = QDF_STATUS_SUCCESS;
863 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
864 
865 	return 0;
866 }
867 
868 /*
869 * dp_addba_responsesetup_wifi3() – Set up ADDBA response parameters
870 *
871 * @peer: Datapath peer handle
872 * @tid: TID number
873 * @dialogtoken: output dialogtoken
874 * @statuscode: output status code
875 * @buffersize: output BA window size
876 * @batimeout: output BA timeout
877 */
878 void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
879 	uint8_t *dialogtoken, uint16_t *statuscode,
880 	uint16_t *buffersize, uint16_t *batimeout)
881 {
882 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
883 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
884 
885 	/* setup ADDBA response parameters */
886 	*dialogtoken = rx_tid->dialogtoken;
887 	*statuscode = rx_tid->statuscode;
888 	*buffersize = rx_tid->ba_win_size;
889 	*batimeout  = 0;
890 }
891 
892 /*
893 * dp_delba_process_wifi3() – Process DELBA from peer
894 * @peer: Datapath peer handle
895 * @tid: TID number
896 * @reasoncode: Reason code received in DELBA frame
897 *
898 * Return: 0 on success, error code on failure
899 */
900 int dp_delba_process_wifi3(void *peer_handle,
901 	int tid, uint16_t reasoncode)
902 {
903 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
904 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
905 
906 	if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
907 		return QDF_STATUS_E_FAILURE;
908 
909 	/* TODO: See if we can delete the existing REO queue descriptor and
910 	 * replace it with a new one without the queue extension descriptor to save
911 	 * memory
912 	 */
913 	dp_rx_tid_update_wifi3(peer, tid, 0, 0);
914 
915 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
916 
917 	return 0;
918 }
919 
920 void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
921 	qdf_nbuf_t msdu_list)
922 {
923 	while (msdu_list) {
924 		qdf_nbuf_t msdu = msdu_list;
925 
926 		msdu_list = qdf_nbuf_next(msdu_list);
927 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
928 			"discard rx %p from partly-deleted peer %p "
929 			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
930 			msdu, peer,
931 			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
932 			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
933 			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
934 		qdf_nbuf_free(msdu);
935 	}
936 }
937 
938 void
939 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
940 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
941 	u_int32_t *rx_pn)
942 {
943 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
944 	struct dp_peer *peer;
945 	int sec_index;
946 
947 	peer = dp_peer_find_by_id(soc, peer_id);
948 	if (!peer) {
949 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
950 			"Couldn't find peer from ID %d - skipping security inits\n",
951 			peer_id);
952 		return;
953 	}
954 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
955 		"sec spec for peer %p (%02x:%02x:%02x:%02x:%02x:%02x): "
956 		"%s key of type %d\n",
957 		peer,
958 		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
959 		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
960 		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
961 		is_unicast ? "ucast" : "mcast",
962 		sec_type);
963 	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
964 	peer->security[sec_index].sec_type = sec_type;
965 #ifdef notyet /* TODO: See if this is required for defrag support */
966 	/* michael key only valid for TKIP, but for simplicity,
967 	 * copy it anyway
968 	 */
969 	qdf_mem_copy(
970 		&peer->security[sec_index].michael_key[0],
971 		michael_key,
972 		sizeof(peer->security[sec_index].michael_key));
973 #ifdef BIG_ENDIAN_HOST
974 	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
975 				 sizeof(peer->security[sec_index].michael_key));
976 #endif /* BIG_ENDIAN_HOST */
977 #endif
978 
979 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
980 	if (sec_type != htt_sec_type_wapi) {
981 		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
982 	} else {
983 		for (i = 0; i < DP_MAX_TIDS; i++) {
984 			/*
985 			 * Setting PN valid bit for WAPI sec_type,
986 			 * since WAPI PN has to be started with predefined value
987 			 */
988 			peer->tids_last_pn_valid[i] = 1;
989 			qdf_mem_copy(
990 				(u_int8_t *) &peer->tids_last_pn[i],
991 				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
992 			peer->tids_last_pn[i].pn128[1] =
993 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
994 			peer->tids_last_pn[i].pn128[0] =
995 				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
996 		}
997 	}
998 #endif
999 	/* TODO: Update HW TID queue with PN check parameters (pn type for
1000 	 * all security types and last pn for WAPI) once REO command API
1001 	 * is available
1002 	 */
1003 }
1004 
1005 #ifndef CONFIG_WIN
1006 /**
1007  * dp_register_peer() - Register peer into physical device
1008  * @pdev - data path device instance
1009  * @sta_desc - peer description
1010  *
1011  * Register peer into physical device
1012  *
1013  * Return: QDF_STATUS_SUCCESS registration success
1014  *         QDF_STATUS_E_FAULT peer not found
1015  */
1016 QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
1017 		struct ol_txrx_desc_type *sta_desc)
1018 {
1019 	struct dp_peer *peer;
1020 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1021 
1022 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
1023 			sta_desc->sta_id);
1024 	if (!peer)
1025 		return QDF_STATUS_E_FAULT;
1026 
1027 	qdf_spin_lock_bh(&peer->peer_info_lock);
1028 	peer->state = OL_TXRX_PEER_STATE_CONN;
1029 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1030 
1031 	return QDF_STATUS_SUCCESS;
1032 }
1033 
1034 /**
1035  * dp_clear_peer() - remove peer from physical device
1036  * @pdev - data path device instance
1037  * @sta_id - local peer id
1038  *
1039  * remove peer from physical device
1040  *
1041  * Return: QDF_STATUS_SUCCESS removal success
1042  *         QDF_STATUS_E_FAULT peer not found
1043  */
1044 QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
1045 {
1046 	struct dp_peer *peer;
1047 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1048 
1049 	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
1050 	if (!peer)
1051 		return QDF_STATUS_E_FAULT;
1052 
1053 	qdf_spin_lock_bh(&peer->peer_info_lock);
1054 	peer->state = OL_TXRX_PEER_STATE_DISC;
1055 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1056 
1057 	return QDF_STATUS_SUCCESS;
1058 }
1059 
1060 /**
1061  * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
1062  * @pdev - data path device instance
1063  * @vdev - virtual interface instance
1064  * @peer_addr - peer mac address
1065  * @local_id - local id of the peer with the target mac address
1066  *
1067  * Find peer by peer mac address within vdev
1068  *
1069  * Return: peer instance void pointer
1070  *         NULL cannot find target peer
1071  */
1072 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
1073 		struct cdp_vdev *vdev_handle,
1074 		uint8_t *peer_addr, uint8_t *local_id)
1075 {
1076 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1077 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
1078 	struct dp_peer *peer;
1079 
1080 	DP_TRACE(INFO, "vdev %p peer_addr %p", vdev, peer_addr);
1081 	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0);
1082 	DP_TRACE(INFO, "peer %p vdev %p", peer, vdev);
1083 
1084 	if (!peer)
1085 		return NULL;
1086 
1087 	if (peer->vdev != vdev)
1088 		return NULL;
1089 
1090 	*local_id = peer->local_id;
1091 	DP_TRACE(INFO, "peer %p vdev %p local id %d", peer, vdev, *local_id);
1092 
1093 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1094 	 * Decrement it here.
1095 	 */
1096 	qdf_atomic_dec(&peer->ref_cnt);
1097 
1098 	return peer;
1099 }
1100 
1101 /**
1102  * dp_local_peer_id() - Find local peer id within peer instance
1103  * @peer - peer instance
1104  *
1105  * Find local peer id within peer instance
1106  *
1107  * Return: local peer id
1108  */
1109 uint16_t dp_local_peer_id(void *peer)
1110 {
1111 	return ((struct dp_peer *)peer)->local_id;
1112 }
1113 
1114 /**
1115  * dp_peer_find_by_local_id() - Find peer by local peer id
1116  * @pdev - data path device instance
1117  * @local_peer_id - local peer id want to find
1118  *
1119  * Find peer by local peer id within physical device
1120  *
1121  * Return: peer instance void pointer
1122  *         NULL cannot find target peer
1123  */
1124 void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
1125 {
1126 	struct dp_peer *peer;
1127 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1128 
1129 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
1130 	peer = pdev->local_peer_ids.map[local_id];
1131 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
1132 	DP_TRACE(INFO, "peer %p local id %d",
1133 			peer, local_id);
1134 	return peer;
1135 }
1136 
1137 /**
1138  * dp_peer_state_update() - update peer local state
1139  * @pdev - data path device instance
1140  * @peer_addr - peer mac address
1141  * @state - new peer local state
1142  *
1143  * update peer local state
1144  *
1145  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if peer not found
1146  */
1147 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
1148 		enum ol_txrx_peer_state state)
1149 {
1150 	struct dp_peer *peer;
1151 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1152 
1153 	peer =  dp_peer_find_hash_find(pdev->soc, peer_mac, 0);
1154 	if (NULL == peer) {
1155 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1156 		"Failed to find peer for: [%pM]", peer_mac);
1157 		return QDF_STATUS_E_FAILURE;
1158 	}
1159 	peer->state = state;
1160 
1161 	DP_TRACE(INFO, "peer %p state %d", peer, peer->state);
1162 	/* ref_cnt is incremented inside dp_peer_find_hash_find().
1163 	 * Decrement it here.
1164 	 */
1165 	qdf_atomic_dec(&peer->ref_cnt);
1166 
1167 	return QDF_STATUS_SUCCESS;
1168 }
1169 
1170 /**
1171  * dp_get_vdevid() - Get id of the virtual interface the peer is registered on
1172  * @peer - peer instance
1173  * @vdev_id - id of the virtual interface the peer is registered on
1174  *
1175  * Get id of the virtual interface the peer is registered on
1176  *
1177  * Return: QDF_STATUS_SUCCESS
1178  */
1179 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
1180 {
1181 	struct dp_peer *peer = peer_handle;
1182 
1183 	DP_TRACE(INFO, "peer %p vdev %p vdev id %d",
1184 			peer, peer->vdev, peer->vdev->vdev_id);
1185 	*vdev_id = peer->vdev->vdev_id;
1186 	return QDF_STATUS_SUCCESS;
1187 }
1188 
1189 /**
1190  * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
1191  * @peer - peer instance
1192  *
1193  * Get virtual interface instance to which the peer belongs
1194  *
1195  * Return: virtual interface instance pointer
1196  *         NULL in case cannot find
1197  */
1198 struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
1199 {
1200 	struct dp_peer *peer = peer_handle;
1201 
1202 	DP_TRACE(INFO, "peer %p vdev %p", peer, peer->vdev);
1203 	return (struct cdp_vdev *)peer->vdev;
1204 }
1205 
1206 /**
1207  * dp_peer_get_peer_mac_addr() - Get peer mac address
1208  * @peer - peer instance
1209  *
1210  * Get peer mac address
1211  *
1212  * Return: peer mac address pointer
1213  *         NULL in case cannot find
1214  */
1215 uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
1216 {
1217 	struct dp_peer *peer = peer_handle;
1218 	uint8_t *mac;
1219 
1220 	mac = peer->mac_addr.raw;
1221 	DP_TRACE(INFO, "peer %p mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
1222 		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1223 	return peer->mac_addr.raw;
1224 }
1225 
1226 /**
1227  * dp_get_peer_state() - Get local peer state
1228  * @peer - peer instance
1229  *
1230  * Get local peer state
1231  *
1232  * Return: peer status
1233  */
1234 int dp_get_peer_state(void *peer_handle)
1235 {
1236 	struct dp_peer *peer = peer_handle;
1237 
1238 	DP_TRACE(INFO, "peer %p state %d", peer, peer->state);
1239 	return peer->state;
1240 }
1241 
1242 /**
1243  * dp_get_last_assoc_received() - get time of last assoc received
1244  * @peer_handle: peer handle
1245  *
1246  * Return: pointer for the time of last assoc received
1247  */
1248 qdf_time_t *dp_get_last_assoc_received(void *peer_handle)
1249 {
1250 	struct dp_peer *peer = peer_handle;
1251 
1252 	DP_TRACE(INFO, "peer %p last_assoc_rcvd: %lu", peer,
1253 		peer->last_assoc_rcvd);
1254 	return &peer->last_assoc_rcvd;
1255 }
1256 
1257 /**
1258  * dp_get_last_disassoc_received() - get time of last disassoc received
1259  * @peer_handle: peer handle
1260  *
1261  * Return: pointer for the time of last disassoc received
1262  */
1263 qdf_time_t *dp_get_last_disassoc_received(void *peer_handle)
1264 {
1265 	struct dp_peer *peer = peer_handle;
1266 
1267 	DP_TRACE(INFO, "peer %p last_disassoc_rcvd: %lu", peer,
1268 		peer->last_disassoc_rcvd);
1269 	return &peer->last_disassoc_rcvd;
1270 }
1271 
1272 /**
1273  * dp_get_last_deauth_received() - get time of last deauth received
1274  * @peer_handle: peer handle
1275  *
1276  * Return: pointer for the time of last deauth received
1277  */
1278 qdf_time_t *dp_get_last_deauth_received(void *peer_handle)
1279 {
1280 	struct dp_peer *peer = peer_handle;
1281 
1282 	DP_TRACE(INFO, "peer %p last_deauth_rcvd: %lu", peer,
1283 		peer->last_deauth_rcvd);
1284 	return &peer->last_deauth_rcvd;
1285 }
1286 
1287 /**
1288  * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
1289  * @pdev - data path device instance
1290  *
1291  * local peer id pool alloc for physical device
1292  *
1293  * Return: none
1294  */
1295 void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
1296 {
1297 	int i;
1298 
1299 	/* point the freelist to the first ID */
1300 	pdev->local_peer_ids.freelist = 0;
1301 
1302 	/* link each ID to the next one */
1303 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
1304 		pdev->local_peer_ids.pool[i] = i + 1;
1305 		pdev->local_peer_ids.map[i] = NULL;
1306 	}
1307 
1308 	/* link the last ID to itself, to mark the end of the list */
1309 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
1310 	pdev->local_peer_ids.pool[i] = i;
1311 
1312 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
1313 	DP_TRACE(INFO, "Peer pool init");
1314 }
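
/*
 * Freelist layout illustration: if OL_TXRX_NUM_LOCAL_PEER_IDS were 4
 * (the real value comes from a header), the pool array after init is
 * [1, 2, 3, 4, 4] with freelist = 0: each slot holds the index of the
 * next free ID, and the final slot links to itself as the end-of-list
 * sentinel.
 */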
1315 
1316 /**
1317  * dp_local_peer_id_alloc() - allocate local peer id
1318  * @pdev - data path device instance
1319  * @peer - new peer instance
1320  *
1321  * allocate local peer id
1322  *
1323  * Return: none
1324  */
1325 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
1326 {
1327 	int i;
1328 
1329 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
1330 	i = pdev->local_peer_ids.freelist;
1331 	if (pdev->local_peer_ids.pool[i] == i) {
1332 		/* the list is empty, except for the list-end marker */
1333 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
1334 	} else {
1335 		/* take the head ID and advance the freelist */
1336 		peer->local_id = i;
1337 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
1338 		pdev->local_peer_ids.map[i] = peer;
1339 	}
1340 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
1341 	DP_TRACE(INFO, "peer %p, local id %d", peer, peer->local_id);
1342 }
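
/*
 * Continuing the illustration above: the first allocation hands out
 * ID 0 and advances freelist to pool[0] = 1; freeing ID 0 later pushes
 * it back onto the head, so IDs are recycled in LIFO order. Once
 * freelist reaches the self-linked sentinel (pool[i] == i), the pool is
 * exhausted and the peer gets OL_TXRX_INVALID_LOCAL_PEER_ID.
 */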
1343 
1344 /**
1345  * dp_local_peer_id_free() - remove local peer id
1346  * @pdev - data path device instance
1347  * @peer - peer instance should be removed
1348  *
1349  * remove local peer id
1350  *
1351  * Return: none
1352  */
1353 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
1354 {
1355 	int i = peer->local_id;
1356 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
1357 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
1358 		return;
1359 	}
1360 
1361 	/* put this ID on the head of the freelist */
1362 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
1363 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
1364 	pdev->local_peer_ids.freelist = i;
1365 	pdev->local_peer_ids.map[i] = NULL;
1366 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
1367 }
1368 #endif
1369