xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_txrx_peer_find.c (revision 0474a821f1d954abee806bf4b29a365b268298f5)
17090c5fdSPrakash Dhavali /*
2153db6d0SKarthik Kantamneni  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
3*0474a821SKarthik Kantamneni  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
47090c5fdSPrakash Dhavali  *
57090c5fdSPrakash Dhavali  * Permission to use, copy, modify, and/or distribute this software for
67090c5fdSPrakash Dhavali  * any purpose with or without fee is hereby granted, provided that the
77090c5fdSPrakash Dhavali  * above copyright notice and this permission notice appear in all
87090c5fdSPrakash Dhavali  * copies.
97090c5fdSPrakash Dhavali  *
107090c5fdSPrakash Dhavali  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
117090c5fdSPrakash Dhavali  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
127090c5fdSPrakash Dhavali  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
137090c5fdSPrakash Dhavali  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
147090c5fdSPrakash Dhavali  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
157090c5fdSPrakash Dhavali  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
167090c5fdSPrakash Dhavali  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
177090c5fdSPrakash Dhavali  * PERFORMANCE OF THIS SOFTWARE.
187090c5fdSPrakash Dhavali  */
197090c5fdSPrakash Dhavali 
207090c5fdSPrakash Dhavali /*=== includes ===*/
217090c5fdSPrakash Dhavali /* header files for OS primitives */
227090c5fdSPrakash Dhavali #include <osdep.h>              /* uint32_t, etc. */
23600c3a00SAnurag Chouhan #include <qdf_mem.h>         /* qdf_mem_malloc, etc. */
246d760664SAnurag Chouhan #include <qdf_types.h>          /* qdf_device_t, qdf_print */
257090c5fdSPrakash Dhavali /* header files for utilities */
2694ffbd10SAshish Kumar Dhanotiya #include "queue.h"         /* TAILQ */
277090c5fdSPrakash Dhavali 
287090c5fdSPrakash Dhavali /* header files for configuration API */
297090c5fdSPrakash Dhavali #include <ol_cfg.h>             /* ol_cfg_max_peer_id */
307090c5fdSPrakash Dhavali 
317090c5fdSPrakash Dhavali /* header files for our internal definitions */
327090c5fdSPrakash Dhavali #include <ol_txrx_api.h>        /* ol_txrx_pdev_t, etc. */
337090c5fdSPrakash Dhavali #include <ol_txrx_dbg.h>        /* TXRX_DEBUG_LEVEL */
347090c5fdSPrakash Dhavali #include <ol_txrx_internal.h>   /* ol_txrx_pdev_t, etc. */
35b7bec723SMohit Khanna #include <ol_txrx.h>            /* ol_txrx_peer_release_ref */
367090c5fdSPrakash Dhavali #include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
377090c5fdSPrakash Dhavali #include <ol_tx_queue.h>
38f918d42bSDeepak Dhamdhere #include "wlan_roam_debug.h"
397090c5fdSPrakash Dhavali 
407090c5fdSPrakash Dhavali /*=== misc. / utility function definitions ==================================*/
417090c5fdSPrakash Dhavali 
42512f3a14SYun Park static int ol_txrx_log2_ceil(unsigned int value)
437090c5fdSPrakash Dhavali {
447090c5fdSPrakash Dhavali 	/* use unsigned math so that right-shifting converges to 0
457090c5fdSPrakash Dhavali 	 * rather than sign-extending towards -1
467090c5fdSPrakash Dhavali 	 */
47512f3a14SYun Park 	unsigned int tmp = value;
487090c5fdSPrakash Dhavali 	int log2 = -1;
497090c5fdSPrakash Dhavali 
507090c5fdSPrakash Dhavali 	if (value == 0) {
517090c5fdSPrakash Dhavali 		TXRX_ASSERT2(0);
527090c5fdSPrakash Dhavali 		return 0;
537090c5fdSPrakash Dhavali 	}
547090c5fdSPrakash Dhavali 
557090c5fdSPrakash Dhavali 	while (tmp) {
567090c5fdSPrakash Dhavali 		log2++;
577090c5fdSPrakash Dhavali 		tmp >>= 1;
587090c5fdSPrakash Dhavali 	}
597090c5fdSPrakash Dhavali 	if (1U << log2 != value)
607090c5fdSPrakash Dhavali 		log2++;
617090c5fdSPrakash Dhavali 
627090c5fdSPrakash Dhavali 	return log2;
637090c5fdSPrakash Dhavali }
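
/*
 * Illustrative values (a sketch, not exercised by the driver itself):
 * ol_txrx_log2_ceil(1) == 0, ol_txrx_log2_ceil(2) == 1,
 * ol_txrx_log2_ceil(5) == 3 and ol_txrx_log2_ceil(8) == 3, i.e.
 * 1 << ol_txrx_log2_ceil(n) is the smallest power of two that is >= n.
 */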
647090c5fdSPrakash Dhavali 
65b7bec723SMohit Khanna int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
66b7bec723SMohit Khanna 			  enum peer_debug_id_type dbg_id)
67b04dfcd0SMohit Khanna {
68b7bec723SMohit Khanna 	int refs_dbg_id;
69b7bec723SMohit Khanna 
70b7bec723SMohit Khanna 	if (!peer) {
71b7bec723SMohit Khanna 		ol_txrx_err("peer is null for ID %d", dbg_id);
72b7bec723SMohit Khanna 		return -EINVAL;
73b7bec723SMohit Khanna 	}
74b7bec723SMohit Khanna 
75b7bec723SMohit Khanna 	if (dbg_id >= PEER_DEBUG_ID_MAX || dbg_id < 0) {
76b7bec723SMohit Khanna 		ol_txrx_err("incorrect debug_id %d", dbg_id);
77b7bec723SMohit Khanna 		return -EINVAL;
78b7bec723SMohit Khanna 	}
79b7bec723SMohit Khanna 
80b7bec723SMohit Khanna 	qdf_atomic_inc(&peer->ref_cnt);
81b7bec723SMohit Khanna 	qdf_atomic_inc(&peer->access_list[dbg_id]);
82b7bec723SMohit Khanna 	refs_dbg_id = qdf_atomic_read(&peer->access_list[dbg_id]);
833badb980SJingxiang Ge 
84b7bec723SMohit Khanna 	return refs_dbg_id;
85b04dfcd0SMohit Khanna }
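
/*
 * Usage sketch (illustrative only): each successful ol_txrx_peer_get_ref()
 * is expected to be balanced by an ol_txrx_peer_release_ref() using the
 * same debug ID, e.g.:
 *
 *   if (ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL) > 0) {
 *           ... use the peer ...
 *           ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *   }
 */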
86b04dfcd0SMohit Khanna 
877090c5fdSPrakash Dhavali /*=== function definitions for peer MAC addr --> peer object hash table =====*/
887090c5fdSPrakash Dhavali 
897090c5fdSPrakash Dhavali /*
907090c5fdSPrakash Dhavali  * TXRX_PEER_HASH_LOAD_MULT / TXRX_PEER_HASH_LOAD_SHIFT:
917090c5fdSPrakash Dhavali  * Multiply by 2 and divide by 2^0 (shift by 0), then round up to a
927090c5fdSPrakash Dhavali  * power of two.
937090c5fdSPrakash Dhavali  * This provides at least twice as many bins in the peer hash table
947090c5fdSPrakash Dhavali  * as there will be entries.
957090c5fdSPrakash Dhavali  * Having substantially more bins than entries minimizes the probability of
967090c5fdSPrakash Dhavali  * having to compare MAC addresses.
977090c5fdSPrakash Dhavali  * Because the MAC address comparison is fairly efficient, a densely loaded
987090c5fdSPrakash Dhavali  * table would still be acceptable, but it's generally better to use extra
997090c5fdSPrakash Dhavali  * memory to keep the table sparse, so the lookups stay as fast as possible.
1007090c5fdSPrakash Dhavali  * An optimization would be to apply a more conservative loading factor for
1017090c5fdSPrakash Dhavali  * high latency, where the lookup happens during the tx classification of
1027090c5fdSPrakash Dhavali  * every tx frame, than for low-latency, where the lookup only happens
1037090c5fdSPrakash Dhavali  * during association, when the PEER_MAP message is received.
1047090c5fdSPrakash Dhavali  */
1057090c5fdSPrakash Dhavali #define TXRX_PEER_HASH_LOAD_MULT  2
1067090c5fdSPrakash Dhavali #define TXRX_PEER_HASH_LOAD_SHIFT 0
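
/*
 * Worked example (illustrative numbers): if ol_cfg_max_peer_id() returns
 * 32, hash_elems below starts at 33, becomes 66 after the load factor
 * (33 * TXRX_PEER_HASH_LOAD_MULT >> TXRX_PEER_HASH_LOAD_SHIFT), and is
 * rounded up to 128 bins, giving peer_hash.mask = 127 and
 * peer_hash.idx_bits = 7.
 */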
1077090c5fdSPrakash Dhavali 
1087090c5fdSPrakash Dhavali static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
1097090c5fdSPrakash Dhavali {
1107090c5fdSPrakash Dhavali 	int i, hash_elems, log2;
1117090c5fdSPrakash Dhavali 
1127090c5fdSPrakash Dhavali 	/* allocate the peer MAC address -> peer object hash table */
1137090c5fdSPrakash Dhavali 	hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
1147090c5fdSPrakash Dhavali 	hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
1157090c5fdSPrakash Dhavali 	hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
1167090c5fdSPrakash Dhavali 	log2 = ol_txrx_log2_ceil(hash_elems);
1177090c5fdSPrakash Dhavali 	hash_elems = 1 << log2;
1187090c5fdSPrakash Dhavali 
1197090c5fdSPrakash Dhavali 	pdev->peer_hash.mask = hash_elems - 1;
1207090c5fdSPrakash Dhavali 	pdev->peer_hash.idx_bits = log2;
1217090c5fdSPrakash Dhavali 	/* allocate an array of TAILQ peer object lists */
1227090c5fdSPrakash Dhavali 	pdev->peer_hash.bins =
123600c3a00SAnurag Chouhan 		qdf_mem_malloc(hash_elems *
1247090c5fdSPrakash Dhavali 			       sizeof(TAILQ_HEAD(anonymous_tail_q,
1257090c5fdSPrakash Dhavali 						 ol_txrx_peer_t)));
1267090c5fdSPrakash Dhavali 	if (!pdev->peer_hash.bins)
1277090c5fdSPrakash Dhavali 		return 1;       /* failure */
1287090c5fdSPrakash Dhavali 
1297090c5fdSPrakash Dhavali 	for (i = 0; i < hash_elems; i++)
1307090c5fdSPrakash Dhavali 		TAILQ_INIT(&pdev->peer_hash.bins[i]);
1317090c5fdSPrakash Dhavali 
1327090c5fdSPrakash Dhavali 	return 0;               /* success */
1337090c5fdSPrakash Dhavali }
1347090c5fdSPrakash Dhavali 
1357090c5fdSPrakash Dhavali static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
1367090c5fdSPrakash Dhavali {
137600c3a00SAnurag Chouhan 	qdf_mem_free(pdev->peer_hash.bins);
1387090c5fdSPrakash Dhavali }
1397090c5fdSPrakash Dhavali 
1409e54d986SKrunal Soni static inline unsigned int
1417090c5fdSPrakash Dhavali ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
1427090c5fdSPrakash Dhavali 			     union ol_txrx_align_mac_addr_t *mac_addr)
1437090c5fdSPrakash Dhavali {
144512f3a14SYun Park 	unsigned int index;
1457090c5fdSPrakash Dhavali 
1467090c5fdSPrakash Dhavali 	index =
1477090c5fdSPrakash Dhavali 		mac_addr->align2.bytes_ab ^
1487090c5fdSPrakash Dhavali 		mac_addr->align2.bytes_cd ^ mac_addr->align2.bytes_ef;
1497090c5fdSPrakash Dhavali 	index ^= index >> pdev->peer_hash.idx_bits;
1507090c5fdSPrakash Dhavali 	index &= pdev->peer_hash.mask;
1517090c5fdSPrakash Dhavali 	return index;
1527090c5fdSPrakash Dhavali }
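
/*
 * The align2 view treats the 6-byte MAC address as three 16-bit words;
 * XORing the three words together mixes all 48 address bits into the
 * index, the extra fold by idx_bits spreads the high bits into the low
 * bits, and the final mask confines the result to [0, peer_hash.mask].
 */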
1537090c5fdSPrakash Dhavali 
1547090c5fdSPrakash Dhavali void
1557090c5fdSPrakash Dhavali ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
1567090c5fdSPrakash Dhavali 			   struct ol_txrx_peer_t *peer)
1577090c5fdSPrakash Dhavali {
158512f3a14SYun Park 	unsigned int index;
1597090c5fdSPrakash Dhavali 
1607090c5fdSPrakash Dhavali 	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
161a37b5b78SAnurag Chouhan 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
1627090c5fdSPrakash Dhavali 	/*
1637090c5fdSPrakash Dhavali 	 * It is important to add the new peer at the tail of the peer list
1647090c5fdSPrakash Dhavali 	 * for its bin index.  Together with having the hash_find functions
1657090c5fdSPrakash Dhavali 	 * search from head to tail, this ensures that if two entries with
1667090c5fdSPrakash Dhavali 	 * the same MAC address are stored, the one added first will be
1677090c5fdSPrakash Dhavali 	 * found first.
1687090c5fdSPrakash Dhavali 	 */
1697090c5fdSPrakash Dhavali 	TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
170a37b5b78SAnurag Chouhan 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
1717090c5fdSPrakash Dhavali }
1727090c5fdSPrakash Dhavali 
1737090c5fdSPrakash Dhavali struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
1747090c5fdSPrakash Dhavali 						   struct ol_txrx_vdev_t *vdev,
1757090c5fdSPrakash Dhavali 						   uint8_t *peer_mac_addr,
1767090c5fdSPrakash Dhavali 						   int mac_addr_is_aligned,
1777090c5fdSPrakash Dhavali 						   uint8_t check_valid)
1787090c5fdSPrakash Dhavali {
1797090c5fdSPrakash Dhavali 	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
180512f3a14SYun Park 	unsigned int index;
1817090c5fdSPrakash Dhavali 	struct ol_txrx_peer_t *peer;
1827090c5fdSPrakash Dhavali 
1837090c5fdSPrakash Dhavali 	if (mac_addr_is_aligned) {
1847090c5fdSPrakash Dhavali 		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
1857090c5fdSPrakash Dhavali 	} else {
186600c3a00SAnurag Chouhan 		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
187a47b45f9SSrinivas Girigowda 			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
1887090c5fdSPrakash Dhavali 		mac_addr = &local_mac_addr_aligned;
1897090c5fdSPrakash Dhavali 	}
1907090c5fdSPrakash Dhavali 	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
191a37b5b78SAnurag Chouhan 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
1927090c5fdSPrakash Dhavali 	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
1937090c5fdSPrakash Dhavali 		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
1947090c5fdSPrakash Dhavali 		    0 && (check_valid == 0 || peer->valid)
1957090c5fdSPrakash Dhavali 		    && peer->vdev == vdev) {
196b04dfcd0SMohit Khanna 			/* found it */
197b7bec723SMohit Khanna 			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
198a37b5b78SAnurag Chouhan 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
1997090c5fdSPrakash Dhavali 			return peer;
2007090c5fdSPrakash Dhavali 		}
2017090c5fdSPrakash Dhavali 	}
202a37b5b78SAnurag Chouhan 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2037090c5fdSPrakash Dhavali 	return NULL;            /* failure */
2047090c5fdSPrakash Dhavali }
2057090c5fdSPrakash Dhavali 
206babadb8bSMohit Khanna struct ol_txrx_peer_t *
207b7bec723SMohit Khanna 	ol_txrx_peer_find_hash_find_get_ref
208babadb8bSMohit Khanna 				(struct ol_txrx_pdev_t *pdev,
2097090c5fdSPrakash Dhavali 				uint8_t *peer_mac_addr,
2107090c5fdSPrakash Dhavali 				int mac_addr_is_aligned,
211b7bec723SMohit Khanna 				u8 check_valid,
212b7bec723SMohit Khanna 				enum peer_debug_id_type dbg_id)
2137090c5fdSPrakash Dhavali {
2147090c5fdSPrakash Dhavali 	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
215512f3a14SYun Park 	unsigned int index;
2167090c5fdSPrakash Dhavali 	struct ol_txrx_peer_t *peer;
2177090c5fdSPrakash Dhavali 
2187090c5fdSPrakash Dhavali 	if (mac_addr_is_aligned) {
2197090c5fdSPrakash Dhavali 		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
2207090c5fdSPrakash Dhavali 	} else {
221600c3a00SAnurag Chouhan 		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
222a47b45f9SSrinivas Girigowda 			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
2237090c5fdSPrakash Dhavali 		mac_addr = &local_mac_addr_aligned;
2247090c5fdSPrakash Dhavali 	}
2257090c5fdSPrakash Dhavali 	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
226a37b5b78SAnurag Chouhan 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2277090c5fdSPrakash Dhavali 	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
2287090c5fdSPrakash Dhavali 		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
2297090c5fdSPrakash Dhavali 		    0 && (check_valid == 0 || peer->valid)) {
230b04dfcd0SMohit Khanna 			/* found it */
231b7bec723SMohit Khanna 			ol_txrx_peer_get_ref(peer, dbg_id);
232a37b5b78SAnurag Chouhan 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2337090c5fdSPrakash Dhavali 			return peer;
2347090c5fdSPrakash Dhavali 		}
2357090c5fdSPrakash Dhavali 	}
236a37b5b78SAnurag Chouhan 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2377090c5fdSPrakash Dhavali 	return NULL;            /* failure */
2387090c5fdSPrakash Dhavali }
2397090c5fdSPrakash Dhavali 
2407090c5fdSPrakash Dhavali void
2417090c5fdSPrakash Dhavali ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
2427090c5fdSPrakash Dhavali 			      struct ol_txrx_peer_t *peer)
2437090c5fdSPrakash Dhavali {
244512f3a14SYun Park 	unsigned int index;
2457090c5fdSPrakash Dhavali 
2467090c5fdSPrakash Dhavali 	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
2477090c5fdSPrakash Dhavali 	/*
2487090c5fdSPrakash Dhavali 	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
2497090c5fdSPrakash Dhavali 	 * by the caller.
2507090c5fdSPrakash Dhavali 	 * The caller needs to hold the lock from the time the peer object's
2517090c5fdSPrakash Dhavali 	 * reference count is decremented and tested up through the time the
2527090c5fdSPrakash Dhavali 	 * reference to the peer object is removed from the hash table, by
2537090c5fdSPrakash Dhavali 	 * this function.
2547090c5fdSPrakash Dhavali 	 * Holding the lock only while removing the peer object reference
2557090c5fdSPrakash Dhavali 	 * from the hash table keeps the hash table consistent, but does not
2567090c5fdSPrakash Dhavali 	 * protect against a new HL tx context starting to use the peer object
2577090c5fdSPrakash Dhavali 	 * if it looks up the peer object from its MAC address just after the
2587090c5fdSPrakash Dhavali 	 * peer ref count is decremented to zero, but just before the peer
2597090c5fdSPrakash Dhavali 	 * object reference is removed from the hash table.
2607090c5fdSPrakash Dhavali 	 */
261a37b5b78SAnurag Chouhan 	/* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
2627090c5fdSPrakash Dhavali 	TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
263a37b5b78SAnurag Chouhan 	/* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
2647090c5fdSPrakash Dhavali }
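
/*
 * Expected caller pattern (a sketch only; the authoritative sequence is
 * the one implemented in ol_txrx_peer_release_ref):
 *
 *   qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 *   if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 *           ol_txrx_peer_find_hash_remove(pdev, peer);
 *           ... finish tearing down the peer ...
 *   }
 *   qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 */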
2657090c5fdSPrakash Dhavali 
2667090c5fdSPrakash Dhavali void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
2677090c5fdSPrakash Dhavali {
268512f3a14SYun Park 	unsigned int i;
2697090c5fdSPrakash Dhavali 	/*
2707090c5fdSPrakash Dhavali 	 * Not really necessary to take peer_ref_mutex lock - by this point,
2717090c5fdSPrakash Dhavali 	 * it's known that the pdev is no longer in use.
2727090c5fdSPrakash Dhavali 	 */
2737090c5fdSPrakash Dhavali 
2747090c5fdSPrakash Dhavali 	for (i = 0; i <= pdev->peer_hash.mask; i++) {
2757090c5fdSPrakash Dhavali 		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
2767090c5fdSPrakash Dhavali 			struct ol_txrx_peer_t *peer, *peer_next;
2777090c5fdSPrakash Dhavali 
2787090c5fdSPrakash Dhavali 			/*
2797090c5fdSPrakash Dhavali 			 * TAILQ_FOREACH_SAFE must be used here to avoid any
2807090c5fdSPrakash Dhavali 			 * memory access violation after peer is freed
2817090c5fdSPrakash Dhavali 			 */
2827090c5fdSPrakash Dhavali 			TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
2837090c5fdSPrakash Dhavali 					   hash_list_elem, peer_next) {
2847090c5fdSPrakash Dhavali 				/*
2857090c5fdSPrakash Dhavali 				 * Don't remove the peer from the hash table -
2867090c5fdSPrakash Dhavali 				 * that would modify the list we are currently
2877090c5fdSPrakash Dhavali 				 * traversing,
2887090c5fdSPrakash Dhavali 				 * and it's not necessary anyway.
2897090c5fdSPrakash Dhavali 				 */
2907090c5fdSPrakash Dhavali 				/*
2917090c5fdSPrakash Dhavali 				 * Artificially adjust the peer's ref count to
2927090c5fdSPrakash Dhavali 				 * 1, so it will get deleted by
293b7bec723SMohit Khanna 				 * ol_txrx_peer_release_ref.
2947090c5fdSPrakash Dhavali 				 */
2958e0ccd33SAnurag Chouhan 				qdf_atomic_init(&peer->ref_cnt); /* set to 0 */
296763f3963SDustin Brown 				ol_txrx_peer_get_ref(peer,
2971253c3d2SManjunathappa Prakash 						     PEER_DEBUG_ID_OL_HASH_ERS);
298763f3963SDustin Brown 				ol_txrx_peer_release_ref(peer,
2991253c3d2SManjunathappa Prakash 						     PEER_DEBUG_ID_OL_HASH_ERS);
3007090c5fdSPrakash Dhavali 			}
3017090c5fdSPrakash Dhavali 		}
3027090c5fdSPrakash Dhavali 	}
3037090c5fdSPrakash Dhavali }
3047090c5fdSPrakash Dhavali 
305153db6d0SKarthik Kantamneni void ol_txrx_peer_free_inactive_list(struct ol_txrx_pdev_t *pdev)
306153db6d0SKarthik Kantamneni {
307153db6d0SKarthik Kantamneni 	struct ol_txrx_peer_t *peer = NULL, *tmp;
308153db6d0SKarthik Kantamneni 
309153db6d0SKarthik Kantamneni 	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
310153db6d0SKarthik Kantamneni 	if (!TAILQ_EMPTY(&pdev->inactive_peer_list)) {
311153db6d0SKarthik Kantamneni 		TAILQ_FOREACH_SAFE(peer, &pdev->inactive_peer_list,
312153db6d0SKarthik Kantamneni 				   inactive_peer_list_elem, tmp) {
313153db6d0SKarthik Kantamneni 			qdf_atomic_init(&peer->del_ref_cnt); /* set to 0 */
314153db6d0SKarthik Kantamneni 			qdf_mem_free(peer);
315153db6d0SKarthik Kantamneni 		}
316153db6d0SKarthik Kantamneni 	}
317153db6d0SKarthik Kantamneni 	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
318153db6d0SKarthik Kantamneni }
319153db6d0SKarthik Kantamneni 
3207090c5fdSPrakash Dhavali /*=== function definitions for peer id --> peer object map ==================*/
3217090c5fdSPrakash Dhavali 
3227090c5fdSPrakash Dhavali static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
3237090c5fdSPrakash Dhavali {
3247090c5fdSPrakash Dhavali 	int max_peers, peer_map_size;
3257090c5fdSPrakash Dhavali 
3267090c5fdSPrakash Dhavali 	/* allocate the peer ID -> peer object map */
3277090c5fdSPrakash Dhavali 	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
3287090c5fdSPrakash Dhavali 	peer_map_size = max_peers * sizeof(pdev->peer_id_to_obj_map[0]);
329600c3a00SAnurag Chouhan 	pdev->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
3307090c5fdSPrakash Dhavali 	if (!pdev->peer_id_to_obj_map)
3317090c5fdSPrakash Dhavali 		return 1;       /* failure */
3327090c5fdSPrakash Dhavali 
3337090c5fdSPrakash Dhavali 	return 0;               /* success */
3347090c5fdSPrakash Dhavali }
3357090c5fdSPrakash Dhavali 
3367090c5fdSPrakash Dhavali static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
3377090c5fdSPrakash Dhavali {
338600c3a00SAnurag Chouhan 	qdf_mem_free(pdev->peer_id_to_obj_map);
3397090c5fdSPrakash Dhavali }
3407090c5fdSPrakash Dhavali 
341b0d2ddadSDeepak Dhamdhere /**
342b0d2ddadSDeepak Dhamdhere  * ol_txrx_peer_clear_map_peer() - Remove map entries that refer to a peer.
343b0d2ddadSDeepak Dhamdhere  * @pdev: pdev handle
344b0d2ddadSDeepak Dhamdhere  * @peer: peer for removing obj map entries
345b0d2ddadSDeepak Dhamdhere  *
346b0d2ddadSDeepak Dhamdhere  * Run through the entire peer_id_to_obj map and nullify all the entries
347b0d2ddadSDeepak Dhamdhere  * that map to a particular peer. Called before deleting the peer object.
348b0d2ddadSDeepak Dhamdhere  *
349b0d2ddadSDeepak Dhamdhere  * Return: None
350b0d2ddadSDeepak Dhamdhere  */
351b0d2ddadSDeepak Dhamdhere void ol_txrx_peer_clear_map_peer(ol_txrx_pdev_handle pdev,
352b0d2ddadSDeepak Dhamdhere 				 struct ol_txrx_peer_t *peer)
353b0d2ddadSDeepak Dhamdhere {
354b0d2ddadSDeepak Dhamdhere 	int max_peers;
355b0d2ddadSDeepak Dhamdhere 	int i;
356b0d2ddadSDeepak Dhamdhere 
357b0d2ddadSDeepak Dhamdhere 	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
358b0d2ddadSDeepak Dhamdhere 
359b0d2ddadSDeepak Dhamdhere 	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
360b0d2ddadSDeepak Dhamdhere 	for (i = 0; i < max_peers; i++) {
361b0d2ddadSDeepak Dhamdhere 		if (pdev->peer_id_to_obj_map[i].peer == peer) {
362b0d2ddadSDeepak Dhamdhere 			/* Found a map entry for this peer, clear it. */
363b0d2ddadSDeepak Dhamdhere 			pdev->peer_id_to_obj_map[i].peer = NULL;
364b0d2ddadSDeepak Dhamdhere 		}
365b0d2ddadSDeepak Dhamdhere 	}
366b0d2ddadSDeepak Dhamdhere 	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
367b0d2ddadSDeepak Dhamdhere }
368b0d2ddadSDeepak Dhamdhere 
369f74d6f8aSDeepak Dhamdhere /*
370f74d6f8aSDeepak Dhamdhere  * ol_txrx_peer_find_add_id() - Add peer_id entry to peer
371f74d6f8aSDeepak Dhamdhere  *
372f74d6f8aSDeepak Dhamdhere  * @pdev: Handle to pdev object
373f74d6f8aSDeepak Dhamdhere  * @peer_mac_addr: MAC address of peer provided by firmware
374f74d6f8aSDeepak Dhamdhere  * @peer_id: peer_id provided by firmware
375f74d6f8aSDeepak Dhamdhere  *
376f74d6f8aSDeepak Dhamdhere  * Search for peer object for the MAC address, add the peer_id to
377f74d6f8aSDeepak Dhamdhere  * its array of peer_id's and update the peer_id_to_obj map entry
378f74d6f8aSDeepak Dhamdhere  * for that peer_id. Increment corresponding reference counts.
379f74d6f8aSDeepak Dhamdhere  *
3800d3f1d62SPrakash Dhavali  * Riva/Pronto has one peer ID for each peer.
3810d3f1d62SPrakash Dhavali  * Peregrine/Rome has two peer IDs for each peer.
3820d3f1d62SPrakash Dhavali  * iHelium has up to three peer IDs for each peer.
3830d3f1d62SPrakash Dhavali  *
384f74d6f8aSDeepak Dhamdhere  * Return: None
385f74d6f8aSDeepak Dhamdhere  */
386f74d6f8aSDeepak Dhamdhere static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
3877090c5fdSPrakash Dhavali 				uint8_t *peer_mac_addr, uint16_t peer_id)
3887090c5fdSPrakash Dhavali {
3897090c5fdSPrakash Dhavali 	struct ol_txrx_peer_t *peer;
39037ffb293SMohit Khanna 	int status;
39137ffb293SMohit Khanna 	int i;
392b04dfcd0SMohit Khanna 	uint32_t peer_id_ref_cnt;
393b04dfcd0SMohit Khanna 	uint32_t peer_ref_cnt;
394e197744eSAlok Kumar 	u8 check_valid = 0;
395e197744eSAlok Kumar 
396e197744eSAlok Kumar 	if (pdev->enable_peer_unmap_conf_support)
397e197744eSAlok Kumar 		check_valid = 1;
3987090c5fdSPrakash Dhavali 
3997090c5fdSPrakash Dhavali 	/* check if there's already a peer object with this MAC address */
4007090c5fdSPrakash Dhavali 	peer =
401b7bec723SMohit Khanna 		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
402e197744eSAlok Kumar 						    1 /* is aligned */,
403e197744eSAlok Kumar 						    check_valid,
4041253c3d2SManjunathappa Prakash 						    PEER_DEBUG_ID_OL_PEER_MAP);
40547384bcdSMohit Khanna 
40647384bcdSMohit Khanna 	if (!peer || peer_id == HTT_INVALID_PEER) {
40737ffb293SMohit Khanna 		/*
40837ffb293SMohit Khanna 		 * Currently peer IDs are assigned for vdevs as well as peers.
40937ffb293SMohit Khanna 		 * If the peer ID is for a vdev, then we will fail to find a
41037ffb293SMohit Khanna 		 * peer with a matching MAC address.
41137ffb293SMohit Khanna 		 */
4127c8c171eSNirav Shah 		ol_txrx_err("peer not found or peer ID %d is invalid",
4137c8c171eSNirav Shah 			    peer_id);
414f918d42bSDeepak Dhamdhere 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
415f918d42bSDeepak Dhamdhere 				    DEBUG_PEER_MAP_EVENT,
416f918d42bSDeepak Dhamdhere 				    peer_id, peer_mac_addr,
417f918d42bSDeepak Dhamdhere 				    peer, 0, 0);
418f918d42bSDeepak Dhamdhere 
41937ffb293SMohit Khanna 		return;
42037ffb293SMohit Khanna 	}
42137ffb293SMohit Khanna 
42237ffb293SMohit Khanna 	qdf_spin_lock(&pdev->peer_map_unmap_lock);
42337ffb293SMohit Khanna 
4247090c5fdSPrakash Dhavali 	/* peer's ref count was already incremented by
42537ffb293SMohit Khanna 	 * ol_txrx_peer_find_hash_find_get_ref
42637ffb293SMohit Khanna 	 */
427f099e5e1SNirav Shah 	if (!pdev->peer_id_to_obj_map[peer_id].peer) {
428f099e5e1SNirav Shah 		pdev->peer_id_to_obj_map[peer_id].peer = peer;
429f099e5e1SNirav Shah 		qdf_atomic_init
430f099e5e1SNirav Shah 		  (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
431f099e5e1SNirav Shah 	}
432f099e5e1SNirav Shah 	qdf_atomic_inc
433f099e5e1SNirav Shah 		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
4343aee1314SMohit Khanna 
43537ffb293SMohit Khanna 	status = 1;
436f74d6f8aSDeepak Dhamdhere 
437f74d6f8aSDeepak Dhamdhere 	/* find a place in peer_id array and insert peer_id */
43837ffb293SMohit Khanna 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
43937ffb293SMohit Khanna 		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
44037ffb293SMohit Khanna 			peer->peer_ids[i] = peer_id;
44137ffb293SMohit Khanna 			status = 0;
44247384bcdSMohit Khanna 			break;
44337ffb293SMohit Khanna 		}
44437ffb293SMohit Khanna 	}
44537ffb293SMohit Khanna 
4460d3f1d62SPrakash Dhavali 	if (qdf_atomic_read(&peer->fw_create_pending) == 1) {
4470d3f1d62SPrakash Dhavali 		qdf_atomic_set(&peer->fw_create_pending, 0);
4480d3f1d62SPrakash Dhavali 	}
44947384bcdSMohit Khanna 
45047384bcdSMohit Khanna 	qdf_spin_unlock(&pdev->peer_map_unmap_lock);
45147384bcdSMohit Khanna 
452b04dfcd0SMohit Khanna 	peer_id_ref_cnt = qdf_atomic_read(&pdev->
453b04dfcd0SMohit Khanna 				peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
454b04dfcd0SMohit Khanna 	peer_ref_cnt = qdf_atomic_read(&peer->ref_cnt);
455b04dfcd0SMohit Khanna 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
456c13bfe08SJeff Johnson 	   "%s: peer %pK ID %d peer_id[%d] peer_id_ref_cnt %d peer->ref_cnt %d",
457b04dfcd0SMohit Khanna 	   __func__, peer, peer_id, i, peer_id_ref_cnt, peer_ref_cnt);
458f918d42bSDeepak Dhamdhere 	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
459f918d42bSDeepak Dhamdhere 			    DEBUG_PEER_MAP_EVENT,
460f918d42bSDeepak Dhamdhere 			    peer_id, &peer->mac_addr.raw, peer,
461f918d42bSDeepak Dhamdhere 			    peer_id_ref_cnt,
462f918d42bSDeepak Dhamdhere 			    peer_ref_cnt);
463f918d42bSDeepak Dhamdhere 
46437ffb293SMohit Khanna 
46537ffb293SMohit Khanna 	if (status) {
4667090c5fdSPrakash Dhavali 		/* TBDXXX: assert for now */
467c5548423SAnurag Chouhan 		qdf_assert(0);
4687090c5fdSPrakash Dhavali 	}
4697090c5fdSPrakash Dhavali }
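
/*
 * Bookkeeping sketch (illustrative): after two PEER_MAP events carrying
 * peer IDs 5 and 6 for the same MAC address, peer->peer_ids[] holds
 * {5, 6, ...}, both peer_id_to_obj_map entries point at the same peer
 * with peer_id_ref_cnt = 1 each, and peer->ref_cnt carries one extra
 * reference per map event (taken above with PEER_DEBUG_ID_OL_PEER_MAP).
 */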
4707090c5fdSPrakash Dhavali 
4717090c5fdSPrakash Dhavali /*=== allocation / deallocation function definitions ========================*/
4727090c5fdSPrakash Dhavali 
4737090c5fdSPrakash Dhavali int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
4747090c5fdSPrakash Dhavali {
4757090c5fdSPrakash Dhavali 	if (ol_txrx_peer_find_map_attach(pdev))
4767090c5fdSPrakash Dhavali 		return 1;
4777090c5fdSPrakash Dhavali 	if (ol_txrx_peer_find_hash_attach(pdev)) {
4787090c5fdSPrakash Dhavali 		ol_txrx_peer_find_map_detach(pdev);
4797090c5fdSPrakash Dhavali 		return 1;
4807090c5fdSPrakash Dhavali 	}
4817090c5fdSPrakash Dhavali 	return 0;               /* success */
4827090c5fdSPrakash Dhavali }
4837090c5fdSPrakash Dhavali 
4847090c5fdSPrakash Dhavali void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
4857090c5fdSPrakash Dhavali {
4867090c5fdSPrakash Dhavali 	ol_txrx_peer_find_map_detach(pdev);
4877090c5fdSPrakash Dhavali 	ol_txrx_peer_find_hash_detach(pdev);
4887090c5fdSPrakash Dhavali }
4897090c5fdSPrakash Dhavali 
490604b033eSAlok Kumar /**
491604b033eSAlok Kumar  * ol_txrx_peer_unmap_conf_handler() - send peer unmap conf cmd to FW
492604b033eSAlok Kumar  * @pdev: pdev_handle
493604b033eSAlok Kumar  * @peer_id: peer_id
494604b033eSAlok Kumar  *
495604b033eSAlok Kumar  * Return: None
496604b033eSAlok Kumar  */
497604b033eSAlok Kumar static inline void
498604b033eSAlok Kumar ol_txrx_peer_unmap_conf_handler(ol_txrx_pdev_handle pdev,
499604b033eSAlok Kumar 				uint16_t peer_id)
500604b033eSAlok Kumar {
501604b033eSAlok Kumar 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
502604b033eSAlok Kumar 
503604b033eSAlok Kumar 	if (peer_id == HTT_INVALID_PEER) {
504604b033eSAlok Kumar 		ol_txrx_err(
505604b033eSAlok Kumar 		   "invalid peer ID %d\n", peer_id);
506604b033eSAlok Kumar 		return;
507604b033eSAlok Kumar 	}
508604b033eSAlok Kumar 
509604b033eSAlok Kumar 	qdf_atomic_inc(&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
510604b033eSAlok Kumar 
511604b033eSAlok Kumar 	if (qdf_atomic_read(
512604b033eSAlok Kumar 		&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt) ==
513604b033eSAlok Kumar 		pdev->peer_id_unmap_ref_cnt) {
514604b033eSAlok Kumar 		ol_txrx_dbg("send unmap conf cmd: peer_id[%d] unmap_cnt[%d]",
515604b033eSAlok Kumar 			    peer_id, pdev->peer_id_unmap_ref_cnt);
516604b033eSAlok Kumar 		status = pdev->peer_unmap_sync_cb(
517604b033eSAlok Kumar 				DEBUG_INVALID_VDEV_ID,
518604b033eSAlok Kumar 				1, &peer_id);
519604b033eSAlok Kumar 
5205f9efa34SRakshith Suresh Patkar 		if (status == QDF_STATUS_SUCCESS ||
5215f9efa34SRakshith Suresh Patkar 		    status == QDF_STATUS_E_BUSY) {
522604b033eSAlok Kumar 			qdf_atomic_init(
523604b033eSAlok Kumar 			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
5245f9efa34SRakshith Suresh Patkar 		} else {
5255f9efa34SRakshith Suresh Patkar 			qdf_atomic_set(
5265f9efa34SRakshith Suresh Patkar 			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt,
5275f9efa34SRakshith Suresh Patkar 			OL_TXRX_INVALID_PEER_UNMAP_COUNT);
5285f9efa34SRakshith Suresh Patkar 			ol_txrx_err("unable to send unmap conf cmd [%d]",
5295f9efa34SRakshith Suresh Patkar 				    peer_id);
5305f9efa34SRakshith Suresh Patkar 		}
5315f9efa34SRakshith Suresh Patkar 
532604b033eSAlok Kumar 	}
533604b033eSAlok Kumar }
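
/*
 * Illustrative flow: with pdev->peer_id_unmap_ref_cnt == 3, the third
 * UNMAP event seen for a given peer ID triggers peer_unmap_sync_cb() for
 * that single ID; on QDF_STATUS_SUCCESS or QDF_STATUS_E_BUSY the per-ID
 * counter is reset to 0, otherwise it is parked at
 * OL_TXRX_INVALID_PEER_UNMAP_COUNT.
 */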
534604b033eSAlok Kumar 
5357090c5fdSPrakash Dhavali /*=== function definitions for message handling =============================*/
5367090c5fdSPrakash Dhavali 
537b2011f64SSiddarth Poddar #if defined(CONFIG_HL_SUPPORT)
538b2011f64SSiddarth Poddar 
5397090c5fdSPrakash Dhavali void
5407090c5fdSPrakash Dhavali ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
5417090c5fdSPrakash Dhavali 		       uint16_t peer_id,
5427090c5fdSPrakash Dhavali 		       uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
5437090c5fdSPrakash Dhavali {
5447090c5fdSPrakash Dhavali 	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
545b2011f64SSiddarth Poddar 	if (!tx_ready) {
546b2011f64SSiddarth Poddar 		struct ol_txrx_peer_t *peer;
547512f3a14SYun Park 
548b2011f64SSiddarth Poddar 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
549b2011f64SSiddarth Poddar 		if (!peer) {
550b2011f64SSiddarth Poddar 			/* ol_txrx_peer_detach called before peer map arrived*/
551b2011f64SSiddarth Poddar 			return;
552b2011f64SSiddarth Poddar 		} else {
553b2011f64SSiddarth Poddar 			if (tx_ready) {
554b2011f64SSiddarth Poddar 				int i;
555512f3a14SYun Park 
556b2011f64SSiddarth Poddar 				/* unpause all tx queues now, since the
557b2011f64SSiddarth Poddar 				 * target is ready
558b2011f64SSiddarth Poddar 				 */
559b2011f64SSiddarth Poddar 				for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs);
560b2011f64SSiddarth Poddar 									i++)
561b2011f64SSiddarth Poddar 					ol_txrx_peer_tid_unpause(peer, i);
562b2011f64SSiddarth Poddar 
563b2011f64SSiddarth Poddar 			} else {
564b2011f64SSiddarth Poddar 				/* walk through paused mgmt queue,
565b2011f64SSiddarth Poddar 				 * update tx descriptors
566b2011f64SSiddarth Poddar 				 */
567b2011f64SSiddarth Poddar 				ol_tx_queue_decs_reinit(peer, peer_id);
568b2011f64SSiddarth Poddar 
569b2011f64SSiddarth Poddar 				/* keep non-mgmt tx queues paused until assoc
570b2011f64SSiddarth Poddar 				 * is finished tx queues were paused in
571512f3a14SYun Park 				 * ol_txrx_peer_attach
572512f3a14SYun Park 				 */
573b2011f64SSiddarth Poddar 				/* unpause tx mgmt queue */
574b2011f64SSiddarth Poddar 				ol_txrx_peer_tid_unpause(peer,
575b2011f64SSiddarth Poddar 							 HTT_TX_EXT_TID_MGMT);
576b2011f64SSiddarth Poddar 			}
577b2011f64SSiddarth Poddar 		}
578b2011f64SSiddarth Poddar 	}
5797090c5fdSPrakash Dhavali }
5807090c5fdSPrakash Dhavali 
5817090c5fdSPrakash Dhavali void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
5827090c5fdSPrakash Dhavali {
583b2011f64SSiddarth Poddar 	struct ol_txrx_peer_t *peer;
5844d65ebe9SSrinivas Girigowda 
585b2011f64SSiddarth Poddar 	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
586b2011f64SSiddarth Poddar 	if (peer) {
587b2011f64SSiddarth Poddar 		int i;
588b2011f64SSiddarth Poddar 		/*
589b2011f64SSiddarth Poddar 		 * Unpause all data tx queues now that the target is ready.
590b2011f64SSiddarth Poddar 		 * The mgmt tx queue was not paused, so skip it.
591b2011f64SSiddarth Poddar 		 */
592b2011f64SSiddarth Poddar 		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
593b2011f64SSiddarth Poddar 			if (i == HTT_TX_EXT_TID_MGMT)
594b2011f64SSiddarth Poddar 				continue; /* mgmt tx queue was not paused */
595b2011f64SSiddarth Poddar 
596b2011f64SSiddarth Poddar 			ol_txrx_peer_tid_unpause(peer, i);
5977090c5fdSPrakash Dhavali 		}
598b2011f64SSiddarth Poddar 	}
599b2011f64SSiddarth Poddar }
600b2011f64SSiddarth Poddar #else
601b2011f64SSiddarth Poddar 
602b2011f64SSiddarth Poddar void
603b2011f64SSiddarth Poddar ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
604b2011f64SSiddarth Poddar 		       uint16_t peer_id,
605b2011f64SSiddarth Poddar 		       uint8_t vdev_id,
606b2011f64SSiddarth Poddar 		       uint8_t *peer_mac_addr,
607b2011f64SSiddarth Poddar 		       int tx_ready)
608b2011f64SSiddarth Poddar {
609b2011f64SSiddarth Poddar 	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
610b2011f64SSiddarth Poddar }
611b2011f64SSiddarth Poddar 
612b2011f64SSiddarth Poddar void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
613b2011f64SSiddarth Poddar {
614b2011f64SSiddarth Poddar }
615b2011f64SSiddarth Poddar 
616b2011f64SSiddarth Poddar #endif
617b2011f64SSiddarth Poddar 
618f74d6f8aSDeepak Dhamdhere /*
619f74d6f8aSDeepak Dhamdhere  * ol_rx_peer_unmap_handler() - Handle peer unmap event from firmware
620f74d6f8aSDeepak Dhamdhere  *
621f74d6f8aSDeepak Dhamdhere  * @pdev: Handle to pdev object
622f74d6f8aSDeepak Dhamdhere  * @peer_id: peer_id unmapped by firmware
623f74d6f8aSDeepak Dhamdhere  *
624f74d6f8aSDeepak Dhamdhere  * Decrement reference count for the peer_id in peer_id_to_obj_map,
625f74d6f8aSDeepak Dhamdhere  * decrement reference count in corresponding peer object and clear the entry
626f74d6f8aSDeepak Dhamdhere  * in peer's peer_ids array.
627f74d6f8aSDeepak Dhamdhere  * In case of unmap events for a peer that is already deleted, just decrement
628f74d6f8aSDeepak Dhamdhere  * del_peer_id_ref_cnt.
629f74d6f8aSDeepak Dhamdhere  *
630f74d6f8aSDeepak Dhamdhere  * Return: None
631f74d6f8aSDeepak Dhamdhere  */
6327090c5fdSPrakash Dhavali void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
6337090c5fdSPrakash Dhavali {
6347090c5fdSPrakash Dhavali 	struct ol_txrx_peer_t *peer;
63537ffb293SMohit Khanna 	int i = 0;
6363e766e05SPoddar, Siddarth 	int32_t ref_cnt;
637153db6d0SKarthik Kantamneni 	int del_ref_cnt;
638f099e5e1SNirav Shah 
63937ffb293SMohit Khanna 	if (peer_id == HTT_INVALID_PEER) {
64014521793SPoddar, Siddarth 		ol_txrx_err(
6417c8c171eSNirav Shah 		   "invalid peer ID %d\n", peer_id);
642f918d42bSDeepak Dhamdhere 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
643f918d42bSDeepak Dhamdhere 				    DEBUG_PEER_UNMAP_EVENT,
644f918d42bSDeepak Dhamdhere 				    peer_id, NULL, NULL, 0, 0x100);
64537ffb293SMohit Khanna 		return;
646f099e5e1SNirav Shah 	}
64737ffb293SMohit Khanna 
648f74d6f8aSDeepak Dhamdhere 	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
64947384bcdSMohit Khanna 
650604b033eSAlok Kumar 	/* send peer unmap conf cmd to fw for unmapped peer_ids */
651604b033eSAlok Kumar 	if (pdev->enable_peer_unmap_conf_support &&
652604b033eSAlok Kumar 	    pdev->peer_unmap_sync_cb)
653604b033eSAlok Kumar 		ol_txrx_peer_unmap_conf_handler(pdev, peer_id);
654604b033eSAlok Kumar 
655f74d6f8aSDeepak Dhamdhere 	if (qdf_atomic_read(
656f74d6f8aSDeepak Dhamdhere 		&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
657f74d6f8aSDeepak Dhamdhere 		/* This peer_id belongs to a peer already deleted */
658153db6d0SKarthik Kantamneni 		peer = pdev->peer_id_to_obj_map[peer_id].del_peer;
659153db6d0SKarthik Kantamneni 		if (qdf_atomic_dec_and_test
660153db6d0SKarthik Kantamneni 		    (&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
661153db6d0SKarthik Kantamneni 			pdev->peer_id_to_obj_map[peer_id].del_peer = NULL;
662153db6d0SKarthik Kantamneni 		}
663153db6d0SKarthik Kantamneni 
664153db6d0SKarthik Kantamneni 		del_ref_cnt = qdf_atomic_read(&peer->del_ref_cnt);
665153db6d0SKarthik Kantamneni 		if (qdf_atomic_dec_and_test(&peer->del_ref_cnt)) {
666153db6d0SKarthik Kantamneni 			TAILQ_REMOVE(&pdev->inactive_peer_list, peer,
667153db6d0SKarthik Kantamneni 				     inactive_peer_list_elem);
668153db6d0SKarthik Kantamneni 			qdf_mem_free(peer);
669153db6d0SKarthik Kantamneni 		}
670153db6d0SKarthik Kantamneni 		del_ref_cnt--;
671153db6d0SKarthik Kantamneni 
6723e766e05SPoddar, Siddarth 		ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
6733e766e05SPoddar, Siddarth 							del_peer_id_ref_cnt);
6741525bb9cSHimanshu Agarwal 		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
675f918d42bSDeepak Dhamdhere 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
676f918d42bSDeepak Dhamdhere 				    DEBUG_PEER_UNMAP_EVENT,
677f918d42bSDeepak Dhamdhere 				    peer_id, NULL, NULL, ref_cnt, 0x101);
678153db6d0SKarthik Kantamneni 		ol_txrx_dbg("peer already deleted, peer_id %d del_ref_cnt:%d del_peer_id_ref_cnt %d",
679153db6d0SKarthik Kantamneni 			    peer_id, del_ref_cnt, ref_cnt);
680f74d6f8aSDeepak Dhamdhere 		return;
681f74d6f8aSDeepak Dhamdhere 	}
68237ffb293SMohit Khanna 	peer = pdev->peer_id_to_obj_map[peer_id].peer;
68337ffb293SMohit Khanna 
6846795c3a9SJeff Johnson 	if (!peer) {
6857090c5fdSPrakash Dhavali 		/*
6867090c5fdSPrakash Dhavali 		 * Currently peer IDs are assigned for vdevs as well as peers.
6877090c5fdSPrakash Dhavali 		 * If the peer ID is for a vdev, then the peer pointer stored
6887090c5fdSPrakash Dhavali 		 * in peer_id_to_obj_map will be NULL.
6897090c5fdSPrakash Dhavali 		 */
6901525bb9cSHimanshu Agarwal 		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
6917c8c171eSNirav Shah 		ol_txrx_info("peer not found for peer_id %d", peer_id);
692f918d42bSDeepak Dhamdhere 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
693f918d42bSDeepak Dhamdhere 				    DEBUG_PEER_UNMAP_EVENT,
694f918d42bSDeepak Dhamdhere 				    peer_id, NULL, NULL, 0, 0x102);
6957090c5fdSPrakash Dhavali 		return;
69637ffb293SMohit Khanna 	}
69737ffb293SMohit Khanna 
69837ffb293SMohit Khanna 	if (qdf_atomic_dec_and_test
69937ffb293SMohit Khanna 		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
70037ffb293SMohit Khanna 		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
70137ffb293SMohit Khanna 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
70237ffb293SMohit Khanna 			if (peer->peer_ids[i] == peer_id) {
70337ffb293SMohit Khanna 				peer->peer_ids[i] = HTT_INVALID_PEER;
70437ffb293SMohit Khanna 				break;
70537ffb293SMohit Khanna 			}
70637ffb293SMohit Khanna 		}
7075c656344SSravan Kumar Kairam 	}
7083e766e05SPoddar, Siddarth 
7093e766e05SPoddar, Siddarth 	ref_cnt = qdf_atomic_read
7103e766e05SPoddar, Siddarth 		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
7113e766e05SPoddar, Siddarth 
7123e766e05SPoddar, Siddarth 	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
71337ffb293SMohit Khanna 
714f918d42bSDeepak Dhamdhere 	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
715f918d42bSDeepak Dhamdhere 			    DEBUG_PEER_UNMAP_EVENT,
716f918d42bSDeepak Dhamdhere 			    peer_id, &peer->mac_addr.raw, peer, ref_cnt,
717f918d42bSDeepak Dhamdhere 			    qdf_atomic_read(&peer->ref_cnt));
718f918d42bSDeepak Dhamdhere 
71937ffb293SMohit Khanna 	/*
72037ffb293SMohit Khanna 	 * Remove a reference to the peer.
72137ffb293SMohit Khanna 	 * If there are no more references, delete the peer object.
72237ffb293SMohit Khanna 	 */
7231253c3d2SManjunathappa Prakash 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
724f74d6f8aSDeepak Dhamdhere 
725dd51e8d6SVarun Reddy Yeturu 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
726c13bfe08SJeff Johnson 		  "%s: peer_id %d peer %pK peer_id_ref_cnt %d",
7273e766e05SPoddar, Siddarth 		  __func__, peer_id, peer, ref_cnt);
728f74d6f8aSDeepak Dhamdhere }
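
/*
 * Illustrative outcome for a live peer whose ID was mapped exactly once:
 * peer_id_ref_cnt drops to zero, the peer_id_to_obj_map entry and the
 * matching peer_ids[] slot are cleared, and the reference taken at map
 * time (PEER_DEBUG_ID_OL_PEER_MAP) is released, which may free the peer.
 */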
729f74d6f8aSDeepak Dhamdhere 
730f74d6f8aSDeepak Dhamdhere /**
731f74d6f8aSDeepak Dhamdhere  * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
732f74d6f8aSDeepak Dhamdhere  * @pdev: pdev handle
733d40f4b1bSDeepak Dhamdhere  * @peer: peer for removing obj map entries
734d40f4b1bSDeepak Dhamdhere  *
735d40f4b1bSDeepak Dhamdhere  * Saves peer_id_ref_cnt to a different field and removes the link
736d40f4b1bSDeepak Dhamdhere  * to the peer object. It also decrements the peer reference count by
737d40f4b1bSDeepak Dhamdhere  * the number of references removed.
738f74d6f8aSDeepak Dhamdhere  *
739f74d6f8aSDeepak Dhamdhere  * Return: None
740f74d6f8aSDeepak Dhamdhere  */
741f74d6f8aSDeepak Dhamdhere void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
742f74d6f8aSDeepak Dhamdhere 					struct ol_txrx_peer_t *peer)
743f74d6f8aSDeepak Dhamdhere {
744f74d6f8aSDeepak Dhamdhere 	int i;
745f74d6f8aSDeepak Dhamdhere 	uint16_t peer_id;
746f74d6f8aSDeepak Dhamdhere 	int32_t peer_id_ref_cnt;
747d40f4b1bSDeepak Dhamdhere 	int32_t num_deleted_maps = 0;
74866bb63ddSDeepak Dhamdhere 	uint16_t save_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
7493cf1f1ceSKrunal Soni 	uint16_t save_peer_id_ref_cnt[MAX_NUM_PEER_ID_PER_PEER] = {0};
750f74d6f8aSDeepak Dhamdhere 
751d40f4b1bSDeepak Dhamdhere 	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
752f74d6f8aSDeepak Dhamdhere 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
753f74d6f8aSDeepak Dhamdhere 		peer_id = peer->peer_ids[i];
75466bb63ddSDeepak Dhamdhere 		save_peer_ids[i] = HTT_INVALID_PEER;
755f74d6f8aSDeepak Dhamdhere 		if (peer_id == HTT_INVALID_PEER ||
7566795c3a9SJeff Johnson 			!pdev->peer_id_to_obj_map[peer_id].peer) {
757f74d6f8aSDeepak Dhamdhere 			/* unused peer_id, or object is already dereferenced */
758f74d6f8aSDeepak Dhamdhere 			continue;
759f74d6f8aSDeepak Dhamdhere 		}
760f74d6f8aSDeepak Dhamdhere 		if (pdev->peer_id_to_obj_map[peer_id].peer != peer) {
761f74d6f8aSDeepak Dhamdhere 			QDF_TRACE(QDF_MODULE_ID_TXRX,
762f74d6f8aSDeepak Dhamdhere 				QDF_TRACE_LEVEL_ERROR,
763f74d6f8aSDeepak Dhamdhere 				FL("peer pointer mismatch in peer_id_to_obj"));
764f74d6f8aSDeepak Dhamdhere 			continue;
765f74d6f8aSDeepak Dhamdhere 		}
766f74d6f8aSDeepak Dhamdhere 		peer_id_ref_cnt = qdf_atomic_read(
767f74d6f8aSDeepak Dhamdhere 					&pdev->peer_id_to_obj_map[peer_id].
768f74d6f8aSDeepak Dhamdhere 						peer_id_ref_cnt);
76966bb63ddSDeepak Dhamdhere 		save_peer_ids[i] = peer_id;
77066bb63ddSDeepak Dhamdhere 		save_peer_id_ref_cnt[i] = peer_id_ref_cnt;
77166bb63ddSDeepak Dhamdhere 
772f74d6f8aSDeepak Dhamdhere 		/*
773f74d6f8aSDeepak Dhamdhere 		 * Transfer peer_id_ref_cnt into del_peer_id_ref_cnt so that
774b7bec723SMohit Khanna 		 * ol_txrx_peer_release_ref will decrement del_peer_id_ref_cnt
775f74d6f8aSDeepak Dhamdhere 		 * and any map events will increment peer_id_ref_cnt. Otherwise
776f74d6f8aSDeepak Dhamdhere 		 * accounting will be messed up.
777f74d6f8aSDeepak Dhamdhere 		 *
778f74d6f8aSDeepak Dhamdhere 		 * Add operation will ensure that back to back roaming in the
779f74d6f8aSDeepak Dhamdhere 		 * middle of unmap/map event sequence will be accounted for.
780f74d6f8aSDeepak Dhamdhere 		 */
781f74d6f8aSDeepak Dhamdhere 		qdf_atomic_add(peer_id_ref_cnt,
782f74d6f8aSDeepak Dhamdhere 			&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt);
783f74d6f8aSDeepak Dhamdhere 		qdf_atomic_init(&pdev->peer_id_to_obj_map[peer_id].
784f74d6f8aSDeepak Dhamdhere 				peer_id_ref_cnt);
785d40f4b1bSDeepak Dhamdhere 		num_deleted_maps += peer_id_ref_cnt;
786f74d6f8aSDeepak Dhamdhere 		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
787153db6d0SKarthik Kantamneni 		pdev->peer_id_to_obj_map[peer_id].del_peer = peer;
788f74d6f8aSDeepak Dhamdhere 		peer->peer_ids[i] = HTT_INVALID_PEER;
789f74d6f8aSDeepak Dhamdhere 	}
790153db6d0SKarthik Kantamneni 	qdf_atomic_init(&peer->del_ref_cnt);
791*0474a821SKarthik Kantamneni 	if (num_deleted_maps != 0) {
792153db6d0SKarthik Kantamneni 		qdf_atomic_add(num_deleted_maps, &peer->del_ref_cnt);
793153db6d0SKarthik Kantamneni 		TAILQ_INSERT_TAIL(&pdev->inactive_peer_list, peer,
794153db6d0SKarthik Kantamneni 				  inactive_peer_list_elem);
795*0474a821SKarthik Kantamneni 	}
796d40f4b1bSDeepak Dhamdhere 	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
797d40f4b1bSDeepak Dhamdhere 
79866bb63ddSDeepak Dhamdhere 	/* Debug print the information after releasing bh spinlock */
79966bb63ddSDeepak Dhamdhere 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
80066bb63ddSDeepak Dhamdhere 		if (save_peer_ids[i] == HTT_INVALID_PEER)
80166bb63ddSDeepak Dhamdhere 			continue;
80266bb63ddSDeepak Dhamdhere 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
80366bb63ddSDeepak Dhamdhere 			  FL("peer_id = %d, peer_id_ref_cnt = %d, index = %d"),
80466bb63ddSDeepak Dhamdhere 			  save_peer_ids[i], save_peer_id_ref_cnt[i], i);
80566bb63ddSDeepak Dhamdhere 	}
80666bb63ddSDeepak Dhamdhere 
8074cdbf7d8SPadma, Santhosh Kumar 	if (num_deleted_maps > qdf_atomic_read(&peer->ref_cnt)) {
8084cdbf7d8SPadma, Santhosh Kumar 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8094cdbf7d8SPadma, Santhosh Kumar 			  FL("num_deleted_maps %d ref_cnt %d"),
8104cdbf7d8SPadma, Santhosh Kumar 			  num_deleted_maps, qdf_atomic_read(&peer->ref_cnt));
8114cdbf7d8SPadma, Santhosh Kumar 		QDF_BUG(0);
8124cdbf7d8SPadma, Santhosh Kumar 		return;
8134cdbf7d8SPadma, Santhosh Kumar 	}
8144cdbf7d8SPadma, Santhosh Kumar 
815d40f4b1bSDeepak Dhamdhere 	while (num_deleted_maps-- > 0)
81658b641e8SJianmin Zhu 		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
8177090c5fdSPrakash Dhavali }
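
/*
 * Worked example (illustrative): a peer holding peer IDs 5 and 6, each
 * with peer_id_ref_cnt = 1, ends up with del_peer_id_ref_cnt = 1 on both
 * map entries, peer->del_ref_cnt = 2, a place on
 * pdev->inactive_peer_list, and two ol_txrx_peer_release_ref() calls
 * dropping the references that the earlier map events had taken.
 */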
8187090c5fdSPrakash Dhavali 
8197090c5fdSPrakash Dhavali struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
8207090c5fdSPrakash Dhavali {
8217090c5fdSPrakash Dhavali 	struct ol_txrx_peer_t *peer;
8227090c5fdSPrakash Dhavali 
823a37b5b78SAnurag Chouhan 	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
8247090c5fdSPrakash Dhavali 	/*
8257090c5fdSPrakash Dhavali 	 * Check the TXRX Peer is itself valid And also
8267090c5fdSPrakash Dhavali 	 * if HTT Peer ID has been setup for this peer
8277090c5fdSPrakash Dhavali 	 */
8287090c5fdSPrakash Dhavali 	if (vdev->last_real_peer
8297090c5fdSPrakash Dhavali 	    && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
8304362e46cSFrank Liu 		qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
831b7bec723SMohit Khanna 		ol_txrx_peer_get_ref(vdev->last_real_peer,
832b7bec723SMohit Khanna 				     PEER_DEBUG_ID_OL_INTERNAL);
8334362e46cSFrank Liu 		qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
8347090c5fdSPrakash Dhavali 		peer = vdev->last_real_peer;
8357090c5fdSPrakash Dhavali 	} else {
8367090c5fdSPrakash Dhavali 		peer = NULL;
8377090c5fdSPrakash Dhavali 	}
838a37b5b78SAnurag Chouhan 	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
8397090c5fdSPrakash Dhavali 	return peer;
8407090c5fdSPrakash Dhavali }
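
/*
 * Note (illustrative): a non-NULL return carries a reference taken with
 * PEER_DEBUG_ID_OL_INTERNAL; the caller is expected to drop it via
 * ol_txrx_peer_release_ref() once it is done with the peer.
 */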
8417090c5fdSPrakash Dhavali 
842b0d2ddadSDeepak Dhamdhere 
8437090c5fdSPrakash Dhavali /*=== function definitions for debug ========================================*/
8447090c5fdSPrakash Dhavali 
8457090c5fdSPrakash Dhavali #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
8467090c5fdSPrakash Dhavali void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
8477090c5fdSPrakash Dhavali {
8487090c5fdSPrakash Dhavali 	int i, max_peers;
8497090c5fdSPrakash Dhavali 
850b2dc16feSAnurag Chouhan 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
8517090c5fdSPrakash Dhavali 		  "%*speer map:\n", indent, " ");
8527090c5fdSPrakash Dhavali 	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
8537090c5fdSPrakash Dhavali 	for (i = 0; i < max_peers; i++) {
854f099e5e1SNirav Shah 		if (pdev->peer_id_to_obj_map[i].peer) {
855b2dc16feSAnurag Chouhan 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
856c13bfe08SJeff Johnson 				  "%*sid %d -> %pK\n",
8577090c5fdSPrakash Dhavali 				  indent + 4, " ", i,
858f099e5e1SNirav Shah 				  pdev->peer_id_to_obj_map[i].peer);
8597090c5fdSPrakash Dhavali 		}
8607090c5fdSPrakash Dhavali 	}
861b2dc16feSAnurag Chouhan 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
8627090c5fdSPrakash Dhavali 		  "%*speer hash table:\n", indent, " ");
8637090c5fdSPrakash Dhavali 	for (i = 0; i <= pdev->peer_hash.mask; i++) {
8647090c5fdSPrakash Dhavali 		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
8657090c5fdSPrakash Dhavali 			struct ol_txrx_peer_t *peer;
866512f3a14SYun Park 
8677090c5fdSPrakash Dhavali 			TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
8687090c5fdSPrakash Dhavali 				      hash_list_elem) {
869b2dc16feSAnurag Chouhan 				QDF_TRACE(QDF_MODULE_ID_TXRX,
870b2dc16feSAnurag Chouhan 					  QDF_TRACE_LEVEL_INFO_LOW,
871451c5f86SSrinivas Girigowda 					  "%*shash idx %d -> %pK ("QDF_MAC_ADDR_FMT")\n",
8727090c5fdSPrakash Dhavali 					indent + 4, " ", i, peer,
873451c5f86SSrinivas Girigowda 					QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8747090c5fdSPrakash Dhavali 			}
8757090c5fdSPrakash Dhavali 		}
8767090c5fdSPrakash Dhavali 	}
8777090c5fdSPrakash Dhavali }
878f74d6f8aSDeepak Dhamdhere 
8797090c5fdSPrakash Dhavali #endif /* if TXRX_DEBUG_LEVEL */
880