xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision 06f9ae280111c1da9db0809f457076854959e02d)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 
27 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
28 #include "hal_reo.h"
29 #endif
30 
31 #define DP_INVALID_PEER_ID 0xffff
32 
33 #define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
34 #define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */
35 
36 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
37 
38 #define DP_PEER_HASH_LOAD_MULT  2
39 #define DP_PEER_HASH_LOAD_SHIFT 0
40 
41 /* Threshold for peer's cached buf queue beyond which frames are dropped */
42 #define DP_RX_CACHED_BUFQ_THRESH 64
43 
44 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
45 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
46 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
47 #define dp_peer_info(params...) \
48 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
49 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
50 
51 void check_free_list_for_invalid_flush(struct dp_soc *soc);
52 
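/**
 * add_entry_alloc_list() - record a REO queue descriptor allocation in the
 *			    circular qdesc-alloc debug list
 * @soc: SoC handle
 * @rx_tid: rx_tid whose hardware queue descriptor was allocated
 * @peer: peer to which the rx_tid belongs
 * @hw_qdesc_vaddr: aligned virtual address of the allocated descriptor
 *
 * The descriptor addresses, peer id, tid and a timestamp are stored at the
 * current alloc list index; the list wraps around after
 * wlan_cfg qref_control_size entries and nothing is recorded when
 * qref_control_size is 0.
 *
 * Return: None
 */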
53 static inline
54 void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
55 			  struct dp_peer *peer, void *hw_qdesc_vaddr)
56 {
57 	uint32_t max_list_size;
58 	unsigned long curr_ts = qdf_get_system_timestamp();
59 	uint32_t qref_index = soc->alloc_addr_list_idx;
60 
61 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
62 
63 	if (max_list_size == 0)
64 		return;
65 
66 	soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
67 							 rx_tid->hw_qdesc_paddr;
68 	soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
69 	soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
70 								 hw_qdesc_vaddr;
71 	soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
72 					       rx_tid->hw_qdesc_vaddr_unaligned;
73 	soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
74 	soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
75 	soc->alloc_addr_list_idx++;
76 
77 	if (soc->alloc_addr_list_idx == max_list_size)
78 		soc->alloc_addr_list_idx = 0;
79 }
80 
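/**
 * add_entry_free_list() - record a REO queue descriptor free in the circular
 *			   qdesc-free debug list
 * @soc: SoC handle
 * @rx_tid: rx_tid whose hardware queue descriptor is being freed
 *
 * The descriptor addresses and a timestamp are stored at the current free
 * list index; the list wraps around after wlan_cfg qref_control_size entries
 * and nothing is recorded when qref_control_size is 0.
 *
 * Return: None
 */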
81 static inline
82 void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
83 {
84 	uint32_t max_list_size;
85 	unsigned long curr_ts = qdf_get_system_timestamp();
86 	uint32_t qref_index = soc->free_addr_list_idx;
87 
88 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
89 
90 	if (max_list_size == 0)
91 		return;
92 
93 	soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
94 	soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
95 							 rx_tid->hw_qdesc_paddr;
96 	soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
97 						 rx_tid->hw_qdesc_vaddr_aligned;
98 	soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
99 					       rx_tid->hw_qdesc_vaddr_unaligned;
100 	soc->free_addr_list_idx++;
101 
102 	if (soc->free_addr_list_idx == max_list_size)
103 		soc->free_addr_list_idx = 0;
104 }
105 
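/**
 * add_entry_write_list() - record a REO queue address write for a peer TID
 *			    in the circular write debug list
 * @soc: SoC handle
 * @peer: peer whose rx_tid queue descriptor address is recorded
 * @tid: TID of the rx queue
 *
 * The peer id, tid, queue descriptor physical address and a timestamp are
 * stored at the current write list index; the list wraps around after
 * wlan_cfg qref_control_size entries and nothing is recorded when
 * qref_control_size is 0.
 *
 * Return: None
 */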
106 static inline
107 void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
108 			  uint32_t tid)
109 {
110 	uint32_t max_list_size;
111 	unsigned long curr_ts = qdf_get_system_timestamp();
112 
113 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
114 
115 	if (max_list_size == 0)
116 		return;
117 
118 	soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
119 	soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
120 	soc->reo_write_list[soc->write_paddr_list_idx].paddr =
121 					       peer->rx_tid[tid].hw_qdesc_paddr;
122 	soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
123 	soc->write_paddr_list_idx++;
124 
125 	if (soc->write_paddr_list_idx == max_list_size)
126 		soc->write_paddr_list_idx = 0;
127 }
128 
129 #ifdef REO_QDESC_HISTORY
130 enum reo_qdesc_event_type {
131 	REO_QDESC_UPDATE_CB = 0,
132 	REO_QDESC_FREE,
133 };
134 
135 struct reo_qdesc_event {
136 	qdf_dma_addr_t qdesc_addr;
137 	uint64_t ts;
138 	enum reo_qdesc_event_type type;
139 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
140 };
141 #endif
142 
143 struct ast_del_ctxt {
144 	bool age;
145 	int del_count;
146 };
147 
148 #ifdef QCA_SUPPORT_WDS_EXTENDED
149 /**
150  * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
151  *
152  * @peer: DP peer context
153  *
154  * This API checks whether the peer is a WDS_EXT peer or not
155  *
156  * Return: true if the peer is a WDS_EXT peer, else false
157  */
158 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
159 {
160 	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
161 }
162 #else
163 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
164 {
165 	return false;
166 }
167 #endif
168 
169 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
170 			       void *arg);
171 /**
172  * dp_peer_unref_delete() - unref and delete peer
173  * @peer: Datapath peer handle
174  * @id: ID of module releasing reference
175  *
176  */
177 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
178 
179 /**
180  * dp_txrx_peer_unref_delete() - unref and delete peer
181  * @handle: Datapath txrx ref handle
182  * @id: Module ID of the caller
183  *
184  */
185 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
186 
187 /**
188  * dp_peer_find_hash_find() - returns legacy or mlo link peer from
189  *			      peer_hash_table matching vdev_id and mac_address
190  * @soc: soc handle
191  * @peer_mac_addr: peer mac address
192  * @mac_addr_is_aligned: is mac addr aligned
193  * @vdev_id: vdev_id
194  * @mod_id: id of module requesting reference
195  *
196  * Return: peer in success
197  *         NULL in failure
198  */
199 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
200 				       uint8_t *peer_mac_addr,
201 				       int mac_addr_is_aligned,
202 				       uint8_t vdev_id,
203 				       enum dp_mod_id mod_id);
204 
205 /**
206  * dp_peer_find_by_id_valid() - check if peer exists for given id
207  * @soc: core DP soc context
208  * @peer_id: peer id from which peer object can be retrieved
209  *
210  * Return: true if peer exists, false otherwise
211  */
212 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
213 
214 /**
215  * dp_peer_get_ref() - Take a reference on the given DP peer object
216  *
217  * @soc: core DP soc context
218  * @peer: DP peer
219  * @mod_id: id of module requesting the reference
220  *
221  * Return:	QDF_STATUS_SUCCESS if reference held successfully
222  *		else QDF_STATUS_E_INVAL
223  */
224 static inline
225 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
226 			   struct dp_peer *peer,
227 			   enum dp_mod_id mod_id)
228 {
229 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
230 		return QDF_STATUS_E_INVAL;
231 
232 	if (mod_id > DP_MOD_ID_RX)
233 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
234 
235 	return QDF_STATUS_SUCCESS;
236 }
237 
238 /**
239  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
240  *
241  * @soc: core DP soc context
242  * @peer_id: peer id from which peer object can be retrieved
243  * @mod_id: module id
244  *
245  * Return: struct dp_peer*: Pointer to DP peer object
246  */
247 static inline struct dp_peer *
248 __dp_peer_get_ref_by_id(struct dp_soc *soc,
249 			uint16_t peer_id,
250 			enum dp_mod_id mod_id)
251 
252 {
253 	struct dp_peer *peer;
254 
255 	qdf_spin_lock_bh(&soc->peer_map_lock);
256 	peer = (peer_id >= soc->max_peer_id) ? NULL :
257 				soc->peer_id_to_obj_map[peer_id];
258 	if (!peer ||
259 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
260 		qdf_spin_unlock_bh(&soc->peer_map_lock);
261 		return NULL;
262 	}
263 
264 	qdf_spin_unlock_bh(&soc->peer_map_lock);
265 	return peer;
266 }
267 
268 /**
269  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
270  *                        if peer state is active
271  *
272  * @soc: core DP soc context
273  * @peer_id: peer id from which peer object can be retrieved
274  * @mod_id: ID of module requesting reference
275  *
276  * Return: struct dp_peer*: Pointer to DP peer object
277  */
278 static inline
279 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
280 				      uint16_t peer_id,
281 				      enum dp_mod_id mod_id)
282 {
283 	struct dp_peer *peer;
284 
285 	qdf_spin_lock_bh(&soc->peer_map_lock);
286 	peer = (peer_id >= soc->max_peer_id) ? NULL :
287 				soc->peer_id_to_obj_map[peer_id];
288 
289 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
290 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
291 		qdf_spin_unlock_bh(&soc->peer_map_lock);
292 		return NULL;
293 	}
294 
295 	qdf_spin_unlock_bh(&soc->peer_map_lock);
296 
297 	return peer;
298 }
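
/*
 * Illustrative usage sketch (not part of the original sources): a reference
 * taken with dp_peer_get_ref_by_id() must be released with
 * dp_peer_unref_delete() using the same module id, e.g.
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (peer) {
 *		(use peer while the reference is held)
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */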
299 
300 /**
301  * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
302  *
303  * @soc: core DP soc context
304  * @peer_id: peer id from which peer object can be retrieved
305  * @handle: reference handle
306  * @mod_id: ID of module requesting reference
307  *
308  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
309  */
310 static inline struct dp_txrx_peer *
311 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
312 			   uint16_t peer_id,
313 			   dp_txrx_ref_handle *handle,
314 			   enum dp_mod_id mod_id)
315 
316 {
317 	struct dp_peer *peer;
318 
319 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
320 	if (!peer)
321 		return NULL;
322 
323 	if (!peer->txrx_peer) {
324 		dp_peer_unref_delete(peer, mod_id);
325 		return NULL;
326 	}
327 
328 	*handle = (dp_txrx_ref_handle)peer;
329 	return peer->txrx_peer;
330 }
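
/*
 * Illustrative usage sketch (not part of the original sources): the returned
 * txrx peer is valid only while the dp_peer reference returned via @handle is
 * held; release it with dp_txrx_peer_unref_delete(), e.g.
 *
 *	dp_txrx_ref_handle ref_handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *					       DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		(use txrx_peer)
 *		dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *	}
 */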
331 
332 #ifdef PEER_CACHE_RX_PKTS
333 /**
334  * dp_rx_flush_rx_cached() - flush cached rx frames
335  * @peer: peer
336  * @drop: set flag to drop frames
337  *
338  * Return: None
339  */
340 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
341 #else
342 static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
343 {
344 }
345 #endif
346 
347 static inline void
348 dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
349 {
350 	qdf_spin_lock_bh(&peer->peer_info_lock);
351 	peer->state = OL_TXRX_PEER_STATE_DISC;
352 	qdf_spin_unlock_bh(&peer->peer_info_lock);
353 
354 	dp_rx_flush_rx_cached(peer, true);
355 }
356 
357 /**
358  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
359  *
360  * @vdev: DP vdev context
361  * @func: function to be called for each peer
362  * @arg: argument to be passed to func
363  * @mod_id: module_id
364  *
365  * Return: void
366  */
367 static inline void
368 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
369 		     enum dp_mod_id mod_id)
370 {
371 	struct dp_peer *peer;
372 	struct dp_peer *tmp_peer;
373 	struct dp_soc *soc = NULL;
374 
375 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
376 		return;
377 
378 	soc = vdev->pdev->soc;
379 
380 	qdf_spin_lock_bh(&vdev->peer_list_lock);
381 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
382 			   peer_list_elem,
383 			   tmp_peer) {
384 		if (dp_peer_get_ref(soc, peer, mod_id) ==
385 					QDF_STATUS_SUCCESS) {
386 			(*func)(soc, peer, arg);
387 			dp_peer_unref_delete(peer, mod_id);
388 		}
389 	}
390 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
391 }
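
/*
 * Illustrative usage sketch (not part of the original sources): the iterate
 * APIs invoke a dp_peer_iter_func callback for each peer while holding a
 * reference on it; a hypothetical callback counting peers could look like:
 *
 *	static void dp_count_peer(struct dp_soc *soc, struct dp_peer *peer,
 *				  void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_count_peer, &count, DP_MOD_ID_CDP);
 */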
392 
393 /**
394  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
395  *
396  * @pdev: DP pdev context
397  * @func: function to be called for each peer
398  * @arg: argument to be passed to func
399  * @mod_id: module_id
400  *
401  * Return: void
402  */
403 static inline void
404 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
405 		     enum dp_mod_id mod_id)
406 {
407 	struct dp_vdev *vdev;
408 
409 	if (!pdev)
410 		return;
411 
412 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
413 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
414 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
415 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
416 }
417 
418 /**
419  * dp_soc_iterate_peer() - API to iterate through all peers of soc
420  *
421  * @soc: DP soc context
422  * @func: function to be called for each peer
423  * @arg: argument to be passed to func
424  * @mod_id: module_id
425  *
426  * Return: void
427  */
428 static inline void
429 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
430 		    enum dp_mod_id mod_id)
431 {
432 	struct dp_pdev *pdev;
433 	int i;
434 
435 	if (!soc)
436 		return;
437 
438 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
439 		pdev = soc->pdev_list[i];
440 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
441 	}
442 }
443 
444 /**
445  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
446  *
447  * This API caches the peers in locally allocated memory and calls the
448  * iterate function outside the lock.
449  *
450  * As this API allocates new memory, it is suggested to use it only when
451  * the lock cannot be held
452  *
453  * @vdev: DP vdev context
454  * @func: function to be called for each peer
455  * @arg: argument to be passed to func
456  * @mod_id: module_id
457  *
458  * Return: void
459  */
460 static inline void
461 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
462 			       dp_peer_iter_func *func,
463 			       void *arg,
464 			       enum dp_mod_id mod_id)
465 {
466 	struct dp_peer *peer;
467 	struct dp_peer *tmp_peer;
468 	struct dp_soc *soc = NULL;
469 	struct dp_peer **peer_array = NULL;
470 	int i = 0;
471 	uint32_t num_peers = 0;
472 
473 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
474 		return;
475 
476 	num_peers = vdev->num_peers;
477 
478 	soc = vdev->pdev->soc;
479 
480 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
481 	if (!peer_array)
482 		return;
483 
484 	qdf_spin_lock_bh(&vdev->peer_list_lock);
485 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
486 			   peer_list_elem,
487 			   tmp_peer) {
488 		if (i >= num_peers)
489 			break;
490 
491 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
492 			peer_array[i] = peer;
493 			i = (i + 1);
494 		}
495 	}
496 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
497 
498 	for (i = 0; i < num_peers; i++) {
499 		peer = peer_array[i];
500 
501 		if (!peer)
502 			continue;
503 
504 		(*func)(soc, peer, arg);
505 		dp_peer_unref_delete(peer, mod_id);
506 	}
507 
508 	qdf_mem_free(peer_array);
509 }
510 
511 /**
512  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
513  *
514  * This API caches the peers in locally allocated memory and calls the
515  * iterate function outside the lock.
516  *
517  * As this API allocates new memory, it is suggested to use it only when
518  * the lock cannot be held
519  *
520  * @pdev: DP pdev context
521  * @func: function to be called for each peer
522  * @arg: argument to be passed to func
523  * @mod_id: module_id
524  *
525  * Return: void
526  */
527 static inline void
528 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
529 			       dp_peer_iter_func *func,
530 			       void *arg,
531 			       enum dp_mod_id mod_id)
532 {
533 	struct dp_peer *peer;
534 	struct dp_peer *tmp_peer;
535 	struct dp_soc *soc = NULL;
536 	struct dp_vdev *vdev = NULL;
537 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
538 	int i = 0;
539 	int j = 0;
540 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
541 
542 	if (!pdev || !pdev->soc)
543 		return;
544 
545 	soc = pdev->soc;
546 
547 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
548 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
549 		num_peers[i] = vdev->num_peers;
550 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
551 					       sizeof(struct dp_peer *));
552 		if (!peer_array[i])
553 			break;
554 
555 		qdf_spin_lock_bh(&vdev->peer_list_lock);
556 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
557 				   peer_list_elem,
558 				   tmp_peer) {
559 			if (j >= num_peers[i])
560 				break;
561 
562 			if (dp_peer_get_ref(soc, peer, mod_id) ==
563 					QDF_STATUS_SUCCESS) {
564 				peer_array[i][j] = peer;
565 
566 				j = (j + 1);
567 			}
568 		}
569 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
570 		i = (i + 1);
571 	}
572 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
573 
574 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
575 		if (!peer_array[i])
576 			break;
577 
578 		for (j = 0; j < num_peers[i]; j++) {
579 			peer = peer_array[i][j];
580 
581 			if (!peer)
582 				continue;
583 
584 			(*func)(soc, peer, arg);
585 			dp_peer_unref_delete(peer, mod_id);
586 		}
587 
588 		qdf_mem_free(peer_array[i]);
589 	}
590 }
591 
592 /**
593  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
594  *
595  * This API caches the peers in locally allocated memory and calls the
596  * iterate function outside the lock.
597  *
598  * As this API allocates new memory, it is suggested to use it only when
599  * the lock cannot be held
600  *
601  * @soc: DP soc context
602  * @func: function to be called for each peer
603  * @arg: argument to be passed to func
604  * @mod_id: module_id
605  *
606  * Return: void
607  */
608 static inline void
609 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
610 			      dp_peer_iter_func *func,
611 			      void *arg,
612 			      enum dp_mod_id mod_id)
613 {
614 	struct dp_pdev *pdev;
615 	int i;
616 
617 	if (!soc)
618 		return;
619 
620 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
621 		pdev = soc->pdev_list[i];
622 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
623 	}
624 }
625 
626 #ifdef DP_PEER_STATE_DEBUG
627 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
628 	do {  \
629 		if (!(_condition)) { \
630 			dp_alert("Invalid state shift from %u to %u peer " \
631 				 QDF_MAC_ADDR_FMT, \
632 				 (_peer)->peer_state, (_new_state), \
633 				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
634 			QDF_ASSERT(0); \
635 		} \
636 	} while (0)
637 
638 #else
639 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
640 	do {  \
641 		if (!(_condition)) { \
642 			dp_alert("Invalid state shift from %u to %u peer " \
643 				 QDF_MAC_ADDR_FMT, \
644 				 (_peer)->peer_state, (_new_state), \
645 				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
646 		} \
647 	} while (0)
648 #endif
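
/*
 * Illustrative usage sketch (not part of the original sources, and assuming
 * DP_PEER_STATE_INIT/DP_PEER_STATE_ACTIVE exist in enum dp_peer_state): a
 * state transition passes its precondition as _condition, e.g.
 *
 *	DP_PEER_STATE_ASSERT(peer, DP_PEER_STATE_ACTIVE,
 *			     (peer->peer_state == DP_PEER_STATE_INIT));
 *	peer->peer_state = DP_PEER_STATE_ACTIVE;
 */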
649 
650 /**
651  * dp_peer_state_cmp() - compare dp peer state
652  *
653  * @peer: DP peer
654  * @state: state
655  *
656  * Return: true if state matches with peer state
657  *	   false if it does not match
658  */
659 static inline bool
660 dp_peer_state_cmp(struct dp_peer *peer,
661 		  enum dp_peer_state state)
662 {
663 	bool is_status_equal = false;
664 
665 	qdf_spin_lock_bh(&peer->peer_state_lock);
666 	is_status_equal = (peer->peer_state == state);
667 	qdf_spin_unlock_bh(&peer->peer_state_lock);
668 
669 	return is_status_equal;
670 }
671 
672 /**
673  * dp_print_ast_stats() - Dump AST table contents
674  * @soc: Datapath soc handle
675  *
676  * Return: void
677  */
678 void dp_print_ast_stats(struct dp_soc *soc);
679 
680 /**
681  * dp_rx_peer_map_handler() - handle peer map event from firmware
682  * @soc: generic soc handle
683  * @peer_id: peer_id from firmware
684  * @hw_peer_id: ast index for this peer
685  * @vdev_id: vdev ID
686  * @peer_mac_addr: mac address of the peer
687  * @ast_hash: ast hash value
688  * @is_wds: flag to indicate peer map event for WDS ast entry
689  *
690  * associate the peer_id that firmware provided with peer entry
691  * and update the ast table in the host with the hw_peer_id.
692  *
693  * Return: QDF_STATUS code
694  */
695 
696 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
697 				  uint16_t hw_peer_id, uint8_t vdev_id,
698 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
699 				  uint8_t is_wds);
700 
701 /**
702  * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
703  * @soc: generic soc handle
704  * @peer_id: peer_id from firmware
705  * @vdev_id: vdev ID
706  * @peer_mac_addr: mac address of the peer or wds entry
707  * @is_wds: flag to indicate peer map event for WDS ast entry
708  * @free_wds_count: number of wds entries freed by FW with peer delete
709  *
710  * Return: none
711  */
712 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
713 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
714 			      uint8_t is_wds, uint32_t free_wds_count);
715 
716 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
717 /**
718  * dp_rx_peer_ext_evt() - handle peer extended event from firmware
719  * @soc: DP soc handle
720  * @info: extended evt info
721  *
722  *
723  * Return: QDF_STATUS
724  */
725 
726 QDF_STATUS
727 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info);
728 #endif
729 #ifdef DP_RX_UDP_OVER_PEER_ROAM
730 /**
731  * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
732  * @soc: dp soc pointer
733  * @vdev_id: vdev id
734  * @peer_mac_addr: mac address of the peer
735  *
736  * This function resets the roamed peer auth status and mac address
737  * after peer map indication of same peer is received from firmware.
738  *
739  * Return: None
740  */
741 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
742 			      uint8_t *peer_mac_addr);
743 #else
744 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
745 					    uint8_t *peer_mac_addr)
746 {
747 }
748 #endif
749 
750 #ifdef WLAN_FEATURE_11BE_MLO
751 /**
752  * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
753  * @soc: generic soc handle
754  * @peer_id: ML peer_id from firmware
755  * @peer_mac_addr: mac address of the peer
756  * @mlo_flow_info: MLO AST flow info
757  * @mlo_link_info: MLO link info
758  *
759  * associate the ML peer_id that firmware provided with peer entry
760  * and update the ast table in the host with the hw_peer_id.
761  *
762  * Return: QDF_STATUS code
763  */
764 QDF_STATUS
765 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
766 			   uint8_t *peer_mac_addr,
767 			   struct dp_mlo_flow_override_info *mlo_flow_info,
768 			   struct dp_mlo_link_info *mlo_link_info);
769 
770 /**
771  * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
772  * @soc: generic soc handle
773  * @peer_id: peer_id from firmware
774  *
775  * Return: none
776  */
777 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
778 #endif
779 
780 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
781 			   enum cdp_sec_type sec_type, int is_unicast,
782 			   u_int32_t *michael_key, u_int32_t *rx_pn);
783 
784 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
785 		uint16_t peer_id, uint8_t *peer_mac);
786 
787 /**
788  * dp_peer_add_ast() - Allocate and add AST entry into peer list
789  * @soc: SoC handle
790  * @peer: peer to which ast node belongs
791  * @mac_addr: MAC address of ast node
792  * @type: AST entry type
793  * @flags: AST configuration flags
794  *
795  * This API is used by WDS source port learning function to
796  * add a new AST entry into peer AST list
797  *
798  * Return: QDF_STATUS code
799  */
800 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
801 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
802 			   uint32_t flags);
803 
804 /**
805  * dp_peer_del_ast() - Delete and free AST entry
806  * @soc: SoC handle
807  * @ast_entry: AST entry of the node
808  *
809  * This function removes the AST entry from peer and soc tables
810  * It assumes caller has taken the ast lock to protect the access to these
811  * tables
812  *
813  * Return: None
814  */
815 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
816 
817 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
818 			       struct dp_ast_entry *ast_entry);
819 
820 /**
821  * dp_peer_update_ast() - Update AST entry for the roamed peer
822  * @soc: SoC handle
823  * @peer: peer to which ast node belongs
824  * @ast_entry: AST entry of the node
825  * @flags: wds or hmwds
826  *
827  * This function updates the AST entry for the roamed peer in the peer and
828  * soc tables. It assumes the caller has taken the ast lock to protect the
829  * access to these tables
830  *
831  * Return: 0 if ast entry is updated successfully
832  *         -1 failure
833  */
834 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
835 			struct dp_ast_entry *ast_entry,	uint32_t flags);
836 
837 /**
838  * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
839  * @soc: SoC handle
840  * @ast_mac_addr: Mac address
841  * @pdev_id: pdev Id
842  *
843  * It assumes caller has taken the ast lock to protect the access to
844  * AST hash table
845  *
846  * Return: AST entry
847  */
848 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
849 						     uint8_t *ast_mac_addr,
850 						     uint8_t pdev_id);
851 
852 /**
853  * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
854  * @soc: SoC handle
855  * @ast_mac_addr: Mac address
856  * @vdev_id: vdev Id
857  *
858  * It assumes caller has taken the ast lock to protect the access to
859  * AST hash table
860  *
861  * Return: AST entry
862  */
863 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
864 						     uint8_t *ast_mac_addr,
865 						     uint8_t vdev_id);
866 
867 /**
868  * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
869  * @soc: SoC handle
870  * @ast_mac_addr: Mac address
871  *
872  * It assumes caller has taken the ast lock to protect the access to
873  * AST hash table
874  *
875  * Return: AST entry
876  */
877 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
878 					       uint8_t *ast_mac_addr);
879 
880 /**
881  * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
882  * @soc: SoC handle
883  * @ast_entry: AST entry of the node
884  *
885  * This function gets the pdev_id from the ast entry.
886  *
887  * Return: (uint8_t) pdev_id
888  */
889 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
890 				struct dp_ast_entry *ast_entry);
891 
892 
893 /**
894  * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
895  * @soc: SoC handle
896  * @ast_entry: AST entry of the node
897  *
898  * This function gets the next hop from the ast entry.
899  *
900  * Return: (uint8_t) next_hop
901  */
902 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
903 				struct dp_ast_entry *ast_entry);
904 
905 /**
906  * dp_peer_ast_set_type() - set type in the ast entry
907  * @soc: SoC handle
908  * @ast_entry: AST entry of the node
909  * @type: AST entry type
910  *
911  * This function sets the type in the ast entry.
912  *
913  * Return: None
914  */
915 void dp_peer_ast_set_type(struct dp_soc *soc,
916 				struct dp_ast_entry *ast_entry,
917 				enum cdp_txrx_ast_entry_type type);
918 
919 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
920 			      struct dp_ast_entry *ast_entry,
921 			      struct dp_peer *peer);
922 
923 #ifdef WLAN_FEATURE_MULTI_AST_DEL
924 void dp_peer_ast_send_multi_wds_del(
925 		struct dp_soc *soc, uint8_t vdev_id,
926 		struct peer_del_multi_wds_entries *wds_list);
927 #endif
928 
929 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
930 			   struct cdp_soc *dp_soc,
931 			   void *cookie,
932 			   enum cdp_ast_free_status status);
933 
934 /**
935  * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
936  * @soc: SoC handle
937  * @ase: Address search entry
938  *
939  * This function removes the AST entry from soc AST hash table
940  * It assumes caller has taken the ast lock to protect the access to this table
941  *
942  * Return: None
943  */
944 void dp_peer_ast_hash_remove(struct dp_soc *soc,
945 			     struct dp_ast_entry *ase);
946 
947 /**
948  * dp_peer_free_ast_entry() - Free up the ast entry memory
949  * @soc: SoC handle
950  * @ast_entry: Address search entry
951  *
952  * This API is used to free up the memory associated with
953  * AST entry.
954  *
955  * Return: None
956  */
957 void dp_peer_free_ast_entry(struct dp_soc *soc,
958 			    struct dp_ast_entry *ast_entry);
959 
960 /**
961  * dp_peer_unlink_ast_entry() - Unlink AST entry from peer and hash lists
962  * @soc: SoC handle
963  * @ast_entry: Address search entry
964  * @peer: peer
965  *
966  * This API is used to remove/unlink AST entry from the peer list
967  * and hash list.
968  *
969  * Return: None
970  */
971 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
972 			      struct dp_ast_entry *ast_entry,
973 			      struct dp_peer *peer);
974 
975 /**
976  * dp_peer_mec_detach_entry() - Detach the MEC entry
977  * @soc: SoC handle
978  * @mecentry: MEC entry of the node
979  * @ptr: pointer to free list
980  *
981  * The MEC entry is detached from MEC table and added to free_list
982  * to free the object outside lock
983  *
984  * Return: None
985  */
986 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
987 			      void *ptr);
988 
989 /**
990  * dp_peer_mec_free_list() - free the MEC entry from free_list
991  * @soc: SoC handle
992  * @ptr: pointer to free list
993  *
994  * Return: None
995  */
996 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
997 
998 /**
999  * dp_peer_mec_add_entry() - Allocate and add MEC entry into MEC table
1000  * @soc: SoC handle
1001  * @vdev: vdev to which mec node belongs
1002  * @mac_addr: MAC address of mec node
1003  *
1004  * This function allocates and adds MEC entry to MEC table.
1005  * It assumes caller has taken the mec lock to protect the access to these
1006  * tables
1007  *
1008  * Return: QDF_STATUS
1009  */
1010 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
1011 				 struct dp_vdev *vdev,
1012 				 uint8_t *mac_addr);
1013 
1014 /**
1015  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
1016  * within pdev
1017  * @soc: SoC handle
1018  * @pdev_id: pdev Id
1019  * @mec_mac_addr: MAC address of mec node
1020  *
1021  * It assumes caller has taken the mec_lock to protect the access to
1022  * MEC hash table
1023  *
1024  * Return: MEC entry
1025  */
1026 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
1027 						     uint8_t pdev_id,
1028 						     uint8_t *mec_mac_addr);
1029 
1030 #define DP_AST_ASSERT(_condition) \
1031 	do { \
1032 		if (!(_condition)) { \
1033 			dp_print_ast_stats(soc);\
1034 			QDF_BUG(_condition); \
1035 		} \
1036 	} while (0)
1037 
1038 /**
1039  * dp_peer_update_inactive_time() - Update inactive time for peer
1040  * @pdev: pdev object
1041  * @tag_type: htt_tlv_tag type
1042  * @tag_buf: buf message
1043  */
1044 void
1045 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
1046 			     uint32_t *tag_buf);
1047 
1048 #ifndef QCA_MULTIPASS_SUPPORT
1049 /**
1050  * dp_peer_set_vlan_id() - set vlan_id for this peer
1051  * @cdp_soc: soc handle
1052  * @vdev_id: id of vdev object
1053  * @peer_mac: mac address
1054  * @vlan_id: vlan id for peer
1055  *
1056  * Return: void
1057  */
1058 static inline
1059 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
1060 			 uint8_t vdev_id, uint8_t *peer_mac,
1061 			 uint16_t vlan_id)
1062 {
1063 }
1064 
1065 /**
1066  * dp_set_vlan_groupkey() - set vlan map for vdev
1067  * @soc_hdl: pointer to soc
1068  * @vdev_id: id of vdev handle
1069  * @vlan_id: vlan_id
1070  * @group_key: group key for vlan
1071  *
1072  * Return: set success/failure
1073  */
1074 static inline
1075 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1076 				uint16_t vlan_id, uint16_t group_key)
1077 {
1078 	return QDF_STATUS_SUCCESS;
1079 }
1080 
1081 /**
1082  * dp_peer_multipass_list_init() - initialize multipass peer list
1083  * @vdev: pointer to vdev
1084  *
1085  * Return: void
1086  */
1087 static inline
1088 void dp_peer_multipass_list_init(struct dp_vdev *vdev)
1089 {
1090 }
1091 
1092 /**
1093  * dp_peer_multipass_list_remove() - remove peer from special peer list
1094  * @peer: peer handle
1095  *
1096  * Return: void
1097  */
1098 static inline
1099 void dp_peer_multipass_list_remove(struct dp_peer *peer)
1100 {
1101 }
1102 #else
1103 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
1104 			 uint8_t vdev_id, uint8_t *peer_mac,
1105 			 uint16_t vlan_id);
1106 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
1107 				uint16_t vlan_id, uint16_t group_key);
1108 void dp_peer_multipass_list_init(struct dp_vdev *vdev);
1109 void dp_peer_multipass_list_remove(struct dp_peer *peer);
1110 #endif
1111 
1112 
1113 #ifndef QCA_PEER_MULTIQ_SUPPORT
1114 /**
1115  * dp_peer_reset_flowq_map() - reset peer flowq map table
1116  * @peer: dp peer handle
1117  *
1118  * Return: none
1119  */
1120 static inline
1121 void dp_peer_reset_flowq_map(struct dp_peer *peer)
1122 {
1123 }
1124 
1125 /**
1126  * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
1127  * @soc_hdl: generic soc handle
1128  * @is_wds: flag to indicate if peer is wds
1129  * @peer_id: peer_id from htt peer map message
1130  * @peer_mac_addr: mac address of the peer
1131  * @ast_info: ast flow override information from peer map
1132  *
1133  * Return: none
1134  */
1135 static inline
1136 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
1137 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
1138 		    struct dp_ast_flow_override_info *ast_info)
1139 {
1140 }
1141 #else
1142 void dp_peer_reset_flowq_map(struct dp_peer *peer);
1143 
1144 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
1145 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
1146 		    struct dp_ast_flow_override_info *ast_info);
1147 #endif
1148 
1149 #ifdef QCA_PEER_EXT_STATS
1150 /**
1151  * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
1152  * @soc: DP SoC context
1153  * @txrx_peer: DP txrx peer context
1154  *
1155  * Allocate the peer delay stats context
1156  *
1157  * Return: QDF_STATUS_SUCCESS if allocation is
1158  *	   successful
1159  */
1160 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
1161 					 struct dp_txrx_peer *txrx_peer);
1162 
1163 /**
1164  * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
1165  * @soc: DP SoC context
1166  * @txrx_peer: txrx DP peer context
1167  *
1168  * Free the peer delay stats context
1169  *
1170  * Return: Void
1171  */
1172 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
1173 				     struct dp_txrx_peer *txrx_peer);
1174 
1175 /**
1176  * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
1177  * @txrx_peer: dp_txrx_peer handle
1178  *
1179  * Return: void
1180  */
1181 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
1182 #else
1183 static inline
1184 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
1185 					 struct dp_txrx_peer *txrx_peer)
1186 {
1187 	return QDF_STATUS_SUCCESS;
1188 }
1189 
1190 static inline
1191 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
1192 				     struct dp_txrx_peer *txrx_peer)
1193 {
1194 }
1195 
1196 static inline
1197 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
1198 {
1199 }
1200 #endif
1201 
1202 #ifdef WLAN_PEER_JITTER
1203 /**
1204  * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
1205  * @pdev: Datapath pdev handle
1206  * @txrx_peer: dp_txrx_peer handle
1207  *
1208  * Return: QDF_STATUS
1209  */
1210 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
1211 					  struct dp_txrx_peer *txrx_peer);
1212 
1213 /**
1214  * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
1215  * @pdev: Datapath pdev handle
1216  * @txrx_peer: dp_txrx_peer handle
1217  *
1218  * Return: void
1219  */
1220 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
1221 				      struct dp_txrx_peer *txrx_peer);
1222 
1223 /**
1224  * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
1225  * @txrx_peer: dp_txrx_peer handle
1226  *
1227  * Return: void
1228  */
1229 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
1230 #else
1231 static inline
1232 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
1233 					  struct dp_txrx_peer *txrx_peer)
1234 {
1235 	return QDF_STATUS_SUCCESS;
1236 }
1237 
1238 static inline
1239 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
1240 				      struct dp_txrx_peer *txrx_peer)
1241 {
1242 }
1243 
1244 static inline
1245 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
1246 {
1247 }
1248 #endif
1249 
1250 #ifndef CONFIG_SAWF_DEF_QUEUES
1251 static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
1252 						struct dp_peer *peer)
1253 {
1254 	return QDF_STATUS_SUCCESS;
1255 }
1256 
1257 static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
1258 					       struct dp_peer *peer)
1259 {
1260 	return QDF_STATUS_SUCCESS;
1261 }
1262 
1263 #endif
1264 
1265 #ifndef CONFIG_SAWF
1266 static inline
1267 QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
1268 					struct dp_txrx_peer *txrx_peer)
1269 {
1270 	return QDF_STATUS_SUCCESS;
1271 }
1272 
1273 static inline
1274 QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
1275 				       struct dp_txrx_peer *txrx_peer)
1276 {
1277 	return QDF_STATUS_SUCCESS;
1278 }
1279 #endif
1280 
1281 /**
1282  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
1283  * @soc: DP soc
1284  * @vdev: vdev
1285  * @mod_id: id of module requesting reference
1286  *
1287  * Return: VDEV BSS peer
1288  */
1289 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
1290 					   struct dp_vdev *vdev,
1291 					   enum dp_mod_id mod_id);
1292 
1293 /**
1294  * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
1295  * @soc: DP soc
1296  * @vdev: vdev
1297  * @mod_id: id of module requesting reference
1298  *
1299  * Return: VDEV self peer
1300  */
1301 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
1302 						struct dp_vdev *vdev,
1303 						enum dp_mod_id mod_id);
1304 
1305 void dp_peer_ast_table_detach(struct dp_soc *soc);
1306 
1307 /**
1308  * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
1309  * @soc: soc handle
1310  *
1311  * Return: none
1312  */
1313 void dp_peer_find_map_detach(struct dp_soc *soc);
1314 
1315 void dp_soc_wds_detach(struct dp_soc *soc);
1316 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
1317 
1318 /**
1319  * dp_find_peer_by_macaddr() - Find the peer from the mac address provided.
1320  * @soc: soc handle
1321  * @mac_addr: MAC address to be used to find peer
1322  * @vdev_id: VDEV id
1323  * @mod_id: MODULE ID
1324  *
1325  * Return: struct dp_peer
1326  */
1327 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
1328 					uint8_t vdev_id, enum dp_mod_id mod_id);
1329 /**
1330  * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
1331  * @soc: SoC handle
1332  *
1333  * Return: QDF_STATUS
1334  */
1335 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
1336 
1337 /**
1338  * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
1339  * @soc: SoC handle
1340  *
1341  * Return: QDF_STATUS
1342  */
1343 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
1344 
1345 /**
1346  * dp_del_wds_entry_wrapper() - delete a WDS AST entry
1347  * @soc: DP soc structure pointer
1348  * @vdev_id: vdev_id
1349  * @wds_macaddr: MAC address of ast node
1350  * @type: type from enum cdp_txrx_ast_entry_type
1351  * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
1352  *
1353  * This API is used to delete an AST entry from fw
1354  *
1355  * Return: None
1356  */
1357 void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
1358 			      uint8_t *wds_macaddr, uint8_t type,
1359 			      uint8_t delete_in_fw);
1360 
1361 void dp_soc_wds_attach(struct dp_soc *soc);
1362 
1363 /**
1364  * dp_peer_mec_hash_detach() - Free MEC Hash table
1365  * @soc: SoC handle
1366  *
1367  * Return: None
1368  */
1369 void dp_peer_mec_hash_detach(struct dp_soc *soc);
1370 
1371 /**
1372  * dp_peer_ast_hash_detach() - Free AST Hash table
1373  * @soc: SoC handle
1374  *
1375  * Return: None
1376  */
1377 void dp_peer_ast_hash_detach(struct dp_soc *soc);
1378 
1379 #ifdef FEATURE_AST
1380 /**
1381  * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
1382  * @soc: datapath soc handle
1383  * @peer: datapath peer handle
1384  *
1385  * Delete the AST entries belonging to a peer
1386  */
1387 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
1388 					      struct dp_peer *peer)
1389 {
1390 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
1391 
1392 	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
1393 	/*
1394 	 * Delete peer self ast entry. This is done to handle scenarios
1395 	 * where the peer is freed before the peer map is received (for example,
1396 	 * in case of auth disallow due to ACL); in such cases the self ast is
1397 	 * not added to peer->ast_list.
1398 	 */
1399 	if (peer->self_ast_entry) {
1400 		dp_peer_del_ast(soc, peer->self_ast_entry);
1401 		peer->self_ast_entry = NULL;
1402 	}
1403 
1404 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
1405 		dp_peer_del_ast(soc, ast_entry);
1406 }
1407 
1408 /**
1409  * dp_print_peer_ast_entries() - Dump AST entries of peer
1410  * @soc: Datapath soc handle
1411  * @peer: Datapath peer
1412  * @arg: argument to iterate function
1413  *
1414  * Return: void
1415  */
1416 void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
1417 			       void *arg);
1418 #else
1419 static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
1420 					     struct dp_peer *peer, void *arg)
1421 {
1422 }
1423 
1424 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
1425 					      struct dp_peer *peer)
1426 {
1427 }
1428 #endif
1429 
1430 #ifdef FEATURE_MEC
1431 /**
1432  * dp_peer_mec_spinlock_create() - Create the MEC spinlock
1433  * @soc: SoC handle
1434  *
1435  * Return: none
1436  */
1437 void dp_peer_mec_spinlock_create(struct dp_soc *soc);
1438 
1439 /**
1440  * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
1441  * @soc: SoC handle
1442  *
1443  * Return: none
1444  */
1445 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
1446 
1447 /**
1448  * dp_peer_mec_flush_entries() - Delete all mec entries in table
1449  * @soc: Datapath SOC
1450  *
1451  * Return: None
1452  */
1453 void dp_peer_mec_flush_entries(struct dp_soc *soc);
1454 #else
1455 static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
1456 {
1457 }
1458 
1459 static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
1460 {
1461 }
1462 
1463 static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
1464 {
1465 }
1466 #endif
1467 
1468 static inline int dp_peer_find_mac_addr_cmp(
1469 	union dp_align_mac_addr *mac_addr1,
1470 	union dp_align_mac_addr *mac_addr2)
1471 {
1472 	/*
1473 	 * Intentionally use & rather than &&.
1474 	 * Because the operands are binary rather than generic boolean,
1475 	 * the functionality is equivalent.
1476 	 * Using && has the advantage of short-circuited evaluation,
1477 	 * but using & has the advantage of no conditional branching,
1478 	 * which is a more significant benefit.
1479 	 */
1480 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1481 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1482 }
1483 
1484 /**
1485  * dp_peer_delete() - delete DP peer
1486  *
1487  * @soc: Datapath soc
1488  * @peer: Datapath peer
1489  * @arg: argument to iter function
1490  *
1491  * Return: void
1492  */
1493 void dp_peer_delete(struct dp_soc *soc,
1494 		    struct dp_peer *peer,
1495 		    void *arg);
1496 
1497 /**
1498  * dp_mlo_peer_delete() - delete MLO DP peer
1499  *
1500  * @soc: Datapath soc
1501  * @peer: Datapath peer
1502  * @arg: argument to iter function
1503  *
1504  * Return: void
1505  */
1506 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
1507 
1508 #ifdef WLAN_FEATURE_11BE_MLO
1509 
1510 /* is MLO connection mld peer */
1511 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
1512 
1513 /* set peer type */
1514 #define DP_PEER_SET_TYPE(_peer, _type_val) \
1515 	((_peer)->peer_type = (_type_val))
1516 
1517 /* is legacy peer */
1518 #define IS_DP_LEGACY_PEER(_peer) \
1519 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
1520 /* is MLO connection link peer */
1521 #define IS_MLO_DP_LINK_PEER(_peer) \
1522 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
1523 /* is MLO connection mld peer */
1524 #define IS_MLO_DP_MLD_PEER(_peer) \
1525 	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
1526 /* Get Mld peer from link peer */
1527 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
1528 	((link_peer)->mld_peer)
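
/*
 * Illustrative classification sketch (not part of the original sources): for
 * a given dp_peer the three checks are mutually exclusive, e.g.
 *
 *	if (IS_DP_LEGACY_PEER(peer))
 *		(non-MLO peer, mld_peer is NULL)
 *	else if (IS_MLO_DP_LINK_PEER(peer))
 *		(MLO link peer, DP_GET_MLD_PEER_FROM_PEER(peer) is its MLD peer)
 *	else if (IS_MLO_DP_MLD_PEER(peer))
 *		(MLO MLD peer)
 */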
1529 
1530 #ifdef WLAN_MLO_MULTI_CHIP
1531 static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
1532 {
1533 	if (soc->arch_ops.mlo_get_chip_id)
1534 		return soc->arch_ops.mlo_get_chip_id(soc);
1535 
1536 	return 0;
1537 }
1538 
1539 static inline struct dp_peer *
1540 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1541 				  uint8_t *peer_mac_addr,
1542 				  int mac_addr_is_aligned,
1543 				  uint8_t vdev_id,
1544 				  uint8_t chip_id,
1545 				  enum dp_mod_id mod_id)
1546 {
1547 	if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
1548 		return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
1549 							(soc, peer_mac_addr,
1550 							 mac_addr_is_aligned,
1551 							 vdev_id, chip_id,
1552 							 mod_id);
1553 
1554 	return NULL;
1555 }
1556 #else
1557 static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
1558 {
1559 	return 0;
1560 }
1561 
1562 static inline struct dp_peer *
1563 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1564 				  uint8_t *peer_mac_addr,
1565 				  int mac_addr_is_aligned,
1566 				  uint8_t vdev_id,
1567 				  uint8_t chip_id,
1568 				  enum dp_mod_id mod_id)
1569 {
1570 	return dp_peer_find_hash_find(soc, peer_mac_addr,
1571 				      mac_addr_is_aligned,
1572 				      vdev_id, mod_id);
1573 }
1574 #endif
1575 
1576 /**
1577  * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
1578  *				  matching mac_address
1579  * @soc: soc handle
1580  * @peer_mac_addr: mld peer mac address
1581  * @mac_addr_is_aligned: is mac addr aligned
1582  * @vdev_id: vdev_id
1583  * @mod_id: id of module requesting reference
1584  *
1585  * Return: peer in success
1586  *         NULL in failure
1587  */
1588 static inline
1589 struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
1590 					   uint8_t *peer_mac_addr,
1591 					   int mac_addr_is_aligned,
1592 					   uint8_t vdev_id,
1593 					   enum dp_mod_id mod_id)
1594 {
1595 	if (soc->arch_ops.mlo_peer_find_hash_find)
1596 		return soc->arch_ops.mlo_peer_find_hash_find(soc,
1597 					      peer_mac_addr,
1598 					      mac_addr_is_aligned,
1599 					      mod_id, vdev_id);
1600 	return NULL;
1601 }
1602 
1603 /**
1604  * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
1605  *				 peer_type
1606  * @soc: DP SOC handle
1607  * @peer_info: peer information for hash find
1608  * @mod_id: ID of module requesting reference
1609  *
1610  * Return: peer handle
1611  */
1612 static inline
1613 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
1614 					  struct cdp_peer_info *peer_info,
1615 					  enum dp_mod_id mod_id)
1616 {
1617 	struct dp_peer *peer = NULL;
1618 
1619 	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
1620 	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
1621 		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
1622 					      peer_info->mac_addr_is_aligned,
1623 					      peer_info->vdev_id,
1624 					      mod_id);
1625 		if (peer)
1626 			return peer;
1627 	}
1628 	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
1629 	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
1630 		peer = dp_mld_peer_find_hash_find(
1631 					soc, peer_info->mac_addr,
1632 					peer_info->mac_addr_is_aligned,
1633 					peer_info->vdev_id,
1634 					mod_id);
1635 	return peer;
1636 }
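
/*
 * Illustrative usage sketch (not part of the original sources; field names
 * are those referenced by the wrapper above): callers fill a cdp_peer_info
 * descriptor and select the peer type to search for, e.g.
 *
 *	struct cdp_peer_info peer_info = {0};
 *	struct dp_peer *peer;
 *
 *	peer_info.vdev_id = vdev_id;
 *	peer_info.mac_addr = peer_mac;
 *	peer_info.mac_addr_is_aligned = false;
 *	peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *	if (peer)
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */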
1637 
1638 /**
1639  * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1640  *				 increase mld peer ref_cnt
1641  * @link_peer: link peer pointer
1642  * @mld_peer: mld peer pointer
1643  *
1644  * Return: none
1645  */
1646 static inline
1647 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
1648 			       struct dp_peer *mld_peer)
1649 {
1650 	/* increase mld_peer ref_cnt */
1651 	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
1652 	link_peer->mld_peer = mld_peer;
1653 }
1654 
1655 /**
1656  * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1657  *				 decrease mld peer ref_cnt
1658  * @link_peer: link peer pointer
1659  *
1660  * Return: None
1661  */
1662 static inline
1663 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
1664 {
1665 	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
1666 	link_peer->mld_peer = NULL;
1667 }
1668 
1669 /**
1670  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1671  * @mld_peer: mld peer pointer
1672  *
1673  * Return: None
1674  */
1675 static inline
1676 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1677 {
1678 	int i;
1679 
1680 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1681 	mld_peer->num_links = 0;
1682 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1683 		mld_peer->link_peers[i].is_valid = false;
1684 }
1685 
1686 /**
1687  * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
1688  * @mld_peer: mld peer pointer
1689  *
1690  * Return: None
1691  */
1692 static inline
1693 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
1694 {
1695 	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
1696 }
1697 
1698 /**
1699  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1700  * @mld_peer: mld dp peer pointer
1701  * @link_peer: link dp peer pointer
1702  *
1703  * Return: None
1704  */
1705 static inline
1706 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1707 			       struct dp_peer *link_peer)
1708 {
1709 	int i;
1710 	struct dp_peer_link_info *link_peer_info;
1711 	struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1712 
1713 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1714 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1715 		link_peer_info = &mld_peer->link_peers[i];
1716 		if (!link_peer_info->is_valid) {
1717 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1718 				     link_peer->mac_addr.raw,
1719 				     QDF_MAC_ADDR_SIZE);
1720 			link_peer_info->is_valid = true;
1721 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1722 			link_peer_info->chip_id =
1723 				dp_get_chip_id(link_peer->vdev->pdev->soc);
1724 			mld_peer->num_links++;
1725 			break;
1726 		}
1727 	}
1728 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1729 
1730 	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1731 		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1732 		     "idx %u num_links %u",
1733 		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
1734 		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1735 		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1736 		     i, mld_peer->num_links);
1737 
1738 	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
1739 						mld_peer, link_peer, i,
1740 						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
1741 }
1742 
1743 /**
1744  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1745  * @mld_peer: MLD dp peer pointer
1746  * @link_peer: link dp peer pointer
1747  *
1748  * Return: number of links left after deletion
1749  */
1750 static inline
1751 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1752 				  struct dp_peer *link_peer)
1753 {
1754 	int i;
1755 	struct dp_peer_link_info *link_peer_info;
1756 	uint8_t num_links;
1757 	struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1758 
1759 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1760 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1761 		link_peer_info = &mld_peer->link_peers[i];
1762 		if (link_peer_info->is_valid &&
1763 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1764 					&link_peer_info->mac_addr)) {
1765 			link_peer_info->is_valid = false;
1766 			mld_peer->num_links--;
1767 			break;
1768 		}
1769 	}
1770 	num_links = mld_peer->num_links;
1771 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1772 
1773 	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1774 		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1775 		     "idx %u num_links %u",
1776 		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
1777 		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1778 		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1779 		     i, mld_peer->num_links);
1780 
1781 	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
1782 						mld_peer, link_peer, i,
1783 						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
1784 
1785 	return num_links;
1786 }
1787 
1788 /**
1789  * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
1790  *					   increase link peers ref_cnt
1791  * @soc: dp_soc handle
1792  * @mld_peer: dp mld peer pointer
1793  * @mld_link_peers: structure that hold links peers pointer array and number
1794  * @mod_id: id of module requesting reference
1795  *
1796  * Return: None
1797  */
1798 static inline
1799 void dp_get_link_peers_ref_from_mld_peer(
1800 				struct dp_soc *soc,
1801 				struct dp_peer *mld_peer,
1802 				struct dp_mld_link_peers *mld_link_peers,
1803 				enum dp_mod_id mod_id)
1804 {
1805 	struct dp_peer *peer;
1806 	uint8_t i = 0, j = 0;
1807 	struct dp_peer_link_info *link_peer_info;
1808 
1809 	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
1810 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1811 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
1812 		link_peer_info = &mld_peer->link_peers[i];
1813 		if (link_peer_info->is_valid) {
1814 			peer = dp_link_peer_hash_find_by_chip_id(
1815 						soc,
1816 						link_peer_info->mac_addr.raw,
1817 						true,
1818 						link_peer_info->vdev_id,
1819 						link_peer_info->chip_id,
1820 						mod_id);
1821 			if (peer)
1822 				mld_link_peers->link_peers[j++] = peer;
1823 		}
1824 	}
1825 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1826 
1827 	mld_link_peers->num_links = j;
1828 }
1829 
1830 /**
1831  * dp_release_link_peers_ref() - release all link peers reference
1832  * @mld_link_peers: structure that hold links peers pointer array and number
1833  * @mod_id: id of module requesting reference
1834  *
1835  * Return: None.
1836  */
1837 static inline
1838 void dp_release_link_peers_ref(
1839 			struct dp_mld_link_peers *mld_link_peers,
1840 			enum dp_mod_id mod_id)
1841 {
1842 	struct dp_peer *peer;
1843 	uint8_t i;
1844 
1845 	for (i = 0; i < mld_link_peers->num_links; i++) {
1846 		peer = mld_link_peers->link_peers[i];
1847 		if (peer)
1848 			dp_peer_unref_delete(peer, mod_id);
1849 		mld_link_peers->link_peers[i] = NULL;
1850 	}
1851 
1852 	mld_link_peers->num_links = 0;
1853 }
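
/*
 * Illustrative usage sketch (not part of the original sources): link peer
 * references taken via dp_get_link_peers_ref_from_mld_peer() are released in
 * one shot with dp_release_link_peers_ref(), e.g.
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	uint8_t i;
 *
 *	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *					    DP_MOD_ID_CDP);
 *	for (i = 0; i < link_peers_info.num_links; i++)
 *		(use link_peers_info.link_peers[i])
 *	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */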
1854 
1855 /**
1856  * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
1857  * @soc: Datapath soc handle
1858  * @peer_id: peer id
1859  * @lmac_id: lmac id to find the link peer on given lmac
1860  *
1861  * Return: peer_id of link peer if found
1862  *         else return HTT_INVALID_PEER
1863  */
1864 static inline
1865 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
1866 					uint8_t lmac_id)
1867 {
1868 	uint8_t i;
1869 	struct dp_peer *peer;
1870 	struct dp_peer *link_peer;
1871 	struct dp_soc *link_peer_soc;
1872 	struct dp_mld_link_peers link_peers_info;
1873 	uint16_t link_peer_id = HTT_INVALID_PEER;
1874 
1875 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
1876 
1877 	if (!peer)
1878 		return HTT_INVALID_PEER;
1879 
1880 	if (IS_MLO_DP_MLD_PEER(peer)) {
1881 		/* get link peers with reference */
1882 		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
1883 						    DP_MOD_ID_CDP);
1884 
1885 		for (i = 0; i < link_peers_info.num_links; i++) {
1886 			link_peer = link_peers_info.link_peers[i];
1887 			link_peer_soc = link_peer->vdev->pdev->soc;
1888 			if ((link_peer_soc == soc) &&
1889 			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
1890 				link_peer_id = link_peer->peer_id;
1891 				break;
1892 			}
1893 		}
1894 		/* release link peers reference */
1895 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
1896 	} else {
1897 		link_peer_id = peer_id;
1898 	}
1899 
1900 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1901 
1902 	return link_peer_id;
1903 }
1904 
1905 /**
1906  * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
1907  * @soc: soc handle
1908  * @peer_mac: peer mac address
1909  * @mac_addr_is_aligned: is mac addr aligned
1910  * @vdev_id: vdev_id
1911  * @mod_id: id of module requesting reference
1912  *
1913  * For an MLO connection, get the corresponding MLD peer;
1914  * otherwise get the link peer (non-MLO case).
1915  *
1916  * Return: peer on success
1917  *         NULL on failure
1918  */
1919 static inline
1920 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
1921 					       uint8_t *peer_mac,
1922 					       int mac_addr_is_aligned,
1923 					       uint8_t vdev_id,
1924 					       enum dp_mod_id mod_id)
1925 {
1926 	struct dp_peer *ta_peer = NULL;
1927 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
1928 						      peer_mac, 0, vdev_id,
1929 						      mod_id);
1930 
1931 	if (peer) {
1932 		/* mlo connection link peer, get mld peer with reference */
1933 		if (IS_MLO_DP_LINK_PEER(peer)) {
1934 			/* increase mld peer ref_cnt */
1935 			if (QDF_STATUS_SUCCESS ==
1936 			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1937 				ta_peer = peer->mld_peer;
1938 			else
1939 				ta_peer = NULL;
1940 
1941 			/* release the peer reference added by hash find */
1942 			dp_peer_unref_delete(peer, mod_id);
1943 		} else {
1944 			/* MLO MLD peer or non-MLO link peer */
1945 			ta_peer = peer;
1946 		}
1947 	} else {
1948 		dp_peer_err("failed to find peer: " QDF_MAC_ADDR_FMT,
1949 			    QDF_MAC_ADDR_REF(peer_mac));
1950 	}
1951 
1952 	return ta_peer;
1953 }
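
/*
 * Note: on success the caller holds a reference on the returned peer
 * (the MLD peer when an MLO link peer matched) taken with @mod_id, and
 * is expected to drop it with dp_peer_unref_delete(), e.g.:
 *
 *	peer = dp_peer_get_tgt_peer_hash_find(soc, mac, 0, vdev_id,
 *					      DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */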
1954 
1955 /**
1956  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1957  * @soc: core DP soc context
1958  * @peer_id: peer id from which the peer object can be retrieved
1959  * @mod_id: ID of module requesting reference
1960  *
1961  * For an MLO connection, get the corresponding MLD peer;
1962  * otherwise get the link peer (non-MLO case).
1963  *
1964  * Return: peer on success
1965  *         NULL on failure
1966  */
1967 static inline
1968 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1969 					   uint16_t peer_id,
1970 					   enum dp_mod_id mod_id)
1971 {
1972 	struct dp_peer *ta_peer = NULL;
1973 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1974 
1975 	if (peer) {
1976 		/* mlo connection link peer, get mld peer with reference */
1977 		if (IS_MLO_DP_LINK_PEER(peer)) {
1978 			/* increase mld peer ref_cnt */
1979 			if (QDF_STATUS_SUCCESS ==
1980 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1981 				ta_peer = peer->mld_peer;
1982 			else
1983 				ta_peer = NULL;
1984 
1985 			/* drop the reference from dp_peer_get_ref_by_id() */
1986 			dp_peer_unref_delete(peer, mod_id);
1987 		} else {
1988 			/* MLO MLD peer or non-MLO link peer */
1989 			ta_peer = peer;
1990 		}
1991 	}
1992 
1993 	return ta_peer;
1994 }
1995 
1996 /**
1997  * dp_peer_mlo_delete() - peer MLO related delete operation
1998  * @peer: DP peer handle
1999  * Return: None
2000  */
2001 static inline
2002 void dp_peer_mlo_delete(struct dp_peer *peer)
2003 {
2004 	struct dp_peer *ml_peer;
2005 	struct dp_soc *soc;
2006 
2007 	dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
2008 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);
2009 
2010 	/* MLO connection link peer */
2011 	if (IS_MLO_DP_LINK_PEER(peer)) {
2012 		ml_peer = peer->mld_peer;
2013 		soc = ml_peer->vdev->pdev->soc;
2014 
2015 		/* if this was the last link peer, delete the MLD peer too */
2016 		if (dp_mld_peer_del_link_peer(ml_peer, peer) == 0)
2017 			dp_peer_delete(soc, ml_peer, NULL);
2018 	}
2019 }
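
/*
 * Note: dp_mld_peer_del_link_peer() returns the number of link peers
 * still attached to the MLD peer, so a return value of 0 above means the
 * last link was just unlinked and the MLD peer itself can be deleted.
 */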
2020 
2021 /**
2022  * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
2023  * @soc: Soc handle
2024  * @peer: DP peer handle
2025  * @vdev_id: Vdev ID
2026  * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
2027  */
2028 QDF_STATUS dp_peer_mlo_setup(
2029 			struct dp_soc *soc,
2030 			struct dp_peer *peer,
2031 			uint8_t vdev_id,
2032 			struct cdp_peer_setup_info *setup_info);
2033 
2034 /**
2035  * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
2036  * @peer: datapath peer
2037  *
2038  * Return: MLD peer in case of MLO Link peer
2039  *	   Peer itself in other cases
2040  */
2041 static inline
2042 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
2043 {
2044 	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
2045 }
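
/*
 * Note: unlike dp_peer_get_tgt_peer_hash_find()/dp_peer_get_tgt_peer_by_id(),
 * this helper takes no reference of its own; the returned pointer is only
 * safe to use while the caller's existing reference on @peer is held.
 */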
2046 
2047 /**
2048  * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
2049  *					peer id
2050  * @soc: core DP soc context
2051  * @peer_id: peer id
2052  * @mod_id: ID of module requesting reference
2053  *
2054  * Return: primary link peer for the MLO peer
2055  *	   legacy peer itself in case of legacy peer
2056  */
2057 static inline
2058 struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
2059 					       uint16_t peer_id,
2060 					       enum dp_mod_id mod_id)
2061 {
2062 	uint8_t i;
2063 	struct dp_mld_link_peers link_peers_info;
2064 	struct dp_peer *peer;
2065 	struct dp_peer *link_peer;
2066 	struct dp_peer *primary_peer = NULL;
2067 
2068 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
2069 
2070 	if (!peer)
2071 		return NULL;
2072 
2073 	if (IS_MLO_DP_MLD_PEER(peer)) {
2074 		/* get link peers with reference */
2075 		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
2076 						    mod_id);
2077 
2078 		for (i = 0; i < link_peers_info.num_links; i++) {
2079 			link_peer = link_peers_info.link_peers[i];
2080 			if (link_peer->primary_link) {
2081 				primary_peer = link_peer;
2082 				/*
2083 				 * Take additional reference over
2084 				 * primary link peer.
2085 				 */
2086 				dp_peer_get_ref(NULL, primary_peer, mod_id);
2087 				break;
2088 			}
2089 		}
2090 		/* release link peers reference */
2091 		dp_release_link_peers_ref(&link_peers_info, mod_id);
2092 		dp_peer_unref_delete(peer, mod_id);
2093 	} else {
2094 		primary_peer = peer;
2095 	}
2096 
2097 	return primary_peer;
2098 }
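
/*
 * Note: the peer returned above carries a reference taken with @mod_id
 * (the extra reference on the primary link peer, or the reference on the
 * legacy peer itself); release it with dp_peer_unref_delete() when done.
 * NULL is returned if the peer id lookup fails or no link peer is marked
 * as the primary link.
 */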
2099 
2100 /**
2101  * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
2102  * @peer: Datapath peer
2103  *
2104  * Return: dp_txrx_peer from MLD peer if peer type is link peer
2105  *	   dp_txrx_peer from peer itself for other cases
2106  */
2107 static inline
2108 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
2109 {
2110 	return IS_MLO_DP_LINK_PEER(peer) ?
2111 				peer->mld_peer->txrx_peer : peer->txrx_peer;
2112 }
2113 
2114 /**
2115  * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
2116  * @peer: Datapath peer
2117  *
2118  * Return: true if peer is primary link peer or legacy peer
2119  *	   false otherwise
2120  */
2121 static inline
2122 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
2123 {
2124 	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
2125 		return true;
2126 	else if (IS_DP_LEGACY_PEER(peer))
2127 		return true;
2128 	else
2129 		return false;
2130 }
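
/*
 * Note: an MLD peer is neither an MLO link peer nor a legacy peer, so
 * dp_peer_is_primary_link_peer() returns false for MLD peers.
 */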
2131 
2132 /**
2133  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
2134  *
2135  * @soc: core DP soc context
2136  * @peer_id: peer id from which the peer object can be retrieved
2137  * @handle: reference handle
2138  * @mod_id: ID of module requesting reference
2139  *
2140  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
2141  */
2142 static inline struct dp_txrx_peer *
2143 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
2144 			       uint16_t peer_id,
2145 			       dp_txrx_ref_handle *handle,
2146 			       enum dp_mod_id mod_id)
2148 {
2149 	struct dp_peer *peer;
2150 	struct dp_txrx_peer *txrx_peer;
2151 
2152 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
2153 	if (!peer)
2154 		return NULL;
2155 
2156 	txrx_peer = dp_get_txrx_peer(peer);
2157 	if (txrx_peer) {
2158 		*handle = (dp_txrx_ref_handle)peer;
2159 		return txrx_peer;
2160 	}
2161 
2162 	dp_peer_unref_delete(peer, mod_id);
2163 	return NULL;
2164 }
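
/*
 * Illustrative sketch of the reference-handle usage (assumes
 * dp_txrx_peer_unref_delete() as the matching release helper):
 *
 *	dp_txrx_ref_handle ref_handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *						   DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		... use txrx_peer ...
 *		dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *	}
 */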
2165 
2166 /**
2167  * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
2168  *
2169  * @soc: core DP soc context
2170  *
2171  * Return: void
2172  */
2173 void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
2174 
2175 #else
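/*
 * WLAN_FEATURE_11BE_MLO disabled: provide no-op / pass-through stubs so
 * that callers of the MLO helpers above compile unchanged; every peer is
 * treated as a legacy peer in this configuration.
 */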
2176 
2177 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
2178 
2179 #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
2180 /* is legacy peer */
2181 #define IS_DP_LEGACY_PEER(_peer) true
2182 #define IS_MLO_DP_LINK_PEER(_peer) false
2183 #define IS_MLO_DP_MLD_PEER(_peer) false
2184 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
2185 
2186 static inline
2187 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
2188 					  struct cdp_peer_info *peer_info,
2189 					  enum dp_mod_id mod_id)
2190 {
2191 	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
2192 				      peer_info->mac_addr_is_aligned,
2193 				      peer_info->vdev_id,
2194 				      mod_id);
2195 }
2196 
2197 static inline
2198 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
2199 					       uint8_t *peer_mac,
2200 					       int mac_addr_is_aligned,
2201 					       uint8_t vdev_id,
2202 					       enum dp_mod_id mod_id)
2203 {
2204 	return dp_peer_find_hash_find(soc, peer_mac,
2205 				      mac_addr_is_aligned, vdev_id,
2206 				      mod_id);
2207 }
2208 
2209 static inline
2210 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
2211 					   uint16_t peer_id,
2212 					   enum dp_mod_id mod_id)
2213 {
2214 	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
2215 }
2216 
2217 static inline
2218 QDF_STATUS dp_peer_mlo_setup(
2219 			struct dp_soc *soc,
2220 			struct dp_peer *peer,
2221 			uint8_t vdev_id,
2222 			struct cdp_peer_setup_info *setup_info)
2223 {
2224 	return QDF_STATUS_SUCCESS;
2225 }
2226 
2227 static inline
2228 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
2229 {
2230 }
2231 
2232 static inline
2233 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
2234 {
2235 }
2236 
2237 static inline
2238 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
2239 {
2240 }
2241 
2242 static inline
2243 void dp_peer_mlo_delete(struct dp_peer *peer)
2244 {
2245 }
2246 
2247 static inline
2248 void dp_mlo_peer_authorize(struct dp_soc *soc,
2249 			   struct dp_peer *link_peer)
2250 {
2251 }
2252 
2253 static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
2254 {
2255 	return 0;
2256 }
2257 
2258 static inline struct dp_peer *
2259 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
2260 				  uint8_t *peer_mac_addr,
2261 				  int mac_addr_is_aligned,
2262 				  uint8_t vdev_id,
2263 				  uint8_t chip_id,
2264 				  enum dp_mod_id mod_id)
2265 {
2266 	return dp_peer_find_hash_find(soc, peer_mac_addr,
2267 				      mac_addr_is_aligned,
2268 				      vdev_id, mod_id);
2269 }
2270 
2271 static inline
2272 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
2273 {
2274 	return peer;
2275 }
2276 
2277 static inline
2278 struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
2279 					       uint16_t peer_id,
2280 					       enum dp_mod_id mod_id)
2281 {
2282 	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
2283 }
2284 
2285 static inline
2286 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
2287 {
2288 	return peer->txrx_peer;
2289 }
2290 
2291 static inline
2292 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
2293 {
2294 	return true;
2295 }
2296 
2297 /**
2298  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
2299  *
2300  * @soc: core DP soc context
2301  * @peer_id: peer id from which the peer object can be retrieved
2302  * @handle: reference handle
2303  * @mod_id: ID of module requesting reference
2304  *
2305  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
2306  */
2307 static inline struct dp_txrx_peer *
2308 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
2309 			       uint16_t peer_id,
2310 			       dp_txrx_ref_handle *handle,
2311 			       enum dp_mod_id mod_id)
2313 {
2314 	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
2315 }
2316 
2317 static inline
2318 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
2319 					uint8_t lmac_id)
2320 {
2321 	return peer_id;
2322 }
2323 
2324 static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
2325 {
2326 }
2327 #endif /* WLAN_FEATURE_11BE_MLO */
2328 
2329 static inline
2330 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
2331 {
2332 	uint8_t i;
2333 
2334 	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
2335 		     sizeof(struct dp_rx_tid_defrag));
2336 
2337 	for (i = 0; i < DP_MAX_TIDS; i++)
2338 		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
2339 }
2340 
2341 static inline
2342 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
2343 {
2344 	uint8_t i;
2345 
2346 	for (i = 0; i < DP_MAX_TIDS; i++)
2347 		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
2348 }
2349 
2350 #ifdef PEER_CACHE_RX_PKTS
2351 static inline
2352 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
2353 {
2354 	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
2355 	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
2356 	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
2357 			DP_RX_CACHED_BUFQ_THRESH);
2358 }
2359 
2360 static inline
2361 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
2362 {
2363 	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
2364 	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
2365 }
2366 
2367 #else
2368 static inline
2369 void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
2370 {
2371 }
2372 
2373 static inline
2374 void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
2375 {
2376 }
2377 #endif
2378 
2379 /**
2380  * dp_peer_update_state() - update dp peer state
2381  *
2382  * @soc: core DP soc context
2383  * @peer: DP peer
2384  * @state: new state
2385  *
2386  * Return: None
2387  */
2388 static inline void
2389 dp_peer_update_state(struct dp_soc *soc,
2390 		     struct dp_peer *peer,
2391 		     enum dp_peer_state state)
2392 {
2393 	uint8_t peer_state;
2394 
2395 	qdf_spin_lock_bh(&peer->peer_state_lock);
2396 	peer_state = peer->peer_state;
2397 
2398 	switch (state) {
2399 	case DP_PEER_STATE_INIT:
2400 		DP_PEER_STATE_ASSERT
2401 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
2402 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
2403 		break;
2404 
2405 	case DP_PEER_STATE_ACTIVE:
2406 		DP_PEER_STATE_ASSERT(peer, state,
2407 				     (peer_state == DP_PEER_STATE_INIT));
2408 		break;
2409 
2410 	case DP_PEER_STATE_LOGICAL_DELETE:
2411 		DP_PEER_STATE_ASSERT(peer, state,
2412 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
2413 				     (peer_state == DP_PEER_STATE_INIT));
2414 		break;
2415 
2416 	case DP_PEER_STATE_INACTIVE:
2417 		if (IS_MLO_DP_MLD_PEER(peer))
2418 			DP_PEER_STATE_ASSERT
2419 				(peer, state,
2420 				 (peer_state == DP_PEER_STATE_ACTIVE));
2421 		else
2422 			DP_PEER_STATE_ASSERT
2423 				(peer, state,
2424 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2425 		break;
2426 
2427 	case DP_PEER_STATE_FREED:
2428 		if (peer->sta_self_peer)
2429 			DP_PEER_STATE_ASSERT
2430 				(peer, state, (peer_state == DP_PEER_STATE_INIT));
2431 		else
2432 			DP_PEER_STATE_ASSERT
2433 				(peer, state,
2434 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
2435 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2436 		break;
2437 
2438 	default:
2439 		qdf_spin_unlock_bh(&peer->peer_state_lock);
2440 		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
2441 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2442 		return;
2443 	}
2444 	peer->peer_state = state;
2445 	qdf_spin_unlock_bh(&peer->peer_state_lock);
2446 	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT,
2447 		peer_state, state,
2448 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2449 }
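
/*
 * Transition summary enforced by the asserts above (derived from the
 * switch cases; not an authoritative state-machine specification):
 *
 *	INIT           <- any state other than ACTIVE/LOGICAL_DELETE
 *	ACTIVE         <- INIT
 *	LOGICAL_DELETE <- ACTIVE or INIT
 *	INACTIVE       <- ACTIVE (MLD peer) / LOGICAL_DELETE (others)
 *	FREED          <- INIT (sta self peer) / INACTIVE or LOGICAL_DELETE
 */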
2450 
2451 /**
2452  * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
2453  * list based on type of peer (legacy or MLO link peer)
2454  *
2455  * @vdev: DP vdev context
2456  * @func: function to be called for each peer
2457  * @arg: argument to be passed to func
2458  * @mod_id: module_id
2459  * @peer_type: type of peer - MLO Link Peer or Legacy Peer
2460  *
2461  * Return: void
2462  */
2463 static inline void
2464 dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
2465 				   dp_peer_iter_func *func,
2466 				   void *arg, enum dp_mod_id mod_id,
2467 				   enum dp_peer_type peer_type)
2468 {
2469 	struct dp_peer *peer;
2470 	struct dp_peer *tmp_peer;
2471 	struct dp_soc *soc = NULL;
2472 
2473 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
2474 		return;
2475 
2476 	soc = vdev->pdev->soc;
2477 
2478 	qdf_spin_lock_bh(&vdev->peer_list_lock);
2479 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
2480 			   peer_list_elem,
2481 			   tmp_peer) {
2482 		if (dp_peer_get_ref(soc, peer, mod_id) ==
2483 					QDF_STATUS_SUCCESS) {
2484 			if ((peer_type == DP_PEER_TYPE_LEGACY &&
2485 			     (IS_DP_LEGACY_PEER(peer))) ||
2486 			    (peer_type == DP_PEER_TYPE_MLO_LINK &&
2487 			     (IS_MLO_DP_LINK_PEER(peer)))) {
2488 				(*func)(soc, peer, arg);
2489 			}
2490 			dp_peer_unref_delete(peer, mod_id);
2491 		}
2492 	}
2493 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
2494 }
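
/*
 * Illustrative iterator usage (count_peer_cb and the counter argument are
 * placeholders, not part of this API):
 *
 *	static void count_peer_cb(struct dp_soc *soc, struct dp_peer *peer,
 *				  void *arg)
 *	{
 *		(*(uint32_t *)arg)++;
 *	}
 *
 *	uint32_t num_legacy_peers = 0;
 *
 *	dp_vdev_iterate_specific_peer_type(vdev, count_peer_cb,
 *					   &num_legacy_peers, DP_MOD_ID_CDP,
 *					   DP_PEER_TYPE_LEGACY);
 */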
2495 
2496 #ifdef REO_SHARED_QREF_TABLE_EN
2497 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2498 					struct dp_peer *peer);
2499 #else
2500 static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2501 						      struct dp_peer *peer) {}
2502 #endif
2503 
2504 /**
2505  * dp_peer_check_wds_ext_peer() - Check WDS ext peer
2506  *
2507  * @peer: DP peer
2508  *
2509  * Return: True for WDS ext peer, false otherwise
2510  */
2511 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);
2512 
2513 /**
2514  * dp_gen_ml_peer_id() - Generate MLD peer id for DP
2515  *
2516  * @soc: DP soc context
2517  * @peer_id: mld peer id
2518  *
2519  * Return: DP MLD peer id
2520  */
2521 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);
2522 
2523 #ifdef FEATURE_AST
2524 /**
2525  * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
2526  * @soc: SoC handle
2527  * @peer_id: peer id from firmware
2528  * @mac_addr: MAC address of ast node
2529  * @hw_peer_id: HW AST Index returned by target in peer map event
2530  * @vdev_id: vdev id of the VAP to which the peer belongs
2531  * @ast_hash: ast hash value in HW
2532  * @is_wds: flag to indicate peer map event for WDS ast entry
2533  *
2534  * Return: QDF_STATUS code
2535  */
2536 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2537 				    uint8_t *mac_addr, uint16_t hw_peer_id,
2538 				    uint8_t vdev_id, uint16_t ast_hash,
2539 				    uint8_t is_wds);
2540 #endif
2541 #endif /* _DP_PEER_H_ */
2542