xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 
27 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
28 #include "hal_reo.h"
29 #endif
30 
31 #define DP_INVALID_PEER_ID 0xffff
32 
33 #define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
34 #define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */
35 
36 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
37 
38 #define DP_PEER_HASH_LOAD_MULT  2
39 #define DP_PEER_HASH_LOAD_SHIFT 0
40 
41 /* Threshold for peer's cached buf queue beyond which frames are dropped */
42 #define DP_RX_CACHED_BUFQ_THRESH 64
43 
44 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
45 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
46 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
47 #define dp_peer_info(params...) \
48 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
49 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
50 
51 #ifdef REO_QDESC_HISTORY
52 enum reo_qdesc_event_type {
53 	REO_QDESC_UPDATE_CB = 0,
54 	REO_QDESC_FREE,
55 };
56 
57 struct reo_qdesc_event {
58 	qdf_dma_addr_t qdesc_addr;
59 	uint64_t ts;
60 	enum reo_qdesc_event_type type;
61 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
62 };
63 #endif
64 
65 struct ast_del_ctxt {
66 	bool age;
67 	int del_count;
68 };
69 
70 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
71 			       void *arg);
72 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
73 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
74 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
75 				       uint8_t *peer_mac_addr,
76 				       int mac_addr_is_aligned,
77 				       uint8_t vdev_id,
78 				       enum dp_mod_id id);
79 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
80 
81 #ifdef DP_UMAC_HW_RESET_SUPPORT
82 void dp_reset_tid_q_setup(struct dp_soc *soc);
83 #endif
84 /**
85  * dp_peer_get_ref() - Returns peer object given the peer id
86  *
87  * @soc		: core DP soc context
88  * @peer	: DP peer
89  * @mod_id	: id of module requesting the reference
90  *
91  * Return:	QDF_STATUS_SUCCESS if reference held successfully
92  *		else QDF_STATUS_E_INVAL
93  */
94 static inline
95 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
96 			   struct dp_peer *peer,
97 			   enum dp_mod_id mod_id)
98 {
99 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
100 		return QDF_STATUS_E_INVAL;
101 
102 	if (mod_id > DP_MOD_ID_RX)
103 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
104 
105 	return QDF_STATUS_SUCCESS;
106 }
107 
108 /**
109  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
110  *
111  * @soc		: core DP soc context
112  * @peer_id	: peer id from peer object can be retrieved
113  * @mod_id	: module id
114  *
115  * Return: struct dp_peer*: Pointer to DP peer object
116  */
117 static inline struct dp_peer *
118 __dp_peer_get_ref_by_id(struct dp_soc *soc,
119 			uint16_t peer_id,
120 			enum dp_mod_id mod_id)
121 
122 {
123 	struct dp_peer *peer;
124 
125 	qdf_spin_lock_bh(&soc->peer_map_lock);
126 	peer = (peer_id >= soc->max_peer_id) ? NULL :
127 				soc->peer_id_to_obj_map[peer_id];
128 	if (!peer ||
129 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
130 		qdf_spin_unlock_bh(&soc->peer_map_lock);
131 		return NULL;
132 	}
133 
134 	qdf_spin_unlock_bh(&soc->peer_map_lock);
135 	return peer;
136 }
137 
138 /**
139  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
140  *                        if peer state is active
141  *
142  * @soc		: core DP soc context
143  * @peer_id	: peer id from peer object can be retrieved
144  * @mod_id      : ID of module requesting reference
145  *
146  * Return: struct dp_peer*: Pointer to DP peer object
147  */
148 static inline
149 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
150 				      uint16_t peer_id,
151 				      enum dp_mod_id mod_id)
152 {
153 	struct dp_peer *peer;
154 
155 	qdf_spin_lock_bh(&soc->peer_map_lock);
156 	peer = (peer_id >= soc->max_peer_id) ? NULL :
157 				soc->peer_id_to_obj_map[peer_id];
158 
159 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
160 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
161 		qdf_spin_unlock_bh(&soc->peer_map_lock);
162 		return NULL;
163 	}
164 
165 	qdf_spin_unlock_bh(&soc->peer_map_lock);
166 
167 	return peer;
168 }
169 
170 /**
171  * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
172  *
173  * @soc		: core DP soc context
174  * @peer_id	: peer id from peer object can be retrieved
175  * @handle	: reference handle
176  * @mod_id      : ID of module requesting reference
177  *
178  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
179  */
180 static inline struct dp_txrx_peer *
181 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
182 			   uint16_t peer_id,
183 			   dp_txrx_ref_handle *handle,
184 			   enum dp_mod_id mod_id)
185 
186 {
187 	struct dp_peer *peer;
188 
189 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
190 	if (!peer)
191 		return NULL;
192 
193 	if (!peer->txrx_peer) {
194 		dp_peer_unref_delete(peer, mod_id);
195 		return NULL;
196 	}
197 
198 	*handle = (dp_txrx_ref_handle)peer;
199 	return peer->txrx_peer;
200 }
201 
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* Rx-packet caching disabled at build time: flushing is a no-op. */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
216 
/* Mark @peer as disconnected (under peer_info_lock) and drop any rx
 * frames that were cached for it.
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
226 
227 /**
228  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
229  *
230  * @vdev	: DP vdev context
231  * @func	: function to be called for each peer
232  * @arg		: argument need to be passed to func
233  * @mod_id	: module_id
234  *
235  * Return: void
236  */
237 static inline void
238 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
239 		     enum dp_mod_id mod_id)
240 {
241 	struct dp_peer *peer;
242 	struct dp_peer *tmp_peer;
243 	struct dp_soc *soc = NULL;
244 
245 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
246 		return;
247 
248 	soc = vdev->pdev->soc;
249 
250 	qdf_spin_lock_bh(&vdev->peer_list_lock);
251 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
252 			   peer_list_elem,
253 			   tmp_peer) {
254 		if (dp_peer_get_ref(soc, peer, mod_id) ==
255 					QDF_STATUS_SUCCESS) {
256 			(*func)(soc, peer, arg);
257 			dp_peer_unref_delete(peer, mod_id);
258 		}
259 	}
260 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
261 }
262 
263 /**
264  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
265  *
266  * @pdev	: DP pdev context
267  * @func	: function to be called for each peer
268  * @arg		: argument need to be passed to func
269  * @mod_id	: module_id
270  *
271  * Return: void
272  */
273 static inline void
274 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
275 		     enum dp_mod_id mod_id)
276 {
277 	struct dp_vdev *vdev;
278 
279 	if (!pdev)
280 		return;
281 
282 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
283 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
284 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
285 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
286 }
287 
288 /**
289  * dp_soc_iterate_peer() - API to iterate through all peers of soc
290  *
291  * @soc		: DP soc context
292  * @func	: function to be called for each peer
293  * @arg		: argument need to be passed to func
294  * @mod_id	: module_id
295  *
296  * Return: void
297  */
298 static inline void
299 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
300 		    enum dp_mod_id mod_id)
301 {
302 	struct dp_pdev *pdev;
303 	int i;
304 
305 	if (!soc)
306 		return;
307 
308 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
309 		pdev = soc->pdev_list[i];
310 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
311 	}
312 }
313 
314 /**
315  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
316  *
317  * This API will cache the peers in local allocated memory and calls
318  * iterate function outside the lock.
319  *
320  * As this API is allocating new memory it is suggested to use this
321  * only when lock cannot be held
322  *
323  * @vdev	: DP vdev context
324  * @func	: function to be called for each peer
325  * @arg		: argument need to be passed to func
326  * @mod_id	: module_id
327  *
328  * Return: void
329  */
330 static inline void
331 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
332 			       dp_peer_iter_func *func,
333 			       void *arg,
334 			       enum dp_mod_id mod_id)
335 {
336 	struct dp_peer *peer;
337 	struct dp_peer *tmp_peer;
338 	struct dp_soc *soc = NULL;
339 	struct dp_peer **peer_array = NULL;
340 	int i = 0;
341 	uint32_t num_peers = 0;
342 
343 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
344 		return;
345 
346 	num_peers = vdev->num_peers;
347 
348 	soc = vdev->pdev->soc;
349 
350 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
351 	if (!peer_array)
352 		return;
353 
354 	qdf_spin_lock_bh(&vdev->peer_list_lock);
355 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
356 			   peer_list_elem,
357 			   tmp_peer) {
358 		if (i >= num_peers)
359 			break;
360 
361 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
362 			peer_array[i] = peer;
363 			i = (i + 1);
364 		}
365 	}
366 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
367 
368 	for (i = 0; i < num_peers; i++) {
369 		peer = peer_array[i];
370 
371 		if (!peer)
372 			continue;
373 
374 		(*func)(soc, peer, arg);
375 		dp_peer_unref_delete(peer, mod_id);
376 	}
377 
378 	qdf_mem_free(peer_array);
379 }
380 
381 /**
382  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
383  *
384  * This API will cache the peers in local allocated memory and calls
385  * iterate function outside the lock.
386  *
387  * As this API is allocating new memory it is suggested to use this
388  * only when lock cannot be held
389  *
390  * @pdev	: DP pdev context
391  * @func	: function to be called for each peer
392  * @arg		: argument need to be passed to func
393  * @mod_id	: module_id
394  *
395  * Return: void
396  */
397 static inline void
398 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
399 			       dp_peer_iter_func *func,
400 			       void *arg,
401 			       enum dp_mod_id mod_id)
402 {
403 	struct dp_peer *peer;
404 	struct dp_peer *tmp_peer;
405 	struct dp_soc *soc = NULL;
406 	struct dp_vdev *vdev = NULL;
407 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
408 	int i = 0;
409 	int j = 0;
410 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
411 
412 	if (!pdev || !pdev->soc)
413 		return;
414 
415 	soc = pdev->soc;
416 
417 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
418 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
419 		num_peers[i] = vdev->num_peers;
420 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
421 					       sizeof(struct dp_peer *));
422 		if (!peer_array[i])
423 			break;
424 
425 		qdf_spin_lock_bh(&vdev->peer_list_lock);
426 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
427 				   peer_list_elem,
428 				   tmp_peer) {
429 			if (j >= num_peers[i])
430 				break;
431 
432 			if (dp_peer_get_ref(soc, peer, mod_id) ==
433 					QDF_STATUS_SUCCESS) {
434 				peer_array[i][j] = peer;
435 
436 				j = (j + 1);
437 			}
438 		}
439 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
440 		i = (i + 1);
441 	}
442 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
443 
444 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
445 		if (!peer_array[i])
446 			break;
447 
448 		for (j = 0; j < num_peers[i]; j++) {
449 			peer = peer_array[i][j];
450 
451 			if (!peer)
452 				continue;
453 
454 			(*func)(soc, peer, arg);
455 			dp_peer_unref_delete(peer, mod_id);
456 		}
457 
458 		qdf_mem_free(peer_array[i]);
459 	}
460 }
461 
462 /**
463  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
464  *
465  * This API will cache the peers in local allocated memory and calls
466  * iterate function outside the lock.
467  *
468  * As this API is allocating new memory it is suggested to use this
469  * only when lock cannot be held
470  *
471  * @soc		: DP soc context
472  * @func	: function to be called for each peer
473  * @arg		: argument need to be passed to func
474  * @mod_id	: module_id
475  *
476  * Return: void
477  */
478 static inline void
479 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
480 			      dp_peer_iter_func *func,
481 			      void *arg,
482 			      enum dp_mod_id mod_id)
483 {
484 	struct dp_pdev *pdev;
485 	int i;
486 
487 	if (!soc)
488 		return;
489 
490 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
491 		pdev = soc->pdev_list[i];
492 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
493 	}
494 }
495 
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * If @_condition is false, logs the invalid shift from the peer's current
 * state to @_new_state and, in this debug build, asserts.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/* Non-debug variant: log the invalid transition but do not assert. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
519 
520 /**
521  * dp_peer_state_cmp() - compare dp peer state
522  *
523  * @peer	: DP peer
524  * @state	: state
525  *
526  * Return: true if state matches with peer state
527  *	   false if it does not match
528  */
529 static inline bool
530 dp_peer_state_cmp(struct dp_peer *peer,
531 		  enum dp_peer_state state)
532 {
533 	bool is_status_equal = false;
534 
535 	qdf_spin_lock_bh(&peer->peer_state_lock);
536 	is_status_equal = (peer->peer_state == state);
537 	qdf_spin_unlock_bh(&peer->peer_state_lock);
538 
539 	return is_status_equal;
540 }
541 
542 void dp_print_ast_stats(struct dp_soc *soc);
543 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
544 				  uint16_t hw_peer_id, uint8_t vdev_id,
545 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
546 				  uint8_t is_wds);
547 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
548 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
549 			      uint8_t is_wds, uint32_t free_wds_count);
550 
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
 * @soc: dp soc pointer
 * @vdev_id: vdev id
 * @peer_mac_addr: mac address of the peer
 *
 * This function resets the roamed peer auth status and mac address
 * after peer map indication of same peer is received from firmware.
 *
 * Return: None
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *peer_mac_addr);
#else
/* Roaming-peer handling disabled at build time: no-op. */
static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
					    uint8_t *peer_mac_addr)
{
}
#endif
571 
572 #ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 * @mlo_link_info: MLO link info
 *
 * associate the ML peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
586 QDF_STATUS
587 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
588 			   uint8_t *peer_mac_addr,
589 			   struct dp_mlo_flow_override_info *mlo_flow_info,
590 			   struct dp_mlo_link_info *mlo_link_info);
591 
/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
599 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
600 #endif
601 
602 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
603 			   enum cdp_sec_type sec_type, int is_unicast,
604 			   u_int32_t *michael_key, u_int32_t *rx_pn);
605 
606 QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
607 				   uint8_t tid, uint16_t win_sz);
608 
609 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
610 		uint16_t peer_id, uint8_t *peer_mac);
611 
612 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
613 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
614 			   uint32_t flags);
615 
616 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
617 
618 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
619 			       struct dp_ast_entry *ast_entry);
620 
621 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
622 			struct dp_ast_entry *ast_entry,	uint32_t flags);
623 
624 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
625 						     uint8_t *ast_mac_addr,
626 						     uint8_t pdev_id);
627 
628 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
629 						     uint8_t *ast_mac_addr,
630 						     uint8_t vdev_id);
631 
632 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
633 					       uint8_t *ast_mac_addr);
634 
635 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
636 				struct dp_ast_entry *ast_entry);
637 
638 
639 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
640 				struct dp_ast_entry *ast_entry);
641 
642 void dp_peer_ast_set_type(struct dp_soc *soc,
643 				struct dp_ast_entry *ast_entry,
644 				enum cdp_txrx_ast_entry_type type);
645 
646 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
647 			      struct dp_ast_entry *ast_entry,
648 			      struct dp_peer *peer);
649 
650 #ifdef WLAN_FEATURE_MULTI_AST_DEL
651 void dp_peer_ast_send_multi_wds_del(
652 		struct dp_soc *soc, uint8_t vdev_id,
653 		struct peer_del_multi_wds_entries *wds_list);
654 #endif
655 
656 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
657 			   struct cdp_soc *dp_soc,
658 			   void *cookie,
659 			   enum cdp_ast_free_status status);
660 
661 void dp_peer_ast_hash_remove(struct dp_soc *soc,
662 			     struct dp_ast_entry *ase);
663 
664 void dp_peer_free_ast_entry(struct dp_soc *soc,
665 			    struct dp_ast_entry *ast_entry);
666 
667 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
668 			      struct dp_ast_entry *ast_entry,
669 			      struct dp_peer *peer);
670 
671 /**
672  * dp_peer_mec_detach_entry() - Detach the MEC entry
673  * @soc: SoC handle
674  * @mecentry: MEC entry of the node
675  * @ptr: pointer to free list
676  *
677  * The MEC entry is detached from MEC table and added to free_list
678  * to free the object outside lock
679  *
680  * Return: None
681  */
682 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
683 			      void *ptr);
684 
685 /**
686  * dp_peer_mec_free_list() - free the MEC entry from free_list
687  * @soc: SoC handle
688  * @ptr: pointer to free list
689  *
690  * Return: None
691  */
692 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
693 
694 /**
695  * dp_peer_mec_add_entry()
696  * @soc: SoC handle
697  * @vdev: vdev to which mec node belongs
698  * @mac_addr: MAC address of mec node
699  *
700  * This function allocates and adds MEC entry to MEC table.
701  * It assumes caller has taken the mec lock to protect the access to these
702  * tables
703  *
704  * Return: QDF_STATUS
705  */
706 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
707 				 struct dp_vdev *vdev,
708 				 uint8_t *mac_addr);
709 
710 /**
711  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
712  * within pdev
713  * @soc: SoC handle
714  *
715  * It assumes caller has taken the mec_lock to protect the access to
716  * MEC hash table
717  *
718  * Return: MEC entry
719  */
720 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
721 						     uint8_t pdev_id,
722 						     uint8_t *mec_mac_addr);
723 
/*
 * DP_AST_ASSERT() - assert @_condition, dumping AST stats first on failure.
 * NOTE: expands a reference to a variable named 'soc' — the call site must
 * have a struct dp_soc pointer with that exact name in scope.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
731 
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
738 void
739 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
740 			     uint32_t *tag_buf);
741 
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Stub: multipass support disabled at build time.
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Stub: trivially reports success when multipass support is disabled.
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Stub: multipass support disabled at build time.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Stub: multipass support disabled at build time.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
805 

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Stub: per-peer multi-queue support disabled at build time.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Stub: per-peer multi-queue support disabled at build time.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info);
#endif
841 
842 /*
843  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
844  * after deleting the entries (ie., setting valid=0)
845  *
846  * @soc: DP SOC handle
847  * @cb_ctxt: Callback context
848  * @reo_status: REO command status
849  */
850 void dp_rx_tid_delete_cb(struct dp_soc *soc,
851 			 void *cb_ctxt,
852 			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
/* Allocate, free, and clear the per-peer delay statistics context. */
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Peer extended stats disabled at build time: allocation trivially
 * succeeds and the remaining operations are no-ops.
 */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
/* Allocate, free, and clear the per-peer jitter statistics context. */
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);

void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);

void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Peer jitter stats disabled at build time: allocation trivially
 * succeeds and the remaining operations are no-ops.
 */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
907 

#ifndef CONFIG_SAWF_DEF_QUEUES
/* SAWF default-queue support disabled at build time: per-peer SAWF
 * context alloc/free are no-op successes.
 */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

#endif

#ifndef CONFIG_SAWF
/* SAWF stats disabled at build time: stats context alloc/free are
 * no-op successes.
 */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
938 
939 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
940 					   struct dp_vdev *vdev,
941 					   enum dp_mod_id mod_id);
942 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
943 						struct dp_vdev *vdev,
944 						enum dp_mod_id mod_id);
945 
946 void dp_peer_ast_table_detach(struct dp_soc *soc);
947 void dp_peer_find_map_detach(struct dp_soc *soc);
948 void dp_soc_wds_detach(struct dp_soc *soc);
949 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
950 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
951 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
952 void dp_soc_wds_attach(struct dp_soc *soc);
953 void dp_peer_mec_hash_detach(struct dp_soc *soc);
954 void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* Safe iterator: dp_peer_del_ast() unlinks the entry being
	 * visited, so the next pointer must be captured up front.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}

void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
#else
/* FEATURE_AST disabled at build time: AST helpers are no-ops. */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* FEATURE_MEC disabled at build time: MEC lock/flush helpers are no-ops. */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
1036 
1037 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
1038 /**
1039  * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
1040  * @soc : dp_soc handle
1041  * @peer: peer
1042  *
1043  * This function is used to send cache flush cmd to reo and
1044  * to register the callback to handle the dumping of the reo
1045  * queue stas from DDR
1046  *
1047  * Return: none
1048  */
1049 void dp_send_cache_flush_for_rx_tid(
1050 	struct dp_soc *soc, struct dp_peer *peer);
1051 
1052 /**
1053  * dp_get_rx_reo_queue_info() - Handler to get rx tid info
1054  * @soc : cdp_soc_t handle
1055  * @vdev_id: vdev id
1056  *
1057  * Handler to get rx tid info from DDR after h/w cache is
1058  * invalidated first using the cache flush cmd.
1059  *
1060  * Return: none
1061  */
1062 void dp_get_rx_reo_queue_info(
1063 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
1064 
1065 /**
1066  * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
1067  * @soc : dp_soc handle
1068  * @cb_ctxt - callback context
1069  * @reo_status: vdev id
1070  *
1071  * This is the callback function registered after sending the reo cmd
1072  * to flush the h/w cache and invalidate it. In the callback the reo
1073  * queue desc info is dumped from DDR.
1074  *
1075  * Return: none
1076  */
1077 void dp_dump_rx_reo_queue_info(
1078 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
1079 
1080 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
1081 
/* DUMP_REO_QUEUE_INFO_IN_DDR disabled: REO queue info dump is a no-op */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
1086 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
1087 
1088 static inline int dp_peer_find_mac_addr_cmp(
1089 	union dp_align_mac_addr *mac_addr1,
1090 	union dp_align_mac_addr *mac_addr2)
1091 {
1092 		/*
1093 		 * Intentionally use & rather than &&.
1094 		 * because the operands are binary rather than generic boolean,
1095 		 * the functionality is equivalent.
1096 		 * Using && has the advantage of short-circuited evaluation,
1097 		 * but using & has the advantage of no conditional branching,
1098 		 * which is a more significant benefit.
1099 		 */
1100 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1101 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1102 }
1103 
1104 /**
1105  * dp_peer_delete() - delete DP peer
1106  *
1107  * @soc: Datatpath soc
1108  * @peer: Datapath peer
1109  * @arg: argument to iter function
1110  *
1111  * Return: void
1112  */
1113 void dp_peer_delete(struct dp_soc *soc,
1114 		    struct dp_peer *peer,
1115 		    void *arg);
1116 
1117 /**
1118  * dp_mlo_peer_delete() - delete MLO DP peer
1119  *
1120  * @soc: Datapath soc
1121  * @peer: Datapath peer
1122  * @arg: argument to iter function
1123  *
1124  * Return: void
1125  */
1126 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
1127 
1128 #ifdef WLAN_FEATURE_11BE_MLO
1129 
1130 /* is MLO connection mld peer */
1131 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
1132 
1133 /* set peer type */
1134 #define DP_PEER_SET_TYPE(_peer, _type_val) \
1135 	((_peer)->peer_type = (_type_val))
1136 
1137 /* is legacy peer */
1138 #define IS_DP_LEGACY_PEER(_peer) \
1139 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
1140 /* is MLO connection link peer */
1141 #define IS_MLO_DP_LINK_PEER(_peer) \
1142 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
1143 /* is MLO connection mld peer */
1144 #define IS_MLO_DP_MLD_PEER(_peer) \
1145 	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
1146 /* Get Mld peer from link peer */
1147 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
1148 	((link_peer)->mld_peer)
1149 
1150 #ifdef WLAN_MLO_MULTI_CHIP
1151 uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
1152 
1153 struct dp_peer *
1154 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1155 				  uint8_t *peer_mac_addr,
1156 				  int mac_addr_is_aligned,
1157 				  uint8_t vdev_id,
1158 				  uint8_t chip_id,
1159 				  enum dp_mod_id mod_id);
1160 #else
/* single-chip build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
1165 
/* single-chip build: chip_id is ignored, plain hash lookup on this soc */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1178 #endif
1179 
1180 /*
1181  * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
1182  *				  matching mac_address
1183  * @soc: soc handle
1184  * @peer_mac_addr: mld peer mac address
1185  * @mac_addr_is_aligned: is mac addr aligned
1186  * @vdev_id: vdev_id
1187  * @mod_id: id of module requesting reference
1188  *
 * return: peer in success
1190  *         NULL in failure
1191  */
1192 static inline
1193 struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
1194 					   uint8_t *peer_mac_addr,
1195 					   int mac_addr_is_aligned,
1196 					   uint8_t vdev_id,
1197 					   enum dp_mod_id mod_id)
1198 {
1199 	if (soc->arch_ops.mlo_peer_find_hash_find)
1200 		return soc->arch_ops.mlo_peer_find_hash_find(soc,
1201 					      peer_mac_addr,
1202 					      mac_addr_is_aligned,
1203 					      mod_id, vdev_id);
1204 	return NULL;
1205 }
1206 
1207 /**
 * dp_peer_hash_find_wrapper() - find link peer or MLD peer according to
 *				 peer_type
1210  * @soc: DP SOC handle
1211  * @peer_info: peer information for hash find
1212  * @mod_id: ID of module requesting reference
1213  *
1214  * Return: peer handle
1215  */
1216 static inline
1217 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
1218 					  struct cdp_peer_info *peer_info,
1219 					  enum dp_mod_id mod_id)
1220 {
1221 	struct dp_peer *peer = NULL;
1222 
1223 	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
1224 	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
1225 		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
1226 					      peer_info->mac_addr_is_aligned,
1227 					      peer_info->vdev_id,
1228 					      mod_id);
1229 		if (peer)
1230 			return peer;
1231 	}
1232 	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
1233 	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
1234 		peer = dp_mld_peer_find_hash_find(
1235 					soc, peer_info->mac_addr,
1236 					peer_info->mac_addr_is_aligned,
1237 					peer_info->vdev_id,
1238 					mod_id);
1239 	return peer;
1240 }
1241 
1242 /**
1243  * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1244 				 increase mld peer ref_cnt
1245  * @link_peer: link peer pointer
1246  * @mld_peer: mld peer pointer
1247  *
1248  * Return: none
1249  */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/*
	 * Take a DP_MOD_ID_CDP reference on the MLD peer before linking it.
	 * NOTE(review): the return status of dp_peer_get_ref() is ignored
	 * here — presumably mld_peer is guaranteed live at this point;
	 * confirm against callers.
	 */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
1258 
1259 /**
1260  * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1261 				 decrease mld peer ref_cnt
1262  * @link_peer: link peer pointer
1263  *
1264  * Return: None
1265  */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* drop the DP_MOD_ID_CDP reference taken when the link was added */
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
1272 
1273 /**
1274  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1275  * @mld_peer: mld peer pointer
1276  *
1277  * Return: None
1278  */
1279 static inline
1280 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1281 {
1282 	int i;
1283 
1284 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1285 	mld_peer->num_links = 0;
1286 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1287 		mld_peer->link_peers[i].is_valid = false;
1288 }
1289 
1290 /**
1291  * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
1292  * @mld_peer: mld peer pointer
1293  *
1294  * Return: None
1295  */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	/* counterpart of dp_mld_peer_init_link_peers_info() */
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
1301 
1302 /**
1303  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1304  * @mld_peer: mld dp peer pointer
1305  * @link_peer: link dp peer pointer
1306  *
1307  * Return: None
1308  */
1309 static inline
1310 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1311 			       struct dp_peer *link_peer)
1312 {
1313 	int i;
1314 	struct dp_peer_link_info *link_peer_info;
1315 	bool action_done = false;
1316 	struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1317 
1318 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1319 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1320 		link_peer_info = &mld_peer->link_peers[i];
1321 		if (!link_peer_info->is_valid) {
1322 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1323 				     link_peer->mac_addr.raw,
1324 				     QDF_MAC_ADDR_SIZE);
1325 			link_peer_info->is_valid = true;
1326 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1327 			link_peer_info->chip_id =
1328 				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
1329 			mld_peer->num_links++;
1330 			action_done = true;
1331 			break;
1332 		}
1333 	}
1334 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1335 
1336 	if (i == DP_MAX_MLO_LINKS)
1337 		dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
1338 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1339 	else
1340 		dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1341 			     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1342 			     "idx %u num_links %u",
1343 			     action_done ? "Successful" : "Failed",
1344 			     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1345 			     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1346 			     i, mld_peer->num_links);
1347 
1348 	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
1349 						mld_peer, link_peer, i,
1350 						action_done ? 1 : 0);
1351 }
1352 
1353 /**
1354  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1355  * @mld_peer: MLD dp peer pointer
1356  * @link_peer: link dp peer pointer
1357  *
1358  * Return: number of links left after deletion
1359  */
1360 static inline
1361 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1362 				  struct dp_peer *link_peer)
1363 {
1364 	int i;
1365 	struct dp_peer_link_info *link_peer_info;
1366 	uint8_t num_links;
1367 	bool action_done = false;
1368 	struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1369 
1370 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1371 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1372 		link_peer_info = &mld_peer->link_peers[i];
1373 		if (link_peer_info->is_valid &&
1374 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1375 					&link_peer_info->mac_addr)) {
1376 			link_peer_info->is_valid = false;
1377 			mld_peer->num_links--;
1378 			action_done = true;
1379 			break;
1380 		}
1381 	}
1382 	num_links = mld_peer->num_links;
1383 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1384 
1385 	if (i == DP_MAX_MLO_LINKS)
1386 		dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
1387 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1388 	else
1389 		dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1390 			     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1391 			     "idx %u num_links %u",
1392 			     action_done ? "Successful" : "Failed",
1393 			     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1394 			     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1395 			     i, mld_peer->num_links);
1396 
1397 	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
1398 						mld_peer, link_peer, i,
1399 						action_done ? 1 : 0);
1400 
1401 	return num_links;
1402 }
1403 
1404 /**
1405  * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
1406 					   increase link peers ref_cnt
1407  * @soc: dp_soc handle
1408  * @mld_peer: dp mld peer pointer
1409  * @mld_link_peers: structure that hold links peers pointer array and number
1410  * @mod_id: id of module requesting reference
1411  *
1412  * Return: None
1413  */
static inline
void dp_get_link_peers_ref_from_mld_peer(
				struct dp_soc *soc,
				struct dp_peer *mld_peer,
				struct dp_mld_link_peers *mld_link_peers,
				enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	/* start from a clean output structure */
	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			/*
			 * Hash lookup takes a mod_id reference on each link
			 * peer; the caller must release all of them via
			 * dp_release_link_peers_ref().
			 */
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			/* j only advances for peers actually found */
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
1445 
1446 /**
1447  * dp_release_link_peers_ref() - release all link peers reference
1448  * @mld_link_peers: structure that hold links peers pointer array and number
1449  * @mod_id: id of module requesting reference
1450  *
1451  * Return: None.
1452  */
1453 static inline
1454 void dp_release_link_peers_ref(
1455 			struct dp_mld_link_peers *mld_link_peers,
1456 			enum dp_mod_id mod_id)
1457 {
1458 	struct dp_peer *peer;
1459 	uint8_t i;
1460 
1461 	for (i = 0; i < mld_link_peers->num_links; i++) {
1462 		peer = mld_link_peers->link_peers[i];
1463 		if (peer)
1464 			dp_peer_unref_delete(peer, mod_id);
1465 		mld_link_peers->link_peers[i] = NULL;
1466 	}
1467 
1468 	 mld_link_peers->num_links = 0;
1469 }
1470 
1471 /**
1472  * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
1473  * @soc: Datapath soc handle
1474  * @peer_id: peer id
1475  * @lmac_id: lmac id to find the link peer on given lmac
1476  *
1477  * Return: peer_id of link peer if found
1478  *         else return HTT_INVALID_PEER
1479  */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		/* pick the link peer living on this soc's given lmac */
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		/* non-MLD peer is its own link peer */
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
1520 
1521 /**
1522  * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
1523  * @soc: soc handle
1524  * @peer_mac_addr: peer mac address
1525  * @mac_addr_is_aligned: is mac addr aligned
1526  * @vdev_id: vdev_id
1527  * @mod_id: id of module requesting reference
1528  *
1529  * for MLO connection, get corresponding MLD peer,
1530  * otherwise get link peer for non-MLO case.
1531  *
1532  * return: peer in success
1533  *         NULL in failure
1534  */
1535 static inline
1536 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
1537 					       uint8_t *peer_mac,
1538 					       int mac_addr_is_aligned,
1539 					       uint8_t vdev_id,
1540 					       enum dp_mod_id mod_id)
1541 {
1542 	struct dp_peer *ta_peer = NULL;
1543 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
1544 						      peer_mac, 0, vdev_id,
1545 						      mod_id);
1546 
1547 	if (peer) {
1548 		/* mlo connection link peer, get mld peer with reference */
1549 		if (IS_MLO_DP_LINK_PEER(peer)) {
1550 			/* increase mld peer ref_cnt */
1551 			if (QDF_STATUS_SUCCESS ==
1552 			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1553 				ta_peer = peer->mld_peer;
1554 			else
1555 				ta_peer = NULL;
1556 
1557 			/* release peer reference that added by hash find */
1558 			dp_peer_unref_delete(peer, mod_id);
1559 		} else {
1560 		/* mlo MLD peer or non-mlo link peer */
1561 			ta_peer = peer;
1562 		}
1563 	} else {
1564 		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
1565 			    QDF_MAC_ADDR_REF(peer_mac));
1566 	}
1567 
1568 	return ta_peer;
1569 }
1570 
1571 /**
1572  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1573  * @soc		: core DP soc context
1574  * @peer_id	: peer id from peer object can be retrieved
1575  * @mod_id      : ID of module requesting reference
1576  *
1577  * for MLO connection, get corresponding MLD peer,
1578  * otherwise get link peer for non-MLO case.
1579  *
1580  * return: peer in success
1581  *         NULL in failure
1582  */
1583 static inline
1584 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1585 					   uint16_t peer_id,
1586 					   enum dp_mod_id mod_id)
1587 {
1588 	struct dp_peer *ta_peer = NULL;
1589 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1590 
1591 	if (peer) {
1592 		/* mlo connection link peer, get mld peer with reference */
1593 		if (IS_MLO_DP_LINK_PEER(peer)) {
1594 			/* increase mld peer ref_cnt */
1595 			if (QDF_STATUS_SUCCESS ==
1596 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1597 				ta_peer = peer->mld_peer;
1598 			else
1599 				ta_peer = NULL;
1600 
1601 			/* release peer reference that added by hash find */
1602 			dp_peer_unref_delete(peer, mod_id);
1603 		} else {
1604 		/* mlo MLD peer or non-mlo link peer */
1605 			ta_peer = peer;
1606 		}
1607 	}
1608 
1609 	return ta_peer;
1610 }
1611 
1612 /**
1613  * dp_peer_mlo_delete() - peer MLO related delete operation
1614  * @peer: DP peer handle
1615  * Return: None
1616  */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
	/* non-link peers (MLD or legacy) need no extra MLO teardown here */
}
1636 
1637 /**
1638  * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
1639  * @soc: Soc handle
1640  * @vdev_id: Vdev ID
1641  * @peer_setup_info: peer setup information for MLO
1642  */
1643 QDF_STATUS dp_peer_mlo_setup(
1644 			struct dp_soc *soc,
1645 			struct dp_peer *peer,
1646 			uint8_t vdev_id,
1647 			struct cdp_peer_setup_info *setup_info);
1648 
1649 /**
1650  * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
1651  * @peer: datapath peer
1652  *
1653  * Return: MLD peer in case of MLO Link peer
1654  *	   Peer itself in other cases
1655  */
1656 static inline
1657 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
1658 {
1659 	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
1660 }
1661 
1662 /**
1663  * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
1664  *					peer id
1665  * @soc: core DP soc context
1666  * @peer_id: peer id
1667  * @mod_id: ID of module requesting reference
1668  *
1669  * Return: primary link peer for the MLO peer
1670  *	   legacy peer itself in case of legacy peer
1671  */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		/* scan links for the one flagged as primary */
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		/* drop the MLD peer reference; only the primary's is kept */
		dp_peer_unref_delete(peer, mod_id);
	} else {
		/* legacy peer: the peer itself (reference retained) */
		primary_peer = peer;
	}

	return primary_peer;
}
1714 
1715 /**
1716  * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
1717  * @peer: Datapath peer
1718  *
1719  * Return: dp_txrx_peer from MLD peer if peer type is link peer
1720  *	   dp_txrx_peer from peer itself for other cases
1721  */
1722 static inline
1723 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
1724 {
1725 	return IS_MLO_DP_LINK_PEER(peer) ?
1726 				peer->mld_peer->txrx_peer : peer->txrx_peer;
1727 }
1728 
1729 /**
1730  * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
1731  * @peer: Datapath peer
1732  *
1733  * Return: true if peer is primary link peer or legacy peer
1734  *	   false otherwise
1735  */
1736 static inline
1737 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
1738 {
1739 	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
1740 		return true;
1741 	else if (IS_DP_LEGACY_PEER(peer))
1742 		return true;
1743 	else
1744 		return false;
1745 }
1746 
1747 /**
1748  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
1749  *
1750  * @soc		: core DP soc context
1751  * @peer_id	: peer id from peer object can be retrieved
1752  * @handle	: reference handle
1753  * @mod_id      : ID of module requesting reference
1754  *
1755  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
1756  */
1757 static inline struct dp_txrx_peer *
1758 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
1759 			       uint16_t peer_id,
1760 			       dp_txrx_ref_handle *handle,
1761 			       enum dp_mod_id mod_id)
1762 
1763 {
1764 	struct dp_peer *peer;
1765 	struct dp_txrx_peer *txrx_peer;
1766 
1767 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1768 	if (!peer)
1769 		return NULL;
1770 
1771 	txrx_peer = dp_get_txrx_peer(peer);
1772 	if (txrx_peer) {
1773 		*handle = (dp_txrx_ref_handle)peer;
1774 		return txrx_peer;
1775 	}
1776 
1777 	dp_peer_unref_delete(peer, mod_id);
1778 	return NULL;
1779 }
1780 
1781 /**
1782  * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
1783  *
1784  * @soc	: core DP soc context
1785  *
1786  * Return: void
1787  */
1788 void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
1789 
1790 #else
1791 
1792 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
1793 
1794 #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
1795 /* is legacy peer */
1796 #define IS_DP_LEGACY_PEER(_peer) true
1797 #define IS_MLO_DP_LINK_PEER(_peer) false
1798 #define IS_MLO_DP_MLD_PEER(_peer) false
1799 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
1800 
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	/* non-MLO build: only the link peer hash table exists */
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}
1811 
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	/* non-MLO build: the target peer is the peer itself */
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}
1823 
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	/* non-MLO build: plain reference lookup by peer id */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
1831 
static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	/* no MLO setup needed without WLAN_FEATURE_11BE_MLO */
	return QDF_STATUS_SUCCESS;
}
1841 
/* no-op without WLAN_FEATURE_11BE_MLO */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}
1846 
/* no-op without WLAN_FEATURE_11BE_MLO */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}
1851 
/* no-op without WLAN_FEATURE_11BE_MLO */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}
1856 
/* no-op without WLAN_FEATURE_11BE_MLO */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}
1861 
/* no-op without WLAN_FEATURE_11BE_MLO */
static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
1867 
/* non-MLO build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
1872 
/* non-MLO build: chip_id is ignored, plain hash lookup on this soc */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1885 
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	/* non-MLO build: a peer is always its own target peer */
	return peer;
}
1891 
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	/* non-MLO build: every peer is its own primary link */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
1899 
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	/* non-MLO build: each peer owns its own txrx_peer */
	return peer->txrx_peer;
}
1905 
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	/* non-MLO build: every peer counts as primary */
	return true;
}
1911 
1912 /**
1913  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
1914  *
1915  * @soc		: core DP soc context
1916  * @peer_id	: peer id from peer object can be retrieved
1917  * @handle	: reference handle
1918  * @mod_id      : ID of module requesting reference
1919  *
1920  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
1921  */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)

{
	/* non-MLO build: target txrx peer is the peer's own txrx peer */
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
1931 
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	/* non-MLO build: the peer id is already the link peer id */
	return peer_id;
}
1938 
/* no MLO AST stats to print without WLAN_FEATURE_11BE_MLO */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
1942 #endif /* WLAN_FEATURE_11BE_MLO */
1943 
/*
 * dp_peer_rx_tids_create() - allocate and init per-tid rx state for a peer
 *
 * Allocates DP_MAX_TIDS dp_rx_tid entries and creates one lock per tid.
 * Skipped for MLD peers (their link peers carry the rx tids).
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE if already allocated,
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
	uint8_t i;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		dp_peer_info("skip for mld peer");
		return QDF_STATUS_SUCCESS;
	}

	/* double allocation indicates a caller bug */
	if (peer->rx_tid) {
		QDF_BUG(0);
		dp_peer_err("peer rx_tid mem already exist");
		return QDF_STATUS_E_FAILURE;
	}

	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
				      sizeof(struct dp_rx_tid));

	if (!peer->rx_tid) {
		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	return QDF_STATUS_SUCCESS;
}
1975 
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	/*
	 * NOTE(review): for MLO link peers only the pointer is cleared —
	 * presumably the rx_tid memory is owned and freed via the MLD peer
	 * path; confirm against dp_peer_rx_tids_create() callers.
	 */
	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}
1990 
1991 static inline
1992 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
1993 {
1994 	uint8_t i;
1995 
1996 	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
1997 		     sizeof(struct dp_rx_tid_defrag));
1998 
1999 	for (i = 0; i < DP_MAX_TIDS; i++)
2000 		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
2001 }
2002 
2003 static inline
2004 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
2005 {
2006 	uint8_t i;
2007 
2008 	for (i = 0; i < DP_MAX_TIDS; i++)
2009 		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
2010 }
2011 
2012 #ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	/* cached rx bufq: frames beyond the threshold are dropped */
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}
2021 
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	/* counterpart of dp_peer_rx_bufq_resources_init() */
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
2028 
2029 #else
/* no-op without PEER_CACHE_RX_PKTS */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}
2034 
/* no-op without PEER_CACHE_RX_PKTS */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
2039 #endif
2040 
2041 /**
2042  * dp_peer_update_state() - update dp peer state
2043  *
2044  * @soc		: core DP soc context
2045  * @peer	: DP peer
2046  * @state	: new state
2047  *
2048  * Return: None
2049  */
2050 static inline void
2051 dp_peer_update_state(struct dp_soc *soc,
2052 		     struct dp_peer *peer,
2053 		     enum dp_peer_state state)
2054 {
2055 	uint8_t peer_state;
2056 
2057 	qdf_spin_lock_bh(&peer->peer_state_lock);
2058 	peer_state = peer->peer_state;
2059 
2060 	switch (state) {
2061 	case DP_PEER_STATE_INIT:
2062 		DP_PEER_STATE_ASSERT
2063 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
2064 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
2065 		break;
2066 
2067 	case DP_PEER_STATE_ACTIVE:
2068 		DP_PEER_STATE_ASSERT(peer, state,
2069 				     (peer_state == DP_PEER_STATE_INIT));
2070 		break;
2071 
2072 	case DP_PEER_STATE_LOGICAL_DELETE:
2073 		DP_PEER_STATE_ASSERT(peer, state,
2074 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
2075 				     (peer_state == DP_PEER_STATE_INIT));
2076 		break;
2077 
2078 	case DP_PEER_STATE_INACTIVE:
2079 		if (IS_MLO_DP_MLD_PEER(peer))
2080 			DP_PEER_STATE_ASSERT
2081 				(peer, state,
2082 				 (peer_state == DP_PEER_STATE_ACTIVE));
2083 		else
2084 			DP_PEER_STATE_ASSERT
2085 				(peer, state,
2086 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2087 		break;
2088 
2089 	case DP_PEER_STATE_FREED:
2090 		if (peer->sta_self_peer)
2091 			DP_PEER_STATE_ASSERT
2092 			(peer, state, (peer_state == DP_PEER_STATE_INIT));
2093 		else
2094 			DP_PEER_STATE_ASSERT
2095 				(peer, state,
2096 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
2097 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2098 		break;
2099 
2100 	default:
2101 		qdf_spin_unlock_bh(&peer->peer_state_lock);
2102 		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
2103 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2104 		return;
2105 	}
2106 	peer->peer_state = state;
2107 	qdf_spin_unlock_bh(&peer->peer_state_lock);
2108 	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
2109 		peer_state, state,
2110 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2111 }
2112 
2113 #ifdef REO_SHARED_QREF_TABLE_EN
2114 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2115 					struct dp_peer *peer);
2116 #else
/* no-op when REO_SHARED_QREF_TABLE_EN is not defined */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
2119 #endif
2120 
2121 /**
2122  * dp_peer_check_wds_ext_peer() - Check WDS ext peer
2123  *
2124  * @peer: DP peer
2125  *
2126  * Return: True for WDS ext peer, false otherwise
2127  */
2128 bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);
2129 #endif /* _DP_PEER_H_ */
2130