xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision 126e35d7167fa08f07037c44c9fa104324e1fa8a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 
26 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
27 #include "hal_reo.h"
28 #endif
29 
30 #define DP_INVALID_PEER_ID 0xffff
31 
32 #define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
33 #define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */
34 
35 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
36 
37 #define DP_PEER_HASH_LOAD_MULT  2
38 #define DP_PEER_HASH_LOAD_SHIFT 0
39 
40 /* Threshold for peer's cached buf queue beyond which frames are dropped */
41 #define DP_RX_CACHED_BUFQ_THRESH 64
42 
43 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
44 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
45 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
46 #define dp_peer_info(params...) \
47 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
48 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
49 
#ifdef REO_QDESC_HISTORY
/* Kind of event recorded in the REO queue-descriptor history */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

/**
 * struct reo_qdesc_event - one record in the REO queue-descriptor history
 * @qdesc_addr: DMA address of the REO queue descriptor
 * @ts: timestamp when the event was logged
 * @type: event type (update callback or free)
 * @peer_mac: MAC address of the peer the descriptor belongs to
 */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif
63 
/**
 * struct ast_del_ctxt - context handed to AST entry deletion iterators
 * @age: presumably set when deletion is driven by ageout — confirm at callers
 * @del_count: running count of AST entries deleted in this pass
 */
struct ast_del_ctxt {
	bool age;
	int del_count;
};
68 
69 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
70 			       void *arg);
71 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
72 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
73 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
74 				       uint8_t *peer_mac_addr,
75 				       int mac_addr_is_aligned,
76 				       uint8_t vdev_id,
77 				       enum dp_mod_id id);
78 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
79 
80 #ifdef DP_UMAC_HW_RESET_SUPPORT
81 void dp_reset_tid_q_setup(struct dp_soc *soc);
82 #endif
83 /**
84  * dp_peer_get_ref() - Returns peer object given the peer id
85  *
86  * @soc		: core DP soc context
87  * @peer	: DP peer
88  * @mod_id	: id of module requesting the reference
89  *
90  * Return:	QDF_STATUS_SUCCESS if reference held successfully
91  *		else QDF_STATUS_E_INVAL
92  */
93 static inline
94 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
95 			   struct dp_peer *peer,
96 			   enum dp_mod_id mod_id)
97 {
98 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
99 		return QDF_STATUS_E_INVAL;
100 
101 	if (mod_id > DP_MOD_ID_RX)
102 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
103 
104 	return QDF_STATUS_SUCCESS;
105 }
106 
107 /**
108  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
109  *
110  * @soc		: core DP soc context
111  * @peer_id	: peer id from peer object can be retrieved
112  * @mod_id	: module id
113  *
114  * Return: struct dp_peer*: Pointer to DP peer object
115  */
116 static inline struct dp_peer *
117 __dp_peer_get_ref_by_id(struct dp_soc *soc,
118 			uint16_t peer_id,
119 			enum dp_mod_id mod_id)
120 
121 {
122 	struct dp_peer *peer;
123 
124 	qdf_spin_lock_bh(&soc->peer_map_lock);
125 	peer = (peer_id >= soc->max_peer_id) ? NULL :
126 				soc->peer_id_to_obj_map[peer_id];
127 	if (!peer ||
128 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
129 		qdf_spin_unlock_bh(&soc->peer_map_lock);
130 		return NULL;
131 	}
132 
133 	qdf_spin_unlock_bh(&soc->peer_map_lock);
134 	return peer;
135 }
136 
137 /**
138  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
139  *                        if peer state is active
140  *
141  * @soc		: core DP soc context
142  * @peer_id	: peer id from peer object can be retrieved
143  * @mod_id      : ID of module requesting reference
144  *
145  * Return: struct dp_peer*: Pointer to DP peer object
146  */
147 static inline
148 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
149 				      uint16_t peer_id,
150 				      enum dp_mod_id mod_id)
151 {
152 	struct dp_peer *peer;
153 
154 	qdf_spin_lock_bh(&soc->peer_map_lock);
155 	peer = (peer_id >= soc->max_peer_id) ? NULL :
156 				soc->peer_id_to_obj_map[peer_id];
157 
158 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
159 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
160 		qdf_spin_unlock_bh(&soc->peer_map_lock);
161 		return NULL;
162 	}
163 
164 	qdf_spin_unlock_bh(&soc->peer_map_lock);
165 
166 	return peer;
167 }
168 
169 /**
170  * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
171  *
172  * @soc		: core DP soc context
173  * @peer_id	: peer id from peer object can be retrieved
174  * @handle	: reference handle
175  * @mod_id      : ID of module requesting reference
176  *
177  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
178  */
179 static inline struct dp_txrx_peer *
180 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
181 			   uint16_t peer_id,
182 			   dp_txrx_ref_handle *handle,
183 			   enum dp_mod_id mod_id)
184 
185 {
186 	struct dp_peer *peer;
187 
188 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
189 	if (!peer)
190 		return NULL;
191 
192 	if (!peer->txrx_peer) {
193 		dp_peer_unref_delete(peer, mod_id);
194 		return NULL;
195 	}
196 
197 	*handle = (dp_txrx_ref_handle)peer;
198 	return peer->txrx_peer;
199 }
200 
201 #ifdef PEER_CACHE_RX_PKTS
202 /**
203  * dp_rx_flush_rx_cached() - flush cached rx frames
204  * @peer: peer
205  * @drop: set flag to drop frames
206  *
207  * Return: None
208  */
209 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
210 #else
211 static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
212 {
213 }
214 #endif
215 
/* Mark a peer disconnected and drop any frames cached for it */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* state transition is done under the peer info lock */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop (not deliver) everything still sitting in the rx cache */
	dp_rx_flush_rx_cached(peer, true);
}
225 
226 /**
227  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
228  *
229  * @vdev	: DP vdev context
230  * @func	: function to be called for each peer
231  * @arg		: argument need to be passed to func
232  * @mod_id	: module_id
233  *
234  * Return: void
235  */
236 static inline void
237 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
238 		     enum dp_mod_id mod_id)
239 {
240 	struct dp_peer *peer;
241 	struct dp_peer *tmp_peer;
242 	struct dp_soc *soc = NULL;
243 
244 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
245 		return;
246 
247 	soc = vdev->pdev->soc;
248 
249 	qdf_spin_lock_bh(&vdev->peer_list_lock);
250 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
251 			   peer_list_elem,
252 			   tmp_peer) {
253 		if (dp_peer_get_ref(soc, peer, mod_id) ==
254 					QDF_STATUS_SUCCESS) {
255 			(*func)(soc, peer, arg);
256 			dp_peer_unref_delete(peer, mod_id);
257 		}
258 	}
259 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
260 }
261 
262 /**
263  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
264  *
265  * @pdev	: DP pdev context
266  * @func	: function to be called for each peer
267  * @arg		: argument need to be passed to func
268  * @mod_id	: module_id
269  *
270  * Return: void
271  */
272 static inline void
273 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
274 		     enum dp_mod_id mod_id)
275 {
276 	struct dp_vdev *vdev;
277 
278 	if (!pdev)
279 		return;
280 
281 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
282 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
283 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
284 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
285 }
286 
287 /**
288  * dp_soc_iterate_peer() - API to iterate through all peers of soc
289  *
290  * @soc		: DP soc context
291  * @func	: function to be called for each peer
292  * @arg		: argument need to be passed to func
293  * @mod_id	: module_id
294  *
295  * Return: void
296  */
297 static inline void
298 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
299 		    enum dp_mod_id mod_id)
300 {
301 	struct dp_pdev *pdev;
302 	int i;
303 
304 	if (!soc)
305 		return;
306 
307 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
308 		pdev = soc->pdev_list[i];
309 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
310 	}
311 }
312 
313 /**
314  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
315  *
316  * This API will cache the peers in local allocated memory and calls
317  * iterate function outside the lock.
318  *
319  * As this API is allocating new memory it is suggested to use this
320  * only when lock cannot be held
321  *
322  * @vdev	: DP vdev context
323  * @func	: function to be called for each peer
324  * @arg		: argument need to be passed to func
325  * @mod_id	: module_id
326  *
327  * Return: void
328  */
329 static inline void
330 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
331 			       dp_peer_iter_func *func,
332 			       void *arg,
333 			       enum dp_mod_id mod_id)
334 {
335 	struct dp_peer *peer;
336 	struct dp_peer *tmp_peer;
337 	struct dp_soc *soc = NULL;
338 	struct dp_peer **peer_array = NULL;
339 	int i = 0;
340 	uint32_t num_peers = 0;
341 
342 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
343 		return;
344 
345 	num_peers = vdev->num_peers;
346 
347 	soc = vdev->pdev->soc;
348 
349 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
350 	if (!peer_array)
351 		return;
352 
353 	qdf_spin_lock_bh(&vdev->peer_list_lock);
354 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
355 			   peer_list_elem,
356 			   tmp_peer) {
357 		if (i >= num_peers)
358 			break;
359 
360 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
361 			peer_array[i] = peer;
362 			i = (i + 1);
363 		}
364 	}
365 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
366 
367 	for (i = 0; i < num_peers; i++) {
368 		peer = peer_array[i];
369 
370 		if (!peer)
371 			continue;
372 
373 		(*func)(soc, peer, arg);
374 		dp_peer_unref_delete(peer, mod_id);
375 	}
376 
377 	qdf_mem_free(peer_array);
378 }
379 
380 /**
381  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
382  *
383  * This API will cache the peers in local allocated memory and calls
384  * iterate function outside the lock.
385  *
386  * As this API is allocating new memory it is suggested to use this
387  * only when lock cannot be held
388  *
389  * @pdev	: DP pdev context
390  * @func	: function to be called for each peer
391  * @arg		: argument need to be passed to func
392  * @mod_id	: module_id
393  *
394  * Return: void
395  */
396 static inline void
397 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
398 			       dp_peer_iter_func *func,
399 			       void *arg,
400 			       enum dp_mod_id mod_id)
401 {
402 	struct dp_peer *peer;
403 	struct dp_peer *tmp_peer;
404 	struct dp_soc *soc = NULL;
405 	struct dp_vdev *vdev = NULL;
406 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
407 	int i = 0;
408 	int j = 0;
409 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
410 
411 	if (!pdev || !pdev->soc)
412 		return;
413 
414 	soc = pdev->soc;
415 
416 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
417 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
418 		num_peers[i] = vdev->num_peers;
419 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
420 					       sizeof(struct dp_peer *));
421 		if (!peer_array[i])
422 			break;
423 
424 		qdf_spin_lock_bh(&vdev->peer_list_lock);
425 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
426 				   peer_list_elem,
427 				   tmp_peer) {
428 			if (j >= num_peers[i])
429 				break;
430 
431 			if (dp_peer_get_ref(soc, peer, mod_id) ==
432 					QDF_STATUS_SUCCESS) {
433 				peer_array[i][j] = peer;
434 
435 				j = (j + 1);
436 			}
437 		}
438 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
439 		i = (i + 1);
440 	}
441 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
442 
443 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
444 		if (!peer_array[i])
445 			break;
446 
447 		for (j = 0; j < num_peers[i]; j++) {
448 			peer = peer_array[i][j];
449 
450 			if (!peer)
451 				continue;
452 
453 			(*func)(soc, peer, arg);
454 			dp_peer_unref_delete(peer, mod_id);
455 		}
456 
457 		qdf_mem_free(peer_array[i]);
458 	}
459 }
460 
461 /**
462  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
463  *
464  * This API will cache the peers in local allocated memory and calls
465  * iterate function outside the lock.
466  *
467  * As this API is allocating new memory it is suggested to use this
468  * only when lock cannot be held
469  *
470  * @soc		: DP soc context
471  * @func	: function to be called for each peer
472  * @arg		: argument need to be passed to func
473  * @mod_id	: module_id
474  *
475  * Return: void
476  */
477 static inline void
478 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
479 			      dp_peer_iter_func *func,
480 			      void *arg,
481 			      enum dp_mod_id mod_id)
482 {
483 	struct dp_pdev *pdev;
484 	int i;
485 
486 	if (!soc)
487 		return;
488 
489 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
490 		pdev = soc->pdev_list[i];
491 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
492 	}
493 }
494 
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * Debug build: log the invalid transition and assert.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/* Production build: log the invalid transition but do not assert. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
518 
519 /**
520  * dp_peer_state_cmp() - compare dp peer state
521  *
522  * @peer	: DP peer
523  * @state	: state
524  *
525  * Return: true if state matches with peer state
526  *	   false if it does not match
527  */
528 static inline bool
529 dp_peer_state_cmp(struct dp_peer *peer,
530 		  enum dp_peer_state state)
531 {
532 	bool is_status_equal = false;
533 
534 	qdf_spin_lock_bh(&peer->peer_state_lock);
535 	is_status_equal = (peer->peer_state == state);
536 	qdf_spin_unlock_bh(&peer->peer_state_lock);
537 
538 	return is_status_equal;
539 }
540 
541 void dp_print_ast_stats(struct dp_soc *soc);
542 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
543 				  uint16_t hw_peer_id, uint8_t vdev_id,
544 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
545 				  uint8_t is_wds);
546 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
547 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
548 			      uint8_t is_wds, uint32_t free_wds_count);
549 
550 #ifdef DP_RX_UDP_OVER_PEER_ROAM
551 /**
552  * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
553  * @soc - dp soc pointer
554  * @vdev_id - vdev id
555  * @peer_mac_addr - mac address of the peer
556  *
557  * This function resets the roamed peer auth status and mac address
558  * after peer map indication of same peer is received from firmware.
559  *
560  * Return: None
561  */
562 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
563 			      uint8_t *peer_mac_addr);
564 #else
565 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
566 					    uint8_t *peer_mac_addr)
567 {
568 }
569 #endif
570 
571 #ifdef WLAN_FEATURE_11BE_MLO
572 /**
573  * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
574  * @soc_handle - generic soc handle
575  * @peer_id - ML peer_id from firmware
576  * @peer_mac_addr - mac address of the peer
577  * @mlo_ast_flow_info: MLO AST flow info
578  * @mlo_link_info - MLO link info
579  *
580  * associate the ML peer_id that firmware provided with peer entry
581  * and update the ast table in the host with the hw_peer_id.
582  *
583  * Return: QDF_STATUS code
584  */
585 QDF_STATUS
586 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
587 			   uint8_t *peer_mac_addr,
588 			   struct dp_mlo_flow_override_info *mlo_flow_info,
589 			   struct dp_mlo_link_info *mlo_link_info);
590 
591 /**
592  * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
593  * @soc_handle - generic soc handle
594  * @peer_id - peer_id from firmware
595  *
596  * Return: none
597  */
598 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
599 #endif
600 
601 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
602 			   enum cdp_sec_type sec_type, int is_unicast,
603 			   u_int32_t *michael_key, u_int32_t *rx_pn);
604 
605 QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
606 				   uint8_t tid, uint16_t win_sz);
607 
608 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
609 		uint16_t peer_id, uint8_t *peer_mac);
610 
611 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
612 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
613 			   uint32_t flags);
614 
615 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
616 
617 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
618 			       struct dp_ast_entry *ast_entry);
619 
620 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
621 			struct dp_ast_entry *ast_entry,	uint32_t flags);
622 
623 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
624 						     uint8_t *ast_mac_addr,
625 						     uint8_t pdev_id);
626 
627 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
628 						     uint8_t *ast_mac_addr,
629 						     uint8_t vdev_id);
630 
631 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
632 					       uint8_t *ast_mac_addr);
633 
634 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
635 				struct dp_ast_entry *ast_entry);
636 
637 
638 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
639 				struct dp_ast_entry *ast_entry);
640 
641 void dp_peer_ast_set_type(struct dp_soc *soc,
642 				struct dp_ast_entry *ast_entry,
643 				enum cdp_txrx_ast_entry_type type);
644 
645 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
646 			      struct dp_ast_entry *ast_entry,
647 			      struct dp_peer *peer);
648 
649 #ifdef WLAN_FEATURE_MULTI_AST_DEL
650 void dp_peer_ast_send_multi_wds_del(
651 		struct dp_soc *soc, uint8_t vdev_id,
652 		struct peer_del_multi_wds_entries *wds_list);
653 #endif
654 
655 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
656 			   struct cdp_soc *dp_soc,
657 			   void *cookie,
658 			   enum cdp_ast_free_status status);
659 
660 void dp_peer_ast_hash_remove(struct dp_soc *soc,
661 			     struct dp_ast_entry *ase);
662 
663 void dp_peer_free_ast_entry(struct dp_soc *soc,
664 			    struct dp_ast_entry *ast_entry);
665 
666 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
667 			      struct dp_ast_entry *ast_entry,
668 			      struct dp_peer *peer);
669 
670 /**
671  * dp_peer_mec_detach_entry() - Detach the MEC entry
672  * @soc: SoC handle
673  * @mecentry: MEC entry of the node
674  * @ptr: pointer to free list
675  *
676  * The MEC entry is detached from MEC table and added to free_list
677  * to free the object outside lock
678  *
679  * Return: None
680  */
681 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
682 			      void *ptr);
683 
684 /**
685  * dp_peer_mec_free_list() - free the MEC entry from free_list
686  * @soc: SoC handle
687  * @ptr: pointer to free list
688  *
689  * Return: None
690  */
691 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
692 
693 /**
694  * dp_peer_mec_add_entry()
695  * @soc: SoC handle
696  * @vdev: vdev to which mec node belongs
697  * @mac_addr: MAC address of mec node
698  *
699  * This function allocates and adds MEC entry to MEC table.
700  * It assumes caller has taken the mec lock to protect the access to these
701  * tables
702  *
703  * Return: QDF_STATUS
704  */
705 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
706 				 struct dp_vdev *vdev,
707 				 uint8_t *mac_addr);
708 
709 /**
710  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
711  * within pdev
712  * @soc: SoC handle
713  *
714  * It assumes caller has taken the mec_lock to protect the access to
715  * MEC hash table
716  *
717  * Return: MEC entry
718  */
719 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
720 						     uint8_t pdev_id,
721 						     uint8_t *mec_mac_addr);
722 
723 #define DP_AST_ASSERT(_condition) \
724 	do { \
725 		if (!(_condition)) { \
726 			dp_print_ast_stats(soc);\
727 			QDF_BUG(_condition); \
728 		} \
729 	} while (0)
730 
731 /**
732  * dp_peer_update_inactive_time - Update inactive time for peer
733  * @pdev: pdev object
734  * @tag_type: htt_tlv_tag type
735  * @tag_buf: buf message
736  */
737 void
738 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
739 			     uint32_t *tag_buf);
740 
741 #ifndef QCA_MULTIPASS_SUPPORT
742 /**
743  * dp_peer_set_vlan_id: set vlan_id for this peer
744  * @cdp_soc: soc handle
745  * @vdev_id: id of vdev object
746  * @peer_mac: mac address
747  * @vlan_id: vlan id for peer
748  *
749  * return: void
750  */
751 static inline
752 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
753 			 uint8_t vdev_id, uint8_t *peer_mac,
754 			 uint16_t vlan_id)
755 {
756 }
757 
758 /**
759  * dp_set_vlan_groupkey: set vlan map for vdev
760  * @soc: pointer to soc
761  * @vdev_id: id of vdev handle
762  * @vlan_id: vlan_id
763  * @group_key: group key for vlan
764  *
765  * return: set success/failure
766  */
767 static inline
768 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
769 				uint16_t vlan_id, uint16_t group_key)
770 {
771 	return QDF_STATUS_SUCCESS;
772 }
773 
774 /**
775  * dp_peer_multipass_list_init: initialize multipass peer list
776  * @vdev: pointer to vdev
777  *
778  * return: void
779  */
780 static inline
781 void dp_peer_multipass_list_init(struct dp_vdev *vdev)
782 {
783 }
784 
785 /**
786  * dp_peer_multipass_list_remove: remove peer from special peer list
787  * @peer: peer handle
788  *
789  * return: void
790  */
791 static inline
792 void dp_peer_multipass_list_remove(struct dp_peer *peer)
793 {
794 }
795 #else
796 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
797 			 uint8_t vdev_id, uint8_t *peer_mac,
798 			 uint16_t vlan_id);
799 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
800 				uint16_t vlan_id, uint16_t group_key);
801 void dp_peer_multipass_list_init(struct dp_vdev *vdev);
802 void dp_peer_multipass_list_remove(struct dp_peer *peer);
803 #endif
804 
805 
806 #ifndef QCA_PEER_MULTIQ_SUPPORT
807 /**
808  * dp_peer_reset_flowq_map() - reset peer flowq map table
809  * @peer - dp peer handle
810  *
811  * Return: none
812  */
813 static inline
814 void dp_peer_reset_flowq_map(struct dp_peer *peer)
815 {
816 }
817 
818 /**
819  * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
820  * @soc - generic soc handle
821  * @is_wds - flag to indicate if peer is wds
822  * @peer_id - peer_id from htt peer map message
823  * @peer_mac_addr - mac address of the peer
824  * @ast_info - ast flow override information from peer map
825  *
826  * Return: none
827  */
828 static inline
829 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
830 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
831 		    struct dp_ast_flow_override_info *ast_info)
832 {
833 }
834 #else
835 void dp_peer_reset_flowq_map(struct dp_peer *peer);
836 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
837 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
838 		    struct dp_ast_flow_override_info *ast_info);
839 #endif
840 
841 /*
842  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
843  * after deleting the entries (ie., setting valid=0)
844  *
845  * @soc: DP SOC handle
846  * @cb_ctxt: Callback context
847  * @reo_status: REO command status
848  */
849 void dp_rx_tid_delete_cb(struct dp_soc *soc,
850 			 void *cb_ctxt,
851 			 union hal_reo_status *reo_status);
852 
853 #ifdef QCA_PEER_EXT_STATS
854 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
855 					 struct dp_txrx_peer *txrx_peer);
856 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
857 				     struct dp_txrx_peer *txrx_peer);
858 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
859 #else
860 static inline
861 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
862 					 struct dp_txrx_peer *txrx_peer)
863 {
864 	return QDF_STATUS_SUCCESS;
865 }
866 
867 static inline
868 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
869 				     struct dp_txrx_peer *txrx_peer)
870 {
871 }
872 
873 static inline
874 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
875 {
876 }
877 #endif
878 
879 #ifdef WLAN_PEER_JITTER
880 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
881 					  struct dp_txrx_peer *txrx_peer);
882 
883 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
884 				      struct dp_txrx_peer *txrx_peer);
885 
886 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
887 #else
888 static inline
889 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
890 					  struct dp_txrx_peer *txrx_peer)
891 {
892 	return QDF_STATUS_SUCCESS;
893 }
894 
895 static inline
896 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
897 				      struct dp_txrx_peer *txrx_peer)
898 {
899 }
900 
901 static inline
902 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
903 {
904 }
905 #endif
906 
907 #ifndef CONFIG_SAWF_DEF_QUEUES
908 static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
909 						struct dp_peer *peer)
910 {
911 	return QDF_STATUS_SUCCESS;
912 }
913 
914 static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
915 					       struct dp_peer *peer)
916 {
917 	return QDF_STATUS_SUCCESS;
918 }
919 
920 #endif
921 
922 #ifndef CONFIG_SAWF
923 static inline
924 QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
925 					struct dp_txrx_peer *txrx_peer)
926 {
927 	return QDF_STATUS_SUCCESS;
928 }
929 
930 static inline
931 QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
932 				       struct dp_txrx_peer *txrx_peer)
933 {
934 	return QDF_STATUS_SUCCESS;
935 }
936 #endif
937 
938 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
939 					   struct dp_vdev *vdev,
940 					   enum dp_mod_id mod_id);
941 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
942 						struct dp_vdev *vdev,
943 						enum dp_mod_id mod_id);
944 
945 void dp_peer_ast_table_detach(struct dp_soc *soc);
946 void dp_peer_find_map_detach(struct dp_soc *soc);
947 void dp_soc_wds_detach(struct dp_soc *soc);
948 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
949 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
950 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
951 void dp_soc_wds_attach(struct dp_soc *soc);
952 void dp_peer_mec_hash_detach(struct dp_soc *soc);
953 void dp_peer_ast_hash_detach(struct dp_soc *soc);
954 
955 #ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Deletes the peer's self AST entry first, then every entry on
 * peer->ast_list.
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* safe iteration: entries are unlinked as they are deleted */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
983 
984 void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
985 			       void *arg);
986 #else
987 static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
988 					     struct dp_peer *peer, void *arg)
989 {
990 }
991 
992 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
993 					      struct dp_peer *peer)
994 {
995 }
996 #endif
997 
998 #ifdef FEATURE_MEC
999 /**
1000  * dp_peer_mec_spinlock_create() - Create the MEC spinlock
1001  * @soc: SoC handle
1002  *
1003  * Return: none
1004  */
1005 void dp_peer_mec_spinlock_create(struct dp_soc *soc);
1006 
1007 /**
1008  * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
1009  * @soc: SoC handle
1010  *
1011  * Return: none
1012  */
1013 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
1014 
1015 /**
1016  * dp_peer_mec_flush_entries() - Delete all mec entries in table
1017  * @soc: Datapath SOC
1018  *
1019  * Return: None
1020  */
1021 void dp_peer_mec_flush_entries(struct dp_soc *soc);
1022 #else
1023 static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
1024 {
1025 }
1026 
1027 static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
1028 {
1029 }
1030 
1031 static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
1032 {
1033 }
1034 #endif
1035 
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status from h/w
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);

#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

/*
 * NOTE(review): only dp_get_rx_reo_queue_info() has a no-op fallback;
 * the other two helpers are presumably referenced only under the ifdef —
 * confirm before adding new callers.
 */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
1086 
1087 static inline int dp_peer_find_mac_addr_cmp(
1088 	union dp_align_mac_addr *mac_addr1,
1089 	union dp_align_mac_addr *mac_addr2)
1090 {
1091 		/*
1092 		 * Intentionally use & rather than &&.
1093 		 * because the operands are binary rather than generic boolean,
1094 		 * the functionality is equivalent.
1095 		 * Using && has the advantage of short-circuited evaluation,
1096 		 * but using & has the advantage of no conditional branching,
1097 		 * which is a more significant benefit.
1098 		 */
1099 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1100 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1101 }
1102 
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

/**
 * dp_mlo_peer_delete() - delete MLO DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
1126 
#ifdef WLAN_FEATURE_11BE_MLO

/* is MLO mld txrx peer: checks the txrx peer's mld_peer field/flag */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer: link-type peer with no MLD peer attached */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* is MLO connection link peer: link-type peer with an MLD peer attached */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)
1148 
#ifdef WLAN_MLO_MULTI_CHIP
/* Get the chip id of this SoC within the multi-chip MLO group */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/* Hash-find a link peer on the SoC identified by @chip_id */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
/* Single chip: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single chip: @chip_id is ignored, search the local peer hash table */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
1178 
1179 /*
1180  * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
1181  *				  matching mac_address
1182  * @soc: soc handle
1183  * @peer_mac_addr: mld peer mac address
1184  * @mac_addr_is_aligned: is mac addr aligned
1185  * @vdev_id: vdev_id
1186  * @mod_id: id of module requesting reference
1187  *
1188  * return: peer in sucsess
1189  *         NULL in failure
1190  */
1191 static inline
1192 struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
1193 					   uint8_t *peer_mac_addr,
1194 					   int mac_addr_is_aligned,
1195 					   uint8_t vdev_id,
1196 					   enum dp_mod_id mod_id)
1197 {
1198 	if (soc->arch_ops.mlo_peer_find_hash_find)
1199 		return soc->arch_ops.mlo_peer_find_hash_find(soc,
1200 					      peer_mac_addr,
1201 					      mac_addr_is_aligned,
1202 					      mod_id, vdev_id);
1203 	return NULL;
1204 }
1205 
1206 /**
1207  * dp_peer_hash_find_wrapper() - find link peer or mld per according to
1208 				 peer_type
1209  * @soc: DP SOC handle
1210  * @peer_info: peer information for hash find
1211  * @mod_id: ID of module requesting reference
1212  *
1213  * Return: peer handle
1214  */
1215 static inline
1216 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
1217 					  struct cdp_peer_info *peer_info,
1218 					  enum dp_mod_id mod_id)
1219 {
1220 	struct dp_peer *peer = NULL;
1221 
1222 	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
1223 	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
1224 		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
1225 					      peer_info->mac_addr_is_aligned,
1226 					      peer_info->vdev_id,
1227 					      mod_id);
1228 		if (peer)
1229 			return peer;
1230 	}
1231 	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
1232 	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
1233 		peer = dp_mld_peer_find_hash_find(
1234 					soc, peer_info->mac_addr,
1235 					peer_info->mac_addr_is_aligned,
1236 					peer_info->vdev_id,
1237 					mod_id);
1238 	return peer;
1239 }
1240 
1241 /**
1242  * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1243 				 increase mld peer ref_cnt
1244  * @link_peer: link peer pointer
1245  * @mld_peer: mld peer pointer
1246  *
1247  * Return: none
1248  */
1249 static inline
1250 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
1251 			       struct dp_peer *mld_peer)
1252 {
1253 	/* increase mld_peer ref_cnt */
1254 	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
1255 	link_peer->mld_peer = mld_peer;
1256 }
1257 
1258 /**
1259  * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1260 				 decrease mld peer ref_cnt
1261  * @link_peer: link peer pointer
1262  *
1263  * Return: None
1264  */
1265 static inline
1266 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
1267 {
1268 	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
1269 	link_peer->mld_peer = NULL;
1270 }
1271 
1272 /**
1273  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1274  * @mld_peer: mld peer pointer
1275  *
1276  * Return: None
1277  */
1278 static inline
1279 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1280 {
1281 	int i;
1282 
1283 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1284 	mld_peer->num_links = 0;
1285 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1286 		mld_peer->link_peers[i].is_valid = false;
1287 }
1288 
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the lock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
1300 
1301 /**
1302  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1303  * @mld_peer: mld dp peer pointer
1304  * @link_peer: link dp peer pointer
1305  *
1306  * Return: None
1307  */
1308 static inline
1309 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1310 			       struct dp_peer *link_peer)
1311 {
1312 	int i;
1313 	struct dp_peer_link_info *link_peer_info;
1314 
1315 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1316 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1317 		link_peer_info = &mld_peer->link_peers[i];
1318 		if (!link_peer_info->is_valid) {
1319 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1320 				     link_peer->mac_addr.raw,
1321 				     QDF_MAC_ADDR_SIZE);
1322 			link_peer_info->is_valid = true;
1323 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1324 			link_peer_info->chip_id =
1325 				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
1326 			mld_peer->num_links++;
1327 			break;
1328 		}
1329 	}
1330 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1331 
1332 	if (i == DP_MAX_MLO_LINKS)
1333 		dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
1334 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1335 }
1336 
1337 /**
1338  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1339  * @mld_peer: MLD dp peer pointer
1340  * @link_peer: link dp peer pointer
1341  *
1342  * Return: number of links left after deletion
1343  */
1344 static inline
1345 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1346 				  struct dp_peer *link_peer)
1347 {
1348 	int i;
1349 	struct dp_peer_link_info *link_peer_info;
1350 	uint8_t num_links;
1351 
1352 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1353 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1354 		link_peer_info = &mld_peer->link_peers[i];
1355 		if (link_peer_info->is_valid &&
1356 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1357 					&link_peer_info->mac_addr)) {
1358 			link_peer_info->is_valid = false;
1359 			mld_peer->num_links--;
1360 			break;
1361 		}
1362 	}
1363 	num_links = mld_peer->num_links;
1364 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1365 
1366 	if (i == DP_MAX_MLO_LINKS)
1367 		dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
1368 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1369 
1370 	return num_links;
1371 }
1372 
1373 /**
1374  * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
1375 					   increase link peers ref_cnt
1376  * @soc: dp_soc handle
1377  * @mld_peer: dp mld peer pointer
1378  * @mld_link_peers: structure that hold links peers pointer array and number
1379  * @mod_id: id of module requesting reference
1380  *
1381  * Return: None
1382  */
1383 static inline
1384 void dp_get_link_peers_ref_from_mld_peer(
1385 				struct dp_soc *soc,
1386 				struct dp_peer *mld_peer,
1387 				struct dp_mld_link_peers *mld_link_peers,
1388 				enum dp_mod_id mod_id)
1389 {
1390 	struct dp_peer *peer;
1391 	uint8_t i = 0, j = 0;
1392 	struct dp_peer_link_info *link_peer_info;
1393 
1394 	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
1395 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1396 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
1397 		link_peer_info = &mld_peer->link_peers[i];
1398 		if (link_peer_info->is_valid) {
1399 			peer = dp_link_peer_hash_find_by_chip_id(
1400 						soc,
1401 						link_peer_info->mac_addr.raw,
1402 						true,
1403 						link_peer_info->vdev_id,
1404 						link_peer_info->chip_id,
1405 						mod_id);
1406 			if (peer)
1407 				mld_link_peers->link_peers[j++] = peer;
1408 		}
1409 	}
1410 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1411 
1412 	mld_link_peers->num_links = j;
1413 }
1414 
1415 /**
1416  * dp_release_link_peers_ref() - release all link peers reference
1417  * @mld_link_peers: structure that hold links peers pointer array and number
1418  * @mod_id: id of module requesting reference
1419  *
1420  * Return: None.
1421  */
1422 static inline
1423 void dp_release_link_peers_ref(
1424 			struct dp_mld_link_peers *mld_link_peers,
1425 			enum dp_mod_id mod_id)
1426 {
1427 	struct dp_peer *peer;
1428 	uint8_t i;
1429 
1430 	for (i = 0; i < mld_link_peers->num_links; i++) {
1431 		peer = mld_link_peers->link_peers[i];
1432 		if (peer)
1433 			dp_peer_unref_delete(peer, mod_id);
1434 		mld_link_peers->link_peers[i] = NULL;
1435 	}
1436 
1437 	 mld_link_peers->num_links = 0;
1438 }
1439 
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * For an MLD peer id, walks its link peers and returns the peer id of the
 * link peer that lives on this soc and the given lmac; a non-MLD peer id
 * is returned unchanged.
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			/* must match both the soc and the lmac on that soc */
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
1489 
/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * NOTE(review): @mac_addr_is_aligned is accepted but not forwarded to
 * dp_peer_find_hash_find() — 0 is passed instead, always taking the
 * unaligned path.  Confirm whether this is intentional.
 *
 * return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
		/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	} else {
		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
			    QDF_MAC_ADDR_REF(peer_mac));
	}

	return ta_peer;
}
1539 
1540 /**
1541  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1542  * @soc		: core DP soc context
1543  * @peer_id	: peer id from peer object can be retrieved
1544  * @mod_id      : ID of module requesting reference
1545  *
1546  * for MLO connection, get corresponding MLD peer,
1547  * otherwise get link peer for non-MLO case.
1548  *
1549  * return: peer in success
1550  *         NULL in failure
1551  */
1552 static inline
1553 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1554 					   uint16_t peer_id,
1555 					   enum dp_mod_id mod_id)
1556 {
1557 	struct dp_peer *ta_peer = NULL;
1558 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1559 
1560 	if (peer) {
1561 		/* mlo connection link peer, get mld peer with reference */
1562 		if (IS_MLO_DP_LINK_PEER(peer)) {
1563 			/* increase mld peer ref_cnt */
1564 			if (QDF_STATUS_SUCCESS ==
1565 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1566 				ta_peer = peer->mld_peer;
1567 			else
1568 				ta_peer = NULL;
1569 
1570 			/* release peer reference that added by hash find */
1571 			dp_peer_unref_delete(peer, mod_id);
1572 		} else {
1573 		/* mlo MLD peer or non-mlo link peer */
1574 			ta_peer = peer;
1575 		}
1576 	}
1577 
1578 	return ta_peer;
1579 }
1580 
1581 /**
1582  * dp_peer_mlo_delete() - peer MLO related delete operation
1583  * @peer: DP peer handle
1584  * Return: None
1585  */
1586 static inline
1587 void dp_peer_mlo_delete(struct dp_peer *peer)
1588 {
1589 	struct dp_peer *ml_peer;
1590 	struct dp_soc *soc;
1591 
1592 	/* MLO connection link peer */
1593 	if (IS_MLO_DP_LINK_PEER(peer)) {
1594 		ml_peer = peer->mld_peer;
1595 		soc = ml_peer->vdev->pdev->soc;
1596 
1597 		/* if last link peer deletion, delete MLD peer */
1598 		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
1599 			dp_peer_delete(soc, peer->mld_peer, NULL);
1600 	}
1601 }
1602 
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info);
1614 
1615 /**
1616  * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
1617  * @peer: datapath peer
1618  *
1619  * Return: MLD peer in case of MLO Link peer
1620  *	   Peer itself in other cases
1621  */
1622 static inline
1623 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
1624 {
1625 	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
1626 }
1627 
/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * For an MLD peer id the link peer flagged primary_link is returned with
 * an extra reference taken on it; a legacy peer is returned as-is with
 * the reference from dp_peer_get_ref_by_id().
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		/* the MLD peer reference is no longer needed */
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
1680 
1681 /**
1682  * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
1683  * @peer: Datapath peer
1684  *
1685  * Return: dp_txrx_peer from MLD peer if peer type is link peer
1686  *	   dp_txrx_peer from peer itself for other cases
1687  */
1688 static inline
1689 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
1690 {
1691 	return IS_MLO_DP_LINK_PEER(peer) ?
1692 				peer->mld_peer->txrx_peer : peer->txrx_peer;
1693 }
1694 
1695 /**
1696  * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
1697  * @peer: Datapath peer
1698  *
1699  * Return: true if peer is primary link peer or legacy peer
1700  *	   false otherwise
1701  */
1702 static inline
1703 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
1704 {
1705 	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
1706 		return true;
1707 	else if (IS_DP_LEGACY_PEER(peer))
1708 		return true;
1709 	else
1710 		return false;
1711 }
1712 
1713 /**
1714  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
1715  *
1716  * @soc		: core DP soc context
1717  * @peer_id	: peer id from peer object can be retrieved
1718  * @handle	: reference handle
1719  * @mod_id      : ID of module requesting reference
1720  *
1721  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
1722  */
1723 static inline struct dp_txrx_peer *
1724 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
1725 			       uint16_t peer_id,
1726 			       dp_txrx_ref_handle *handle,
1727 			       enum dp_mod_id mod_id)
1728 
1729 {
1730 	struct dp_peer *peer;
1731 	struct dp_txrx_peer *txrx_peer;
1732 
1733 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1734 	if (!peer)
1735 		return NULL;
1736 
1737 	txrx_peer = dp_get_txrx_peer(peer);
1738 	if (txrx_peer) {
1739 		*handle = (dp_txrx_ref_handle)peer;
1740 		return txrx_peer;
1741 	}
1742 
1743 	dp_peer_unref_delete(peer, mod_id);
1744 	return NULL;
1745 }
1746 
1747 /**
1748  * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
1749  *
1750  * @soc	: core DP soc context
1751  *
1752  * Return: void
1753  */
1754 void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
1755 
1756 #else
1757 
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

/*
 * Without 11BE MLO every peer is a plain peer: the wrappers below
 * collapse to the non-MLO hash/id lookups and the MLO maintenance
 * helpers become no-ops.
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

/* MLO setup is a no-op without 11BE MLO support */
static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)

{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
1908 #endif /* WLAN_FEATURE_11BE_MLO */
1909 
1910 static inline
1911 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
1912 {
1913 	uint8_t i;
1914 
1915 	if (IS_MLO_DP_MLD_PEER(peer)) {
1916 		dp_peer_info("skip for mld peer");
1917 		return QDF_STATUS_SUCCESS;
1918 	}
1919 
1920 	if (peer->rx_tid) {
1921 		QDF_BUG(0);
1922 		dp_peer_err("peer rx_tid mem already exist");
1923 		return QDF_STATUS_E_FAILURE;
1924 	}
1925 
1926 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
1927 				      sizeof(struct dp_rx_tid));
1928 
1929 	if (!peer->rx_tid) {
1930 		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
1931 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1932 		return QDF_STATUS_E_NOMEM;
1933 	}
1934 
1935 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
1936 	for (i = 0; i < DP_MAX_TIDS; i++)
1937 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
1938 
1939 	return QDF_STATUS_SUCCESS;
1940 }
1941 
1942 static inline
1943 void dp_peer_rx_tids_destroy(struct dp_peer *peer)
1944 {
1945 	uint8_t i;
1946 
1947 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1948 		for (i = 0; i < DP_MAX_TIDS; i++)
1949 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
1950 
1951 		qdf_mem_free(peer->rx_tid);
1952 	}
1953 
1954 	peer->rx_tid = NULL;
1955 }
1956 
1957 static inline
1958 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
1959 {
1960 	uint8_t i;
1961 
1962 	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
1963 		     sizeof(struct dp_rx_tid_defrag));
1964 
1965 	for (i = 0; i < DP_MAX_TIDS; i++)
1966 		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
1967 }
1968 
1969 static inline
1970 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
1971 {
1972 	uint8_t i;
1973 
1974 	for (i = 0; i < DP_MAX_TIDS; i++)
1975 		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
1976 }
1977 
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_init() - Initialize cached rx buffer queue
 * @txrx_peer: DP txrx peer handle
 *
 * Creates the bufq lock and the cached buffer list, both bounded by
 * DP_RX_CACHED_BUFQ_THRESH.
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

/**
 * dp_peer_rx_bufq_resources_deinit() - Tear down cached rx buffer queue
 * @txrx_peer: DP txrx peer handle
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}

#else
/* No-op stubs when rx packet caching is compiled out */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
2006 
2007 /**
2008  * dp_peer_update_state() - update dp peer state
2009  *
2010  * @soc		: core DP soc context
2011  * @peer	: DP peer
2012  * @state	: new state
2013  *
2014  * Return: None
2015  */
2016 static inline void
2017 dp_peer_update_state(struct dp_soc *soc,
2018 		     struct dp_peer *peer,
2019 		     enum dp_peer_state state)
2020 {
2021 	uint8_t peer_state;
2022 
2023 	qdf_spin_lock_bh(&peer->peer_state_lock);
2024 	peer_state = peer->peer_state;
2025 
2026 	switch (state) {
2027 	case DP_PEER_STATE_INIT:
2028 		DP_PEER_STATE_ASSERT
2029 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
2030 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
2031 		break;
2032 
2033 	case DP_PEER_STATE_ACTIVE:
2034 		DP_PEER_STATE_ASSERT(peer, state,
2035 				     (peer_state == DP_PEER_STATE_INIT));
2036 		break;
2037 
2038 	case DP_PEER_STATE_LOGICAL_DELETE:
2039 		DP_PEER_STATE_ASSERT(peer, state,
2040 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
2041 				     (peer_state == DP_PEER_STATE_INIT));
2042 		break;
2043 
2044 	case DP_PEER_STATE_INACTIVE:
2045 		if (IS_MLO_DP_MLD_PEER(peer))
2046 			DP_PEER_STATE_ASSERT
2047 				(peer, state,
2048 				 (peer_state == DP_PEER_STATE_ACTIVE));
2049 		else
2050 			DP_PEER_STATE_ASSERT
2051 				(peer, state,
2052 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2053 		break;
2054 
2055 	case DP_PEER_STATE_FREED:
2056 		if (peer->sta_self_peer)
2057 			DP_PEER_STATE_ASSERT
2058 			(peer, state, (peer_state == DP_PEER_STATE_INIT));
2059 		else
2060 			DP_PEER_STATE_ASSERT
2061 				(peer, state,
2062 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
2063 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
2064 		break;
2065 
2066 	default:
2067 		qdf_spin_unlock_bh(&peer->peer_state_lock);
2068 		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
2069 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2070 		return;
2071 	}
2072 	peer->peer_state = state;
2073 	qdf_spin_unlock_bh(&peer->peer_state_lock);
2074 	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
2075 		peer_state, state,
2076 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2077 }
2078 
#ifdef REO_SHARED_QREF_TABLE_EN
/* Implemented in dp_peer.c when the shared REO qref table is enabled */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* No-op when the shared REO qref table is not enabled */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif
2086 #endif /* _DP_PEER_H_ */
2087