xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 
26 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
27 #include "hal_reo.h"
28 #endif
29 
30 #define DP_INVALID_PEER_ID 0xffff
31 
32 #define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
33 #define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */
34 
35 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
36 
37 #define DP_PEER_HASH_LOAD_MULT  2
38 #define DP_PEER_HASH_LOAD_SHIFT 0
39 
40 /* Threshold for peer's cached buf queue beyond which frames are dropped */
41 #define DP_RX_CACHED_BUFQ_THRESH 64
42 
43 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
44 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
45 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
46 #define dp_peer_info(params...) \
47 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
48 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
49 
50 #ifdef REO_QDESC_HISTORY
51 enum reo_qdesc_event_type {
52 	REO_QDESC_UPDATE_CB = 0,
53 	REO_QDESC_FREE,
54 };
55 
56 struct reo_qdesc_event {
57 	qdf_dma_addr_t qdesc_addr;
58 	uint64_t ts;
59 	enum reo_qdesc_event_type type;
60 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
61 };
62 #endif
63 
64 struct ast_del_ctxt {
65 	bool age;
66 	int del_count;
67 };
68 
69 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
70 			       void *arg);
71 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
72 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
73 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
74 				       uint8_t *peer_mac_addr,
75 				       int mac_addr_is_aligned,
76 				       uint8_t vdev_id,
77 				       enum dp_mod_id id);
78 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
79 
80 /**
81  * dp_peer_get_ref() - Returns peer object given the peer id
82  *
83  * @soc		: core DP soc context
84  * @peer	: DP peer
85  * @mod_id	: id of module requesting the reference
86  *
87  * Return:	QDF_STATUS_SUCCESS if reference held successfully
88  *		else QDF_STATUS_E_INVAL
89  */
90 static inline
91 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
92 			   struct dp_peer *peer,
93 			   enum dp_mod_id mod_id)
94 {
95 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
96 		return QDF_STATUS_E_INVAL;
97 
98 	if (mod_id > DP_MOD_ID_RX)
99 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
100 
101 	return QDF_STATUS_SUCCESS;
102 }
103 
104 /**
105  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
106  *
107  * @soc		: core DP soc context
108  * @peer_id	: peer id from peer object can be retrieved
109  * @mod_id	: module id
110  *
111  * Return: struct dp_peer*: Pointer to DP peer object
112  */
113 static inline struct dp_peer *
114 __dp_peer_get_ref_by_id(struct dp_soc *soc,
115 			uint16_t peer_id,
116 			enum dp_mod_id mod_id)
117 
118 {
119 	struct dp_peer *peer;
120 
121 	qdf_spin_lock_bh(&soc->peer_map_lock);
122 	peer = (peer_id >= soc->max_peer_id) ? NULL :
123 				soc->peer_id_to_obj_map[peer_id];
124 	if (!peer ||
125 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
126 		qdf_spin_unlock_bh(&soc->peer_map_lock);
127 		return NULL;
128 	}
129 
130 	qdf_spin_unlock_bh(&soc->peer_map_lock);
131 	return peer;
132 }
133 
134 /**
135  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
136  *                        if peer state is active
137  *
138  * @soc		: core DP soc context
139  * @peer_id	: peer id from peer object can be retrieved
140  * @mod_id      : ID ot module requesting reference
141  *
142  * Return: struct dp_peer*: Pointer to DP peer object
143  */
144 static inline
145 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
146 				      uint16_t peer_id,
147 				      enum dp_mod_id mod_id)
148 {
149 	struct dp_peer *peer;
150 
151 	qdf_spin_lock_bh(&soc->peer_map_lock);
152 	peer = (peer_id >= soc->max_peer_id) ? NULL :
153 				soc->peer_id_to_obj_map[peer_id];
154 
155 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
156 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
157 		qdf_spin_unlock_bh(&soc->peer_map_lock);
158 		return NULL;
159 	}
160 
161 	qdf_spin_unlock_bh(&soc->peer_map_lock);
162 
163 	return peer;
164 }
165 
166 /**
167  * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
168  *
169  * @soc		: core DP soc context
170  * @peer_id	: peer id from peer object can be retrieved
171  * @handle	: reference handle
172  * @mod_id      : ID ot module requesting reference
173  *
174  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
175  */
176 static inline struct dp_txrx_peer *
177 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
178 			   uint16_t peer_id,
179 			   dp_txrx_ref_handle *handle,
180 			   enum dp_mod_id mod_id)
181 
182 {
183 	struct dp_peer *peer;
184 
185 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
186 	if (!peer)
187 		return NULL;
188 
189 	if (!peer->txrx_peer) {
190 		dp_peer_unref_delete(peer, mod_id);
191 		return NULL;
192 	}
193 
194 	*handle = (dp_txrx_ref_handle)peer;
195 	return peer->txrx_peer;
196 }
197 
198 #ifdef PEER_CACHE_RX_PKTS
199 /**
200  * dp_rx_flush_rx_cached() - flush cached rx frames
201  * @peer: peer
202  * @drop: set flag to drop frames
203  *
204  * Return: None
205  */
206 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* PEER_CACHE_RX_PKTS disabled: nothing is ever cached, so flush is a no-op */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
211 #endif
212 
/**
 * dp_clear_peer_internal() - mark a peer disconnected and drop its
 *			      cached rx frames
 * @soc: core DP soc context
 * @peer: DP peer to clear
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* Move the peer to disconnected state under the info lock first... */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* ...then flush (drop=true) any rx frames still queued for it */
	dp_rx_flush_rx_cached(peer, true);
}
222 
223 /**
224  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
225  *
226  * @vdev	: DP vdev context
227  * @func	: function to be called for each peer
228  * @arg		: argument need to be passed to func
229  * @mod_id	: module_id
230  *
231  * Return: void
232  */
233 static inline void
234 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
235 		     enum dp_mod_id mod_id)
236 {
237 	struct dp_peer *peer;
238 	struct dp_peer *tmp_peer;
239 	struct dp_soc *soc = NULL;
240 
241 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
242 		return;
243 
244 	soc = vdev->pdev->soc;
245 
246 	qdf_spin_lock_bh(&vdev->peer_list_lock);
247 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
248 			   peer_list_elem,
249 			   tmp_peer) {
250 		if (dp_peer_get_ref(soc, peer, mod_id) ==
251 					QDF_STATUS_SUCCESS) {
252 			(*func)(soc, peer, arg);
253 			dp_peer_unref_delete(peer, mod_id);
254 		}
255 	}
256 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
257 }
258 
259 /**
260  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
261  *
262  * @pdev	: DP pdev context
263  * @func	: function to be called for each peer
264  * @arg		: argument need to be passed to func
265  * @mod_id	: module_id
266  *
267  * Return: void
268  */
269 static inline void
270 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
271 		     enum dp_mod_id mod_id)
272 {
273 	struct dp_vdev *vdev;
274 
275 	if (!pdev)
276 		return;
277 
278 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
279 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
280 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
281 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
282 }
283 
284 /**
285  * dp_soc_iterate_peer() - API to iterate through all peers of soc
286  *
287  * @soc		: DP soc context
288  * @func	: function to be called for each peer
289  * @arg		: argument need to be passed to func
290  * @mod_id	: module_id
291  *
292  * Return: void
293  */
294 static inline void
295 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
296 		    enum dp_mod_id mod_id)
297 {
298 	struct dp_pdev *pdev;
299 	int i;
300 
301 	if (!soc)
302 		return;
303 
304 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
305 		pdev = soc->pdev_list[i];
306 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
307 	}
308 }
309 
310 /**
311  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
312  *
313  * This API will cache the peers in local allocated memory and calls
314  * iterate function outside the lock.
315  *
316  * As this API is allocating new memory it is suggested to use this
317  * only when lock cannot be held
318  *
319  * @vdev	: DP vdev context
320  * @func	: function to be called for each peer
321  * @arg		: argument need to be passed to func
322  * @mod_id	: module_id
323  *
324  * Return: void
325  */
326 static inline void
327 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
328 			       dp_peer_iter_func *func,
329 			       void *arg,
330 			       enum dp_mod_id mod_id)
331 {
332 	struct dp_peer *peer;
333 	struct dp_peer *tmp_peer;
334 	struct dp_soc *soc = NULL;
335 	struct dp_peer **peer_array = NULL;
336 	int i = 0;
337 	uint32_t num_peers = 0;
338 
339 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
340 		return;
341 
342 	num_peers = vdev->num_peers;
343 
344 	soc = vdev->pdev->soc;
345 
346 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
347 	if (!peer_array)
348 		return;
349 
350 	qdf_spin_lock_bh(&vdev->peer_list_lock);
351 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
352 			   peer_list_elem,
353 			   tmp_peer) {
354 		if (i >= num_peers)
355 			break;
356 
357 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
358 			peer_array[i] = peer;
359 			i = (i + 1);
360 		}
361 	}
362 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
363 
364 	for (i = 0; i < num_peers; i++) {
365 		peer = peer_array[i];
366 
367 		if (!peer)
368 			continue;
369 
370 		(*func)(soc, peer, arg);
371 		dp_peer_unref_delete(peer, mod_id);
372 	}
373 
374 	qdf_mem_free(peer_array);
375 }
376 
377 /**
378  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
379  *
380  * This API will cache the peers in local allocated memory and calls
381  * iterate function outside the lock.
382  *
383  * As this API is allocating new memory it is suggested to use this
384  * only when lock cannot be held
385  *
386  * @pdev	: DP pdev context
387  * @func	: function to be called for each peer
388  * @arg		: argument need to be passed to func
389  * @mod_id	: module_id
390  *
391  * Return: void
392  */
393 static inline void
394 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
395 			       dp_peer_iter_func *func,
396 			       void *arg,
397 			       enum dp_mod_id mod_id)
398 {
399 	struct dp_peer *peer;
400 	struct dp_peer *tmp_peer;
401 	struct dp_soc *soc = NULL;
402 	struct dp_vdev *vdev = NULL;
403 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
404 	int i = 0;
405 	int j = 0;
406 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
407 
408 	if (!pdev || !pdev->soc)
409 		return;
410 
411 	soc = pdev->soc;
412 
413 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
414 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
415 		num_peers[i] = vdev->num_peers;
416 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
417 					       sizeof(struct dp_peer *));
418 		if (!peer_array[i])
419 			break;
420 
421 		qdf_spin_lock_bh(&vdev->peer_list_lock);
422 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
423 				   peer_list_elem,
424 				   tmp_peer) {
425 			if (j >= num_peers[i])
426 				break;
427 
428 			if (dp_peer_get_ref(soc, peer, mod_id) ==
429 					QDF_STATUS_SUCCESS) {
430 				peer_array[i][j] = peer;
431 
432 				j = (j + 1);
433 			}
434 		}
435 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
436 		i = (i + 1);
437 	}
438 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
439 
440 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
441 		if (!peer_array[i])
442 			break;
443 
444 		for (j = 0; j < num_peers[i]; j++) {
445 			peer = peer_array[i][j];
446 
447 			if (!peer)
448 				continue;
449 
450 			(*func)(soc, peer, arg);
451 			dp_peer_unref_delete(peer, mod_id);
452 		}
453 
454 		qdf_mem_free(peer_array[i]);
455 	}
456 }
457 
458 /**
459  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
460  *
461  * This API will cache the peers in local allocated memory and calls
462  * iterate function outside the lock.
463  *
464  * As this API is allocating new memory it is suggested to use this
465  * only when lock cannot be held
466  *
467  * @soc		: DP soc context
468  * @func	: function to be called for each peer
469  * @arg		: argument need to be passed to func
470  * @mod_id	: module_id
471  *
472  * Return: void
473  */
474 static inline void
475 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
476 			      dp_peer_iter_func *func,
477 			      void *arg,
478 			      enum dp_mod_id mod_id)
479 {
480 	struct dp_pdev *pdev;
481 	int i;
482 
483 	if (!soc)
484 		return;
485 
486 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
487 		pdev = soc->pdev_list[i];
488 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
489 	}
490 }
491 
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * Logs an alert and asserts when @_condition is false, i.e. when the
 * shift from the peer's current state to @_new_state is not allowed.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/* Non-debug variant: log the invalid transition but do not assert */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
515 
516 /**
517  * dp_peer_state_cmp() - compare dp peer state
518  *
519  * @peer	: DP peer
520  * @state	: state
521  *
522  * Return: true if state matches with peer state
523  *	   false if it does not match
524  */
525 static inline bool
526 dp_peer_state_cmp(struct dp_peer *peer,
527 		  enum dp_peer_state state)
528 {
529 	bool is_status_equal = false;
530 
531 	qdf_spin_lock_bh(&peer->peer_state_lock);
532 	is_status_equal = (peer->peer_state == state);
533 	qdf_spin_unlock_bh(&peer->peer_state_lock);
534 
535 	return is_status_equal;
536 }
537 
538 /**
539  * dp_peer_update_state() - update dp peer state
540  *
541  * @soc		: core DP soc context
542  * @peer	: DP peer
543  * @state	: new state
544  *
545  * Return: None
546  */
547 static inline void
548 dp_peer_update_state(struct dp_soc *soc,
549 		     struct dp_peer *peer,
550 		     enum dp_peer_state state)
551 {
552 	uint8_t peer_state;
553 
554 	qdf_spin_lock_bh(&peer->peer_state_lock);
555 	peer_state = peer->peer_state;
556 
557 	switch (state) {
558 	case DP_PEER_STATE_INIT:
559 		DP_PEER_STATE_ASSERT
560 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
561 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
562 		break;
563 
564 	case DP_PEER_STATE_ACTIVE:
565 		DP_PEER_STATE_ASSERT(peer, state,
566 				     (peer_state == DP_PEER_STATE_INIT));
567 		break;
568 
569 	case DP_PEER_STATE_LOGICAL_DELETE:
570 		DP_PEER_STATE_ASSERT(peer, state,
571 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
572 				     (peer_state == DP_PEER_STATE_INIT));
573 		break;
574 
575 	case DP_PEER_STATE_INACTIVE:
576 		DP_PEER_STATE_ASSERT
577 			(peer, state,
578 			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
579 		break;
580 
581 	case DP_PEER_STATE_FREED:
582 		if (peer->sta_self_peer)
583 			DP_PEER_STATE_ASSERT
584 			(peer, state, (peer_state == DP_PEER_STATE_INIT));
585 		else
586 			DP_PEER_STATE_ASSERT
587 				(peer, state,
588 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
589 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
590 		break;
591 
592 	default:
593 		qdf_spin_unlock_bh(&peer->peer_state_lock);
594 		dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
595 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
596 		return;
597 	}
598 	peer->peer_state = state;
599 	qdf_spin_unlock_bh(&peer->peer_state_lock);
600 	dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
601 		peer_state, state,
602 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
603 }
604 
605 void dp_print_ast_stats(struct dp_soc *soc);
606 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
607 				  uint16_t hw_peer_id, uint8_t vdev_id,
608 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
609 				  uint8_t is_wds);
610 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
611 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
612 			      uint8_t is_wds, uint32_t free_wds_count);
613 
614 #ifdef DP_RX_UDP_OVER_PEER_ROAM
615 /**
616  * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
617  * @soc - dp soc pointer
618  * @vdev_id - vdev id
619  * @peer_mac_addr - mac address of the peer
620  *
621  * This function resets the roamed peer auth status and mac address
622  * after peer map indication of same peer is received from firmware.
623  *
624  * Return: None
625  */
626 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
627 			      uint8_t *peer_mac_addr);
628 #else
629 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
630 					    uint8_t *peer_mac_addr)
631 {
632 }
633 #endif
634 
635 #ifdef WLAN_FEATURE_11BE_MLO
636 /**
637  * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc_handle - generic soc handle
639  * @peer_id - ML peer_id from firmware
640  * @peer_mac_addr - mac address of the peer
641  * @mlo_ast_flow_info: MLO AST flow info
642  * @mlo_link_info - MLO link info
643  *
644  * associate the ML peer_id that firmware provided with peer entry
645  * and update the ast table in the host with the hw_peer_id.
646  *
647  * Return: QDF_STATUS code
648  */
649 QDF_STATUS
650 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
651 			   uint8_t *peer_mac_addr,
652 			   struct dp_mlo_flow_override_info *mlo_flow_info,
653 			   struct dp_mlo_link_info *mlo_link_info);
654 
655 /**
656  * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
659  *
660  * Return: none
661  */
662 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
663 #endif
664 
665 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
666 			   enum cdp_sec_type sec_type, int is_unicast,
667 			   u_int32_t *michael_key, u_int32_t *rx_pn);
668 
669 QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
670 				   uint8_t tid, uint16_t win_sz);
671 
672 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
673 		uint16_t peer_id, uint8_t *peer_mac);
674 
675 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
676 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
677 			   uint32_t flags);
678 
679 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
680 
681 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
682 			       struct dp_ast_entry *ast_entry);
683 
684 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
685 			struct dp_ast_entry *ast_entry,	uint32_t flags);
686 
687 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
688 						     uint8_t *ast_mac_addr,
689 						     uint8_t pdev_id);
690 
691 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
692 						     uint8_t *ast_mac_addr,
693 						     uint8_t vdev_id);
694 
695 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
696 					       uint8_t *ast_mac_addr);
697 
698 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
699 				struct dp_ast_entry *ast_entry);
700 
701 
702 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
703 				struct dp_ast_entry *ast_entry);
704 
705 void dp_peer_ast_set_type(struct dp_soc *soc,
706 				struct dp_ast_entry *ast_entry,
707 				enum cdp_txrx_ast_entry_type type);
708 
709 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
710 			      struct dp_ast_entry *ast_entry,
711 			      struct dp_peer *peer);
712 
713 #ifdef WLAN_FEATURE_MULTI_AST_DEL
714 void dp_peer_ast_send_multi_wds_del(
715 		struct dp_soc *soc, uint8_t vdev_id,
716 		struct peer_del_multi_wds_entries *wds_list);
717 #endif
718 
719 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
720 			   struct cdp_soc *dp_soc,
721 			   void *cookie,
722 			   enum cdp_ast_free_status status);
723 
724 void dp_peer_ast_hash_remove(struct dp_soc *soc,
725 			     struct dp_ast_entry *ase);
726 
727 void dp_peer_free_ast_entry(struct dp_soc *soc,
728 			    struct dp_ast_entry *ast_entry);
729 
730 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
731 			      struct dp_ast_entry *ast_entry,
732 			      struct dp_peer *peer);
733 
734 /**
735  * dp_peer_mec_detach_entry() - Detach the MEC entry
736  * @soc: SoC handle
737  * @mecentry: MEC entry of the node
738  * @ptr: pointer to free list
739  *
740  * The MEC entry is detached from MEC table and added to free_list
741  * to free the object outside lock
742  *
743  * Return: None
744  */
745 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
746 			      void *ptr);
747 
748 /**
749  * dp_peer_mec_free_list() - free the MEC entry from free_list
750  * @soc: SoC handle
751  * @ptr: pointer to free list
752  *
753  * Return: None
754  */
755 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
756 
757 /**
758  * dp_peer_mec_add_entry()
759  * @soc: SoC handle
760  * @vdev: vdev to which mec node belongs
761  * @mac_addr: MAC address of mec node
762  *
763  * This function allocates and adds MEC entry to MEC table.
764  * It assumes caller has taken the mec lock to protect the access to these
765  * tables
766  *
767  * Return: QDF_STATUS
768  */
769 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
770 				 struct dp_vdev *vdev,
771 				 uint8_t *mac_addr);
772 
773 /**
774  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
775  * within pdev
776  * @soc: SoC handle
777  *
778  * It assumes caller has taken the mec_lock to protect the access to
779  * MEC hash table
780  *
781  * Return: MEC entry
782  */
783 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
784 						     uint8_t pdev_id,
785 						     uint8_t *mec_mac_addr);
786 
/*
 * DP_AST_ASSERT() - dump AST stats and BUG out when @_condition is false.
 * NOTE: relies on a 'soc' variable being in scope at the call site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
794 
795 /**
796  * dp_peer_update_inactive_time - Update inactive time for peer
797  * @pdev: pdev object
798  * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
800  */
801 void
802 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
803 			     uint32_t *tag_buf);
804 
805 #ifndef QCA_MULTIPASS_SUPPORT
806 /**
807  * dp_peer_set_vlan_id: set vlan_id for this peer
808  * @cdp_soc: soc handle
809  * @vdev_id: id of vdev object
810  * @peer_mac: mac address
811  * @vlan_id: vlan id for peer
812  *
813  * return: void
814  */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
	/* QCA_MULTIPASS_SUPPORT disabled: no-op */
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	/* QCA_MULTIPASS_SUPPORT disabled: report success unconditionally */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/* QCA_MULTIPASS_SUPPORT disabled: no-op */
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	/* QCA_MULTIPASS_SUPPORT disabled: no-op */
}
859 #else
860 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
861 			 uint8_t vdev_id, uint8_t *peer_mac,
862 			 uint16_t vlan_id);
863 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
864 				uint16_t vlan_id, uint16_t group_key);
865 void dp_peer_multipass_list_init(struct dp_vdev *vdev);
866 void dp_peer_multipass_list_remove(struct dp_peer *peer);
867 #endif
868 
869 
870 #ifndef QCA_PEER_MULTIQ_SUPPORT
871 /**
872  * dp_peer_reset_flowq_map() - reset peer flowq map table
873  * @peer - dp peer handle
874  *
875  * Return: none
876  */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	/* QCA_PEER_MULTIQ_SUPPORT disabled: no-op */
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc - genereic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
	/* QCA_PEER_MULTIQ_SUPPORT disabled: no-op */
}
898 #else
899 void dp_peer_reset_flowq_map(struct dp_peer *peer);
900 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
901 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
902 		    struct dp_ast_flow_override_info *ast_info);
903 #endif
904 
905 /*
906  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
907  * after deleting the entries (ie., setting valid=0)
908  *
909  * @soc: DP SOC handle
910  * @cb_ctxt: Callback context
911  * @reo_status: REO command status
912  */
913 void dp_rx_tid_delete_cb(struct dp_soc *soc,
914 			 void *cb_ctxt,
915 			 union hal_reo_status *reo_status);
916 
917 #ifdef QCA_PEER_EXT_STATS
918 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
919 					 struct dp_txrx_peer *txrx_peer);
920 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
921 				     struct dp_txrx_peer *txrx_peer);
922 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
923 #else
/* QCA_PEER_EXT_STATS disabled: delay-stats context is never allocated,
 * so alloc reports success and dealloc/clear are no-ops.
 */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
941 #endif
942 
943 #ifdef WLAN_PEER_JITTER
944 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
945 					  struct dp_txrx_peer *txrx_peer);
946 
947 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
948 				      struct dp_txrx_peer *txrx_peer);
949 
950 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
951 #else
/* WLAN_PEER_JITTER disabled: jitter-stats context is never allocated,
 * so alloc reports success and dealloc/clear are no-ops.
 */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
969 #endif
970 
971 #ifndef CONFIG_SAWF_DEF_QUEUES
/* CONFIG_SAWF_DEF_QUEUES disabled: SAWF context alloc/free are no-ops
 * that report success.
 */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
983 
984 #endif
985 
986 #ifndef CONFIG_SAWF
/* CONFIG_SAWF disabled: SAWF stats context alloc/free are no-ops that
 * report success.
 */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
1000 #endif
1001 
1002 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
1003 					   struct dp_vdev *vdev,
1004 					   enum dp_mod_id mod_id);
1005 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
1006 						struct dp_vdev *vdev,
1007 						enum dp_mod_id mod_id);
1008 
1009 void dp_peer_ast_table_detach(struct dp_soc *soc);
1010 void dp_peer_find_map_detach(struct dp_soc *soc);
1011 void dp_soc_wds_detach(struct dp_soc *soc);
1012 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
1013 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
1014 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
1015 void dp_soc_wds_attach(struct dp_soc *soc);
1016 void dp_peer_mec_hash_detach(struct dp_soc *soc);
1017 void dp_peer_ast_hash_detach(struct dp_soc *soc);
1018 
1019 #ifdef FEATURE_AST
1020 /*
1021  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
1022  * @soc - datapath soc handle
1023  * @peer - datapath peer handle
1024  *
1025  * Delete the AST entries belonging to a peer
1026  */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* Delete every remaining AST entry linked on the peer's list.
	 * NOTE(review): iteration uses a safe-iterator macro, presumably
	 * because dp_peer_del_ast() unlinks the current entry — confirm.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
1047 
1048 void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
1049 			       void *arg);
1050 #else
/* FEATURE_AST disabled: no AST table exists, so these are no-ops */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
1060 #endif
1061 
1062 #ifdef FEATURE_MEC
1063 /**
1064  * dp_peer_mec_spinlock_create() - Create the MEC spinlock
1065  * @soc: SoC handle
1066  *
1067  * Return: none
1068  */
1069 void dp_peer_mec_spinlock_create(struct dp_soc *soc);
1070 
1071 /**
1072  * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
1073  * @soc: SoC handle
1074  *
1075  * Return: none
1076  */
1077 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
1078 
1079 /**
1080  * dp_peer_mec_flush_entries() - Delete all mec entries in table
1081  * @soc: Datapath SOC
1082  *
1083  * Return: None
1084  */
1085 void dp_peer_mec_flush_entries(struct dp_soc *soc);
1086 #else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	/* FEATURE_MEC disabled: no MEC lock to create */
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	/* FEATURE_MEC disabled: no MEC lock to destroy */
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	/* FEATURE_MEC disabled: no MEC entries to flush */
}
1098 #endif
1099 
1100 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
1101 /**
1102  * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
1103  * @soc : dp_soc handle
1104  * @peer: peer
1105  *
1106  * This function is used to send cache flush cmd to reo and
1107  * to register the callback to handle the dumping of the reo
 * queue stats from DDR
1109  *
1110  * Return: none
1111  */
1112 void dp_send_cache_flush_for_rx_tid(
1113 	struct dp_soc *soc, struct dp_peer *peer);
1114 
1115 /**
1116  * dp_get_rx_reo_queue_info() - Handler to get rx tid info
1117  * @soc : cdp_soc_t handle
1118  * @vdev_id: vdev id
1119  *
1120  * Handler to get rx tid info from DDR after h/w cache is
1121  * invalidated first using the cache flush cmd.
1122  *
1123  * Return: none
1124  */
1125 void dp_get_rx_reo_queue_info(
1126 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
1127 
1128 /**
1129  * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
1130  * @soc : dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status returned for the flush command
1133  *
1134  * This is the callback function registered after sending the reo cmd
1135  * to flush the h/w cache and invalidate it. In the callback the reo
1136  * queue desc info is dumped from DDR.
1137  *
1138  * Return: none
1139  */
1140 void dp_dump_rx_reo_queue_info(
1141 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
1142 
1143 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
1144 
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	/* DUMP_REO_QUEUE_INFO_IN_DDR disabled: no-op */
}
1149 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
1150 
1151 static inline int dp_peer_find_mac_addr_cmp(
1152 	union dp_align_mac_addr *mac_addr1,
1153 	union dp_align_mac_addr *mac_addr2)
1154 {
1155 		/*
1156 		 * Intentionally use & rather than &&.
1157 		 * because the operands are binary rather than generic boolean,
1158 		 * the functionality is equivalent.
1159 		 * Using && has the advantage of short-circuited evaluation,
1160 		 * but using & has the advantage of no conditional branching,
1161 		 * which is a more significant benefit.
1162 		 */
1163 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1164 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1165 }
1166 
1167 /**
1168  * dp_peer_delete() - delete DP peer
1169  *
 * @soc: Datapath soc
1171  * @peer: Datapath peer
1172  * @arg: argument to iter function
1173  *
1174  * Return: void
1175  */
1176 void dp_peer_delete(struct dp_soc *soc,
1177 		    struct dp_peer *peer,
1178 		    void *arg);
1179 
1180 /**
1181  * dp_mlo_peer_delete() - delete MLO DP peer
1182  *
1183  * @soc: Datapath soc
1184  * @peer: Datapath peer
1185  * @arg: argument to iter function
1186  *
1187  * Return: void
1188  */
1189 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
1190 
1191 #ifdef WLAN_FEATURE_11BE_MLO
1192 
1193 /* is MLO connection mld peer */
1194 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
1195 
1196 /* set peer type */
1197 #define DP_PEER_SET_TYPE(_peer, _type_val) \
1198 	((_peer)->peer_type = (_type_val))
1199 
1200 /* is legacy peer */
1201 #define IS_DP_LEGACY_PEER(_peer) \
1202 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
1203 /* is MLO connection link peer */
1204 #define IS_MLO_DP_LINK_PEER(_peer) \
1205 	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
1206 /* is MLO connection mld peer */
1207 #define IS_MLO_DP_MLD_PEER(_peer) \
1208 	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
1209 /* Get Mld peer from link peer */
1210 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
1211 	((link_peer)->mld_peer)
1212 
1213 #ifdef WLAN_MLO_MULTI_CHIP
1214 uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
1215 
1216 struct dp_peer *
1217 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1218 				  uint8_t *peer_mac_addr,
1219 				  int mac_addr_is_aligned,
1220 				  uint8_t vdev_id,
1221 				  uint8_t chip_id,
1222 				  enum dp_mod_id mod_id);
1223 #else
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	/* Single-chip build: chip id is always 0 */
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	/* Single-chip build: chip_id is ignored, plain hash lookup */
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1241 #endif
1242 
/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *				  matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
					   uint8_t *peer_mac_addr,
					   int mac_addr_is_aligned,
					   uint8_t vdev_id,
					   enum dp_mod_id mod_id)
{
	/*
	 * NOTE(review): the arch op is invoked with (mod_id, vdev_id)
	 * ordering - confirm this matches the ops table signature.
	 */
	if (soc->arch_ops.mlo_peer_find_hash_find)
		return soc->arch_ops.mlo_peer_find_hash_find(soc,
					      peer_mac_addr,
					      mac_addr_is_aligned,
					      mod_id, vdev_id);
	return NULL;
}
1269 
/**
 * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
 *				 peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * For CDP_LINK_PEER_TYPE / CDP_WILD_PEER_TYPE the link peer hash is
 * searched first; for CDP_MLD_PEER_TYPE / CDP_WILD_PEER_TYPE (when the
 * link lookup found nothing) the MLD peer hash is searched.
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	struct dp_peer *peer = NULL;

	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
					      peer_info->mac_addr_is_aligned,
					      peer_info->vdev_id,
					      mod_id);
		if (peer)
			return peer;
	}
	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
		peer = dp_mld_peer_find_hash_find(
					soc, peer_info->mac_addr,
					peer_info->mac_addr_is_aligned,
					peer_info->vdev_id,
					mod_id);
	return peer;
}
1304 
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* take the mld_peer reference before publishing the pointer */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
1321 
/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* drop the reference taken in dp_link_peer_add_mld_peer() */
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
1335 
1336 /**
1337  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1338  * @mld_peer: mld peer pointer
1339  *
1340  * Return: None
1341  */
1342 static inline
1343 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1344 {
1345 	int i;
1346 
1347 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1348 	mld_peer->num_links = 0;
1349 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1350 		mld_peer->link_peers[i].is_valid = false;
1351 }
1352 
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the lock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
1364 
1365 /**
1366  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1367  * @mld_peer: mld dp peer pointer
1368  * @link_peer: link dp peer pointer
1369  *
1370  * Return: None
1371  */
1372 static inline
1373 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1374 			       struct dp_peer *link_peer)
1375 {
1376 	int i;
1377 	struct dp_peer_link_info *link_peer_info;
1378 
1379 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1380 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1381 		link_peer_info = &mld_peer->link_peers[i];
1382 		if (!link_peer_info->is_valid) {
1383 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1384 				     link_peer->mac_addr.raw,
1385 				     QDF_MAC_ADDR_SIZE);
1386 			link_peer_info->is_valid = true;
1387 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1388 			link_peer_info->chip_id =
1389 				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
1390 			mld_peer->num_links++;
1391 			break;
1392 		}
1393 	}
1394 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1395 
1396 	if (i == DP_MAX_MLO_LINKS)
1397 		dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
1398 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1399 }
1400 
1401 /**
1402  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1403  * @mld_peer: MLD dp peer pointer
1404  * @link_peer: link dp peer pointer
1405  *
1406  * Return: number of links left after deletion
1407  */
1408 static inline
1409 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1410 				  struct dp_peer *link_peer)
1411 {
1412 	int i;
1413 	struct dp_peer_link_info *link_peer_info;
1414 	uint8_t num_links;
1415 
1416 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1417 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1418 		link_peer_info = &mld_peer->link_peers[i];
1419 		if (link_peer_info->is_valid &&
1420 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1421 					&link_peer_info->mac_addr)) {
1422 			link_peer_info->is_valid = false;
1423 			mld_peer->num_links--;
1424 			break;
1425 		}
1426 	}
1427 	num_links = mld_peer->num_links;
1428 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1429 
1430 	if (i == DP_MAX_MLO_LINKS)
1431 		dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
1432 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1433 
1434 	return num_links;
1435 }
1436 
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Walks @mld_peer's link table under link_peers_info_lock and looks up
 * each valid entry by (mac, vdev_id, chip_id). Every peer found is
 * stored in @mld_link_peers with a reference held for @mod_id; caller
 * must release them via dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
				struct dp_soc *soc,
				struct dp_peer *mld_peer,
				struct dp_mld_link_peers *mld_link_peers,
				enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			/* stored mac is aligned, hence 'true' below */
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			/* lookup may fail; output array stays compact */
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
1478 
1479 /**
1480  * dp_release_link_peers_ref() - release all link peers reference
1481  * @mld_link_peers: structure that hold links peers ponter array and number
1482  * @mod_id: id of module requesting reference
1483  *
1484  * Return: None.
1485  */
1486 static inline
1487 void dp_release_link_peers_ref(
1488 			struct dp_mld_link_peers *mld_link_peers,
1489 			enum dp_mod_id mod_id)
1490 {
1491 	struct dp_peer *peer;
1492 	uint8_t i;
1493 
1494 	for (i = 0; i < mld_link_peers->num_links; i++) {
1495 		peer = mld_link_peers->link_peers[i];
1496 		if (peer)
1497 			dp_peer_unref_delete(peer, mod_id);
1498 		mld_link_peers->link_peers[i] = NULL;
1499 	}
1500 
1501 	 mld_link_peers->num_links = 0;
1502 }
1503 
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * If @peer_id resolves to an MLD peer, search its link peers for one
 * whose pdev belongs to @soc and matches @lmac_id; otherwise @peer_id
 * itself is returned. All references taken internally are released
 * before returning.
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			/* match both the soc and the lmac within that soc */
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
1553 
1554 /**
1555  * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
1556  * @soc: soc handle
1557  * @peer_mac_addr: peer mac address
1558  * @mac_addr_is_aligned: is mac addr alligned
1559  * @vdev_id: vdev_id
1560  * @mod_id: id of module requesting reference
1561  *
1562  * for MLO connection, get corresponding MLD peer,
1563  * otherwise get link peer for non-MLO case.
1564  *
1565  * return: peer in success
1566  *         NULL in failure
1567  */
1568 static inline
1569 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
1570 					       uint8_t *peer_mac,
1571 					       int mac_addr_is_aligned,
1572 					       uint8_t vdev_id,
1573 					       enum dp_mod_id mod_id)
1574 {
1575 	struct dp_peer *ta_peer = NULL;
1576 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
1577 						      peer_mac, 0, vdev_id,
1578 						      mod_id);
1579 
1580 	if (peer) {
1581 		/* mlo connection link peer, get mld peer with reference */
1582 		if (IS_MLO_DP_LINK_PEER(peer)) {
1583 			/* increase mld peer ref_cnt */
1584 			if (QDF_STATUS_SUCCESS ==
1585 			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1586 				ta_peer = peer->mld_peer;
1587 			else
1588 				ta_peer = NULL;
1589 
1590 			/* relese peer reference that added by hash find */
1591 			dp_peer_unref_delete(peer, mod_id);
1592 		} else {
1593 		/* mlo MLD peer or non-mlo link peer */
1594 			ta_peer = peer;
1595 		}
1596 	} else {
1597 		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
1598 			    QDF_MAC_ADDR_REF(peer_mac));
1599 	}
1600 
1601 	return ta_peer;
1602 }
1603 
1604 /**
1605  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1606  * @soc		: core DP soc context
1607  * @peer_id	: peer id from peer object can be retrieved
1608  * @mod_id      : ID ot module requesting reference
1609  *
1610  * for MLO connection, get corresponding MLD peer,
1611  * otherwise get link peer for non-MLO case.
1612  *
1613  * return: peer in success
1614  *         NULL in failure
1615  */
1616 static inline
1617 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1618 					   uint16_t peer_id,
1619 					   enum dp_mod_id mod_id)
1620 {
1621 	struct dp_peer *ta_peer = NULL;
1622 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1623 
1624 	if (peer) {
1625 		/* mlo connection link peer, get mld peer with reference */
1626 		if (IS_MLO_DP_LINK_PEER(peer)) {
1627 			/* increase mld peer ref_cnt */
1628 			if (QDF_STATUS_SUCCESS ==
1629 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1630 				ta_peer = peer->mld_peer;
1631 			else
1632 				ta_peer = NULL;
1633 
1634 			/* relese peer reference that added by hash find */
1635 			dp_peer_unref_delete(peer, mod_id);
1636 		} else {
1637 		/* mlo MLD peer or non-mlo link peer */
1638 			ta_peer = peer;
1639 		}
1640 	}
1641 
1642 	return ta_peer;
1643 }
1644 
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * For an MLO link peer, removes it from its MLD peer's link table;
 * if it was the last link, the MLD peer itself is deleted.
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}
1666 
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
1673 QDF_STATUS dp_peer_mlo_setup(
1674 			struct dp_soc *soc,
1675 			struct dp_peer *peer,
1676 			uint8_t vdev_id,
1677 			struct cdp_peer_setup_info *setup_info);
1678 
1679 /**
1680  * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
1681  * @peer: datapath peer
1682  *
1683  * Return: MLD peer in case of MLO Link peer
1684  *	   Peer itself in other cases
1685  */
1686 static inline
1687 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
1688 {
1689 	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
1690 }
1691 
/**
 * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
 *					peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * If @peer_id resolves to an MLD peer, return its link peer that has
 * primary_link set, with a reference held for @mod_id; the MLD peer
 * reference is dropped before returning. For a non-MLD peer the peer
 * itself is returned (still holding the lookup reference).
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
1744 
1745 /**
1746  * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
1747  * @peer: Datapath peer
1748  *
1749  * Return: dp_txrx_peer from MLD peer if peer type is link peer
1750  *	   dp_txrx_peer from peer itself for other cases
1751  */
1752 static inline
1753 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
1754 {
1755 	return IS_MLO_DP_LINK_PEER(peer) ?
1756 				peer->mld_peer->txrx_peer : peer->txrx_peer;
1757 }
1758 
1759 /**
1760  * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
1761  * @peer: Datapath peer
1762  *
1763  * Return: true if peer is primary link peer or legacy peer
1764  *	   false otherwise
1765  */
1766 static inline
1767 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
1768 {
1769 	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
1770 		return true;
1771 	else if (IS_DP_LEGACY_PEER(peer))
1772 		return true;
1773 	else
1774 		return false;
1775 }
1776 
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle, set only on success; caller releases it
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)

{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	/* for an MLO link peer this resolves to the MLD's txrx peer */
	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	/* no txrx peer: drop the lookup reference before failing */
	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
1810 
1811 /**
1812  * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
1813  *
1814  * @soc	: core DP soc context
1815  *
1816  * Return: void
1817  */
1818 void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
1819 
1820 #else
1821 
1822 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
1823 
1824 #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
1825 /* is legacy peer */
1826 #define IS_DP_LEGACY_PEER(_peer) true
1827 #define IS_MLO_DP_LINK_PEER(_peer) false
1828 #define IS_MLO_DP_MLD_PEER(_peer) false
1829 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
1830 
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	/* Non-MLO build: peer_type is ignored, plain hash lookup */
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	/* Non-MLO build: target peer is the peer itself */
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	/* Non-MLO build: target peer is the peer itself */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
1861 
static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	/* Non-MLO build: nothing to set up */
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	/* Non-MLO build: no link peers info */
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	/* Non-MLO build: no link peers info */
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* Non-MLO build: no mld peer to unlink */
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	/* Non-MLO build: no MLO-specific delete */
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
	/* Non-MLO build: no-op */
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	/* Non-MLO build: chip id is always 0 */
	return 0;
}
1902 
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	/* Non-MLO build: chip_id is ignored, plain hash lookup */
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	/* Non-MLO build: target peer is the peer itself */
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	/* Non-MLO build: the peer itself is the primary link peer */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	/* Non-MLO build: no MLD indirection */
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	/* Non-MLO build: every peer is its own primary */
	return true;
}
1941 
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)

{
	/* Non-MLO build: direct txrx peer lookup */
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	/* Non-MLO build: the peer id maps to itself */
	return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
	/* Non-MLO build: no MLO AST stats to print */
}
1972 #endif /* WLAN_FEATURE_11BE_MLO */
1973 
1974 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
1975 /**
1976  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
1977  * @soc: Soc handle
1978  * @peer: DP peer handle for ML peer
1979  * @peer_id: peer_id
1980  * Return: None
1981  */
1982 void dp_mlo_partner_chips_map(struct dp_soc *soc,
1983 			      struct dp_peer *peer,
1984 			      uint16_t peer_id);
1985 
1986 /**
1987  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
1988  * @soc: Soc handle
1989  * @peer_id: peer_id
1990  * Return: None
1991  */
1992 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
1993 				uint16_t peer_id);
1994 #else
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
	/* Single-chip build: no partner chips to map */
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
	/* Single-chip build: no partner chips to unmap */
}
2005 #endif
2006 
2007 static inline
2008 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
2009 {
2010 	uint8_t i;
2011 
2012 	if (IS_MLO_DP_MLD_PEER(peer)) {
2013 		dp_peer_info("skip for mld peer");
2014 		return QDF_STATUS_SUCCESS;
2015 	}
2016 
2017 	if (peer->rx_tid) {
2018 		QDF_BUG(0);
2019 		dp_peer_err("peer rx_tid mem already exist");
2020 		return QDF_STATUS_E_FAILURE;
2021 	}
2022 
2023 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
2024 				      sizeof(struct dp_rx_tid));
2025 
2026 	if (!peer->rx_tid) {
2027 		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
2028 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2029 		return QDF_STATUS_E_NOMEM;
2030 	}
2031 
2032 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
2033 	for (i = 0; i < DP_MAX_TIDS; i++)
2034 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
2035 
2036 	return QDF_STATUS_SUCCESS;
2037 }
2038 
/**
 * dp_peer_rx_tids_destroy() - tear down per-TID rx state of a peer
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	/*
	 * rx_tid is destroyed/freed only for non-link peers; for MLO link
	 * peers the pointer is just cleared.
	 * NOTE(review): confirm link-peer rx_tid ownership is released
	 * elsewhere, otherwise this path would leak the allocation made
	 * in dp_peer_rx_tids_create().
	 */
	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}
2053 
2054 static inline
2055 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
2056 {
2057 	uint8_t i;
2058 
2059 	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
2060 		     sizeof(struct dp_rx_tid_defrag));
2061 
2062 	for (i = 0; i < DP_MAX_TIDS; i++)
2063 		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
2064 }
2065 
/**
 * dp_peer_defrag_rx_tids_deinit() - destroy per-TID rx defrag locks
 * @txrx_peer: DP txrx peer handle
 *
 * Return: None
 */
static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}
2074 
2075 #ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	/* create the cached-bufq lock and the bounded rx buffer queue */
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	/* destroy the queue before its lock (reverse of init order) */
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
2091 
2092 #else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	/* PEER_CACHE_RX_PKTS disabled: no bufq resources */
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	/* PEER_CACHE_RX_PKTS disabled: no bufq resources */
}
2102 #endif
2103 
2104 #ifdef REO_SHARED_QREF_TABLE_EN
2105 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2106 					struct dp_peer *peer);
2107 #else
2108 static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2109 						      struct dp_peer *peer) {}
2110 #endif
2111 #endif /* _DP_PEER_H_ */
2112