xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision ccf6794c7efeda37a9772e5eb4d4dab2ab5af07a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 
26 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
27 #include "hal_reo.h"
28 #endif
29 
30 #define DP_INVALID_PEER_ID 0xffff
31 
32 #define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
33 #define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */
34 
35 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
36 
37 #define DP_PEER_HASH_LOAD_MULT  2
38 #define DP_PEER_HASH_LOAD_SHIFT 0
39 
40 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
41 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
42 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
43 #define dp_peer_info(params...) \
44 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
45 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
46 
47 #ifdef REO_QDESC_HISTORY
/* Kind of event recorded in the REO qdesc history. */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,	/* qdesc update callback ran */
	REO_QDESC_FREE,			/* qdesc was freed */
};

/* One record in the REO qdesc history ring. */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;	/* DMA address of the REO queue desc */
	uint64_t ts;			/* event timestamp */
	enum reo_qdesc_event_type type;	/* which event occurred */
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];	/* peer owning the qdesc */
};
59 #endif
60 
/* Context threaded through AST entry deletion iterations. */
struct ast_del_ctxt {
	bool age;	/* NOTE(review): name suggests ageout-triggered delete — confirm at call sites */
	int del_count;	/* running count of deleted entries */
};
65 
66 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
67 			       void *arg);
68 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
69 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
70 				       uint8_t *peer_mac_addr,
71 				       int mac_addr_is_aligned,
72 				       uint8_t vdev_id,
73 				       enum dp_mod_id id);
74 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
75 
76 /**
77  * dp_peer_get_ref() - Returns peer object given the peer id
78  *
79  * @soc		: core DP soc context
80  * @peer	: DP peer
81  * @mod_id	: id of module requesting the reference
82  *
83  * Return:	QDF_STATUS_SUCCESS if reference held successfully
84  *		else QDF_STATUS_E_INVAL
85  */
86 static inline
87 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
88 			   struct dp_peer *peer,
89 			   enum dp_mod_id mod_id)
90 {
91 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
92 		return QDF_STATUS_E_INVAL;
93 
94 	if (mod_id > DP_MOD_ID_RX)
95 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
96 
97 	return QDF_STATUS_SUCCESS;
98 }
99 
100 /**
101  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
102  *
103  * @soc		: core DP soc context
104  * @peer_id	: peer id from peer object can be retrieved
105  * @mod_id	: module id
106  *
107  * Return: struct dp_peer*: Pointer to DP peer object
108  */
109 static inline struct dp_peer *
110 __dp_peer_get_ref_by_id(struct dp_soc *soc,
111 			uint16_t peer_id,
112 			enum dp_mod_id mod_id)
113 
114 {
115 	struct dp_peer *peer;
116 
117 	qdf_spin_lock_bh(&soc->peer_map_lock);
118 	peer = (peer_id >= soc->max_peer_id) ? NULL :
119 				soc->peer_id_to_obj_map[peer_id];
120 	if (!peer ||
121 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
122 		qdf_spin_unlock_bh(&soc->peer_map_lock);
123 		return NULL;
124 	}
125 
126 	qdf_spin_unlock_bh(&soc->peer_map_lock);
127 	return peer;
128 }
129 
130 /**
131  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
132  *                        if peer state is active
133  *
134  * @soc		: core DP soc context
135  * @peer_id	: peer id from peer object can be retrieved
136  * @mod_id      : ID ot module requesting reference
137  *
138  * Return: struct dp_peer*: Pointer to DP peer object
139  */
140 static inline
141 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
142 				      uint16_t peer_id,
143 				      enum dp_mod_id mod_id)
144 {
145 	struct dp_peer *peer;
146 
147 	qdf_spin_lock_bh(&soc->peer_map_lock);
148 	peer = (peer_id >= soc->max_peer_id) ? NULL :
149 				soc->peer_id_to_obj_map[peer_id];
150 
151 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
152 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
153 		qdf_spin_unlock_bh(&soc->peer_map_lock);
154 		return NULL;
155 	}
156 
157 	qdf_spin_unlock_bh(&soc->peer_map_lock);
158 
159 	return peer;
160 }
161 
162 #ifdef PEER_CACHE_RX_PKTS
163 /**
164  * dp_rx_flush_rx_cached() - flush cached rx frames
165  * @peer: peer
166  * @drop: set flag to drop frames
167  *
168  * Return: None
169  */
170 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
171 #else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	/* no-op when PEER_CACHE_RX_PKTS is not enabled */
}
175 #endif
176 
/**
 * dp_clear_peer_internal() - mark a peer disconnected and flush its rx cache
 * @soc: core DP soc context (unused in this body)
 * @peer: DP peer being cleared
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* state transition is done under the peer info lock */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop (not deliver) any rx frames still cached for this peer */
	dp_rx_flush_rx_cached(peer, true);
}
186 
187 /**
188  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
189  *
190  * @vdev	: DP vdev context
191  * @func	: function to be called for each peer
192  * @arg		: argument need to be passed to func
193  * @mod_id	: module_id
194  *
195  * Return: void
196  */
197 static inline void
198 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
199 		     enum dp_mod_id mod_id)
200 {
201 	struct dp_peer *peer;
202 	struct dp_peer *tmp_peer;
203 	struct dp_soc *soc = NULL;
204 
205 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
206 		return;
207 
208 	soc = vdev->pdev->soc;
209 
210 	qdf_spin_lock_bh(&vdev->peer_list_lock);
211 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
212 			   peer_list_elem,
213 			   tmp_peer) {
214 		if (dp_peer_get_ref(soc, peer, mod_id) ==
215 					QDF_STATUS_SUCCESS) {
216 			(*func)(soc, peer, arg);
217 			dp_peer_unref_delete(peer, mod_id);
218 		}
219 	}
220 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
221 }
222 
223 /**
224  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
225  *
226  * @pdev	: DP pdev context
227  * @func	: function to be called for each peer
228  * @arg		: argument need to be passed to func
229  * @mod_id	: module_id
230  *
231  * Return: void
232  */
233 static inline void
234 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
235 		     enum dp_mod_id mod_id)
236 {
237 	struct dp_vdev *vdev;
238 
239 	if (!pdev)
240 		return;
241 
242 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
243 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
244 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
245 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
246 }
247 
248 /**
249  * dp_soc_iterate_peer() - API to iterate through all peers of soc
250  *
251  * @soc		: DP soc context
252  * @func	: function to be called for each peer
253  * @arg		: argument need to be passed to func
254  * @mod_id	: module_id
255  *
256  * Return: void
257  */
258 static inline void
259 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
260 		    enum dp_mod_id mod_id)
261 {
262 	struct dp_pdev *pdev;
263 	int i;
264 
265 	if (!soc)
266 		return;
267 
268 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
269 		pdev = soc->pdev_list[i];
270 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
271 	}
272 }
273 
274 /**
275  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
276  *
277  * This API will cache the peers in local allocated memory and calls
278  * iterate function outside the lock.
279  *
280  * As this API is allocating new memory it is suggested to use this
281  * only when lock cannot be held
282  *
283  * @vdev	: DP vdev context
284  * @func	: function to be called for each peer
285  * @arg		: argument need to be passed to func
286  * @mod_id	: module_id
287  *
288  * Return: void
289  */
290 static inline void
291 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
292 			       dp_peer_iter_func *func,
293 			       void *arg,
294 			       enum dp_mod_id mod_id)
295 {
296 	struct dp_peer *peer;
297 	struct dp_peer *tmp_peer;
298 	struct dp_soc *soc = NULL;
299 	struct dp_peer **peer_array = NULL;
300 	int i = 0;
301 	uint32_t num_peers = 0;
302 
303 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
304 		return;
305 
306 	num_peers = vdev->num_peers;
307 
308 	soc = vdev->pdev->soc;
309 
310 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
311 	if (!peer_array)
312 		return;
313 
314 	qdf_spin_lock_bh(&vdev->peer_list_lock);
315 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
316 			   peer_list_elem,
317 			   tmp_peer) {
318 		if (i >= num_peers)
319 			break;
320 
321 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
322 			peer_array[i] = peer;
323 			i = (i + 1);
324 		}
325 	}
326 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
327 
328 	for (i = 0; i < num_peers; i++) {
329 		peer = peer_array[i];
330 
331 		if (!peer)
332 			continue;
333 
334 		(*func)(soc, peer, arg);
335 		dp_peer_unref_delete(peer, mod_id);
336 	}
337 
338 	qdf_mem_free(peer_array);
339 }
340 
341 /**
342  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
343  *
344  * This API will cache the peers in local allocated memory and calls
345  * iterate function outside the lock.
346  *
347  * As this API is allocating new memory it is suggested to use this
348  * only when lock cannot be held
349  *
350  * @pdev	: DP pdev context
351  * @func	: function to be called for each peer
352  * @arg		: argument need to be passed to func
353  * @mod_id	: module_id
354  *
355  * Return: void
356  */
357 static inline void
358 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
359 			       dp_peer_iter_func *func,
360 			       void *arg,
361 			       enum dp_mod_id mod_id)
362 {
363 	struct dp_peer *peer;
364 	struct dp_peer *tmp_peer;
365 	struct dp_soc *soc = NULL;
366 	struct dp_vdev *vdev = NULL;
367 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
368 	int i = 0;
369 	int j = 0;
370 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
371 
372 	if (!pdev || !pdev->soc)
373 		return;
374 
375 	soc = pdev->soc;
376 
377 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
378 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
379 		num_peers[i] = vdev->num_peers;
380 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
381 					       sizeof(struct dp_peer *));
382 		if (!peer_array[i])
383 			break;
384 
385 		qdf_spin_lock_bh(&vdev->peer_list_lock);
386 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
387 				   peer_list_elem,
388 				   tmp_peer) {
389 			if (j >= num_peers[i])
390 				break;
391 
392 			if (dp_peer_get_ref(soc, peer, mod_id) ==
393 					QDF_STATUS_SUCCESS) {
394 				peer_array[i][j] = peer;
395 
396 				j = (j + 1);
397 			}
398 		}
399 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
400 		i = (i + 1);
401 	}
402 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
403 
404 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
405 		if (!peer_array[i])
406 			break;
407 
408 		for (j = 0; j < num_peers[i]; j++) {
409 			peer = peer_array[i][j];
410 
411 			if (!peer)
412 				continue;
413 
414 			(*func)(soc, peer, arg);
415 			dp_peer_unref_delete(peer, mod_id);
416 		}
417 
418 		qdf_mem_free(peer_array[i]);
419 	}
420 }
421 
422 /**
423  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
424  *
425  * This API will cache the peers in local allocated memory and calls
426  * iterate function outside the lock.
427  *
428  * As this API is allocating new memory it is suggested to use this
429  * only when lock cannot be held
430  *
431  * @soc		: DP soc context
432  * @func	: function to be called for each peer
433  * @arg		: argument need to be passed to func
434  * @mod_id	: module_id
435  *
436  * Return: void
437  */
438 static inline void
439 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
440 			      dp_peer_iter_func *func,
441 			      void *arg,
442 			      enum dp_mod_id mod_id)
443 {
444 	struct dp_pdev *pdev;
445 	int i;
446 
447 	if (!soc)
448 		return;
449 
450 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
451 		pdev = soc->pdev_list[i];
452 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
453 	}
454 }
455 
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition
 * @_peer: DP peer whose state is changing
 * @_new_state: state being transitioned to
 * @_condition: expression that must be true for the transition to be legal
 *
 * Debug flavor: logs the invalid transition and triggers QDF_ASSERT.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/* Non-debug flavor: logs the invalid transition but does not assert. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
479 
480 /**
481  * dp_peer_state_cmp() - compare dp peer state
482  *
483  * @peer	: DP peer
484  * @state	: state
485  *
486  * Return: true if state matches with peer state
487  *	   false if it does not match
488  */
489 static inline bool
490 dp_peer_state_cmp(struct dp_peer *peer,
491 		  enum dp_peer_state state)
492 {
493 	bool is_status_equal = false;
494 
495 	qdf_spin_lock_bh(&peer->peer_state_lock);
496 	is_status_equal = (peer->peer_state == state);
497 	qdf_spin_unlock_bh(&peer->peer_state_lock);
498 
499 	return is_status_equal;
500 }
501 
502 /**
503  * dp_peer_update_state() - update dp peer state
504  *
505  * @soc		: core DP soc context
506  * @peer	: DP peer
507  * @state	: new state
508  *
509  * Return: None
510  */
511 static inline void
512 dp_peer_update_state(struct dp_soc *soc,
513 		     struct dp_peer *peer,
514 		     enum dp_peer_state state)
515 {
516 	uint8_t peer_state;
517 
518 	qdf_spin_lock_bh(&peer->peer_state_lock);
519 	peer_state = peer->peer_state;
520 
521 	switch (state) {
522 	case DP_PEER_STATE_INIT:
523 		DP_PEER_STATE_ASSERT
524 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
525 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
526 		break;
527 
528 	case DP_PEER_STATE_ACTIVE:
529 		DP_PEER_STATE_ASSERT(peer, state,
530 				     (peer_state == DP_PEER_STATE_INIT));
531 		break;
532 
533 	case DP_PEER_STATE_LOGICAL_DELETE:
534 		DP_PEER_STATE_ASSERT(peer, state,
535 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
536 				     (peer_state == DP_PEER_STATE_INIT));
537 		break;
538 
539 	case DP_PEER_STATE_INACTIVE:
540 		DP_PEER_STATE_ASSERT
541 			(peer, state,
542 			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
543 		break;
544 
545 	case DP_PEER_STATE_FREED:
546 		if (peer->sta_self_peer)
547 			DP_PEER_STATE_ASSERT
548 			(peer, state, (peer_state == DP_PEER_STATE_INIT));
549 		else
550 			DP_PEER_STATE_ASSERT
551 				(peer, state,
552 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
553 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
554 		break;
555 
556 	default:
557 		qdf_spin_unlock_bh(&peer->peer_state_lock);
558 		dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
559 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
560 		return;
561 	}
562 	peer->peer_state = state;
563 	qdf_spin_unlock_bh(&peer->peer_state_lock);
564 	dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
565 		peer_state, state,
566 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
567 }
568 
569 void dp_print_ast_stats(struct dp_soc *soc);
570 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
571 				  uint16_t hw_peer_id, uint8_t vdev_id,
572 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
573 				  uint8_t is_wds);
574 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
575 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
576 			      uint8_t is_wds, uint32_t free_wds_count);
577 
578 #ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow override info
 *
 * Associate the ML peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
591 QDF_STATUS
592 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
593 			   uint8_t *peer_mac_addr,
594 			   struct dp_mlo_flow_override_info *mlo_flow_info);
595 
/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
603 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
604 #endif
605 
606 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
607 			   enum cdp_sec_type sec_type, int is_unicast,
608 			   u_int32_t *michael_key, u_int32_t *rx_pn);
609 
610 QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
611 				   uint8_t tid, uint16_t win_sz);
612 
613 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
614 		uint16_t peer_id, uint8_t *peer_mac);
615 
616 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
617 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
618 			   uint32_t flags);
619 
620 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
621 
622 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
623 			       struct dp_ast_entry *ast_entry);
624 
625 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
626 			struct dp_ast_entry *ast_entry,	uint32_t flags);
627 
628 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
629 						     uint8_t *ast_mac_addr,
630 						     uint8_t pdev_id);
631 
632 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
633 						     uint8_t *ast_mac_addr,
634 						     uint8_t vdev_id);
635 
636 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
637 					       uint8_t *ast_mac_addr);
638 
639 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
640 				struct dp_ast_entry *ast_entry);
641 
642 
643 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
644 				struct dp_ast_entry *ast_entry);
645 
646 void dp_peer_ast_set_type(struct dp_soc *soc,
647 				struct dp_ast_entry *ast_entry,
648 				enum cdp_txrx_ast_entry_type type);
649 
650 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
651 			      struct dp_ast_entry *ast_entry,
652 			      struct dp_peer *peer);
653 
654 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
655 			   struct cdp_soc *dp_soc,
656 			   void *cookie,
657 			   enum cdp_ast_free_status status);
658 
659 void dp_peer_ast_hash_remove(struct dp_soc *soc,
660 			     struct dp_ast_entry *ase);
661 
662 void dp_peer_free_ast_entry(struct dp_soc *soc,
663 			    struct dp_ast_entry *ast_entry);
664 
665 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
666 			      struct dp_ast_entry *ast_entry,
667 			      struct dp_peer *peer);
668 
669 /**
670  * dp_peer_mec_detach_entry() - Detach the MEC entry
671  * @soc: SoC handle
672  * @mecentry: MEC entry of the node
673  * @ptr: pointer to free list
674  *
675  * The MEC entry is detached from MEC table and added to free_list
676  * to free the object outside lock
677  *
678  * Return: None
679  */
680 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
681 			      void *ptr);
682 
683 /**
684  * dp_peer_mec_free_list() - free the MEC entry from free_list
685  * @soc: SoC handle
686  * @ptr: pointer to free list
687  *
688  * Return: None
689  */
690 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
691 
692 /**
693  * dp_peer_mec_add_entry()
694  * @soc: SoC handle
695  * @vdev: vdev to which mec node belongs
696  * @mac_addr: MAC address of mec node
697  *
698  * This function allocates and adds MEC entry to MEC table.
699  * It assumes caller has taken the mec lock to protect the access to these
700  * tables
701  *
702  * Return: QDF_STATUS
703  */
704 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
705 				 struct dp_vdev *vdev,
706 				 uint8_t *mac_addr);
707 
708 /**
709  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
710  * within pdev
711  * @soc: SoC handle
712  *
713  * It assumes caller has taken the mec_lock to protect the access to
714  * MEC hash table
715  *
716  * Return: MEC entry
717  */
718 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
719 						     uint8_t pdev_id,
720 						     uint8_t *mec_mac_addr);
721 
722 #define DP_AST_ASSERT(_condition) \
723 	do { \
724 		if (!(_condition)) { \
725 			dp_print_ast_stats(soc);\
726 			QDF_BUG(_condition); \
727 		} \
728 	} while (0)
729 
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
736 void
737 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
738 			     uint32_t *tag_buf);
739 
740 #ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Stub when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}
756 
/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Stub when QCA_MULTIPASS_SUPPORT is disabled; always reports success.
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}
772 
/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Stub when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}
783 
/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Stub when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
794 #else
795 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
796 			 uint8_t vdev_id, uint8_t *peer_mac,
797 			 uint16_t vlan_id);
798 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
799 				uint16_t vlan_id, uint16_t group_key);
800 void dp_peer_multipass_list_init(struct dp_vdev *vdev);
801 void dp_peer_multipass_list_remove(struct dp_peer *peer);
802 #endif
803 
804 
805 #ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Stub when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}
816 
/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Stub when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
833 #else
834 void dp_peer_reset_flowq_map(struct dp_peer *peer);
835 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
836 		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
837 		    struct dp_ast_flow_override_info *ast_info);
838 #endif
839 
840 /*
841  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
842  * after deleting the entries (ie., setting valid=0)
843  *
844  * @soc: DP SOC handle
845  * @cb_ctxt: Callback context
846  * @reo_status: REO command status
847  */
848 void dp_rx_tid_delete_cb(struct dp_soc *soc,
849 			 void *cb_ctxt,
850 			 union hal_reo_status *reo_status);
851 
852 #ifdef QCA_PEER_EXT_STATS
853 QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
854 				       struct dp_peer *peer);
855 void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
856 				   struct dp_peer *peer);
857 #else
/* Stub when QCA_PEER_EXT_STATS is disabled: nothing to allocate. */
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
863 
/* Stub when QCA_PEER_EXT_STATS is disabled: nothing to free. */
static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
868 #endif
869 
870 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
871 					   struct dp_vdev *vdev,
872 					   enum dp_mod_id mod_id);
873 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
874 						struct dp_vdev *vdev,
875 						enum dp_mod_id mod_id);
876 
877 void dp_peer_ast_table_detach(struct dp_soc *soc);
878 void dp_peer_find_map_detach(struct dp_soc *soc);
879 void dp_soc_wds_detach(struct dp_soc *soc);
880 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
881 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
882 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
883 void dp_soc_wds_attach(struct dp_soc *soc);
884 void dp_peer_mec_hash_detach(struct dp_soc *soc);
885 void dp_peer_ast_hash_detach(struct dp_soc *soc);
886 
887 #ifdef FEATURE_AST
888 /*
889  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
890  * @soc - datapath soc handle
891  * @peer - datapath peer handle
892  *
893  * Delete the AST entries belonging to a peer
894  */
895 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
896 					      struct dp_peer *peer)
897 {
898 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
899 
900 	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
901 	/*
902 	 * Delete peer self ast entry. This is done to handle scenarios
903 	 * where peer is freed before peer map is received(for ex in case
904 	 * of auth disallow due to ACL) in such cases self ast is not added
905 	 * to peer->ast_list.
906 	 */
907 	if (peer->self_ast_entry) {
908 		dp_peer_del_ast(soc, peer->self_ast_entry);
909 		peer->self_ast_entry = NULL;
910 	}
911 
912 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
913 		dp_peer_del_ast(soc, ast_entry);
914 }
915 #else
/* Stub when FEATURE_AST is disabled: peers carry no AST entries. */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
920 #endif
921 
922 #ifdef FEATURE_MEC
923 /**
924  * dp_peer_mec_spinlock_create() - Create the MEC spinlock
925  * @soc: SoC handle
926  *
927  * Return: none
928  */
929 void dp_peer_mec_spinlock_create(struct dp_soc *soc);
930 
931 /**
932  * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
933  * @soc: SoC handle
934  *
935  * Return: none
936  */
937 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
938 
939 /**
940  * dp_peer_mec_flush_entries() - Delete all mec entries in table
941  * @soc: Datapath SOC
942  *
943  * Return: None
944  */
945 void dp_peer_mec_flush_entries(struct dp_soc *soc);
946 #else
/* Stubs when FEATURE_MEC is disabled: the MEC table and its lock do not exist. */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
958 #endif
959 
960 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
961 /**
962  * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
963  * @soc : dp_soc handle
964  * @peer: peer
965  *
966  * This function is used to send cache flush cmd to reo and
967  * to register the callback to handle the dumping of the reo
 * queue stats from DDR
969  *
970  * Return: none
971  */
972 void dp_send_cache_flush_for_rx_tid(
973 	struct dp_soc *soc, struct dp_peer *peer);
974 
975 /**
976  * dp_get_rx_reo_queue_info() - Handler to get rx tid info
977  * @soc : cdp_soc_t handle
978  * @vdev_id: vdev id
979  *
980  * Handler to get rx tid info from DDR after h/w cache is
981  * invalidated first using the cache flush cmd.
982  *
983  * Return: none
984  */
985 void dp_get_rx_reo_queue_info(
986 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
987 
988 /**
989  * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
990  * @soc : dp_soc handle
991  * @cb_ctxt - callback context
 * @reo_status: REO command status
993  *
994  * This is the callback function registered after sending the reo cmd
995  * to flush the h/w cache and invalidate it. In the callback the reo
996  * queue desc info is dumped from DDR.
997  *
998  * Return: none
999  */
1000 void dp_dump_rx_reo_queue_info(
1001 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
1002 
1003 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
1004 
/* Stub when DUMP_REO_QUEUE_INFO_IN_DDR is disabled. */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
1009 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
1010 
1011 static inline int dp_peer_find_mac_addr_cmp(
1012 	union dp_align_mac_addr *mac_addr1,
1013 	union dp_align_mac_addr *mac_addr2)
1014 {
1015 		/*
1016 		 * Intentionally use & rather than &&.
1017 		 * because the operands are binary rather than generic boolean,
1018 		 * the functionality is equivalent.
1019 		 * Using && has the advantage of short-circuited evaluation,
1020 		 * but using & has the advantage of no conditional branching,
1021 		 * which is a more significant benefit.
1022 		 */
1023 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1024 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1025 }
1026 
1027 /**
1028  * dp_peer_delete() - delete DP peer
1029  *
1030  * @soc: Datatpath soc
1031  * @peer: Datapath peer
1032  * @arg: argument to iter function
1033  *
1034  * Return: void
1035  */
1036 void dp_peer_delete(struct dp_soc *soc,
1037 		    struct dp_peer *peer,
1038 		    void *arg);
1039 
1040 #ifdef WLAN_FEATURE_11BE_MLO
/* set the CDP peer type of a DP peer */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))
/* true if _peer is an MLO link peer that is bound to an mld peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* true if _peer is an MLO mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
1050 
1051 #ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_mlo_get_chip_id() - get MLO chip id of this SOC
 * @soc: Datapath soc handle
 *
 * Return: chip id
 */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/**
 * dp_link_peer_hash_find_by_chip_id() - hash-find a link peer on the
 *					 SOC identified by chip_id
 * @soc: Datapath soc handle
 * @peer_mac_addr: peer MAC address
 * @mac_addr_is_aligned: is MAC address aligned
 * @vdev_id: vdev id
 * @chip_id: MLO chip id of the SOC the peer belongs to
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success, NULL on failure
 */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
1061 #else
/* Without WLAN_MLO_MULTI_CHIP there is a single SOC; chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single-chip case: chip_id is ignored, fall back to the regular hash find */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1079 #endif
1080 
1081 /**
1082  * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1083 				 increase mld peer ref_cnt
1084  * @link_peer: link peer pointer
1085  * @mld_peer: mld peer pointer
1086  *
1087  * Return: none
1088  */
1089 static inline
1090 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
1091 			       struct dp_peer *mld_peer)
1092 {
1093 	/* increase mld_peer ref_cnt */
1094 	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
1095 	link_peer->mld_peer = mld_peer;
1096 }
1097 
1098 /**
1099  * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1100 				 decrease mld peer ref_cnt
1101  * @link_peer: link peer pointer
1102  *
1103  * Return: None
1104  */
1105 static inline
1106 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
1107 {
1108 	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
1109 	link_peer->mld_peer = NULL;
1110 }
1111 
1112 /**
1113  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1114  * @mld_peer: mld peer pointer
1115  *
1116  * Return: None
1117  */
1118 static inline
1119 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1120 {
1121 	int i;
1122 
1123 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1124 	mld_peer->num_links = 0;
1125 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1126 		mld_peer->link_peers[i].is_valid = false;
1127 }
1128 
1129 /**
1130  * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
1131  * @mld_peer: mld peer pointer
1132  *
1133  * Return: None
1134  */
1135 static inline
1136 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
1137 {
1138 	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
1139 }
1140 
1141 /**
1142  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1143  * @mld_peer: mld dp peer pointer
1144  * @link_peer: link dp peer pointer
1145  *
1146  * Return: None
1147  */
1148 static inline
1149 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1150 			       struct dp_peer *link_peer)
1151 {
1152 	int i;
1153 	struct dp_peer_link_info *link_peer_info;
1154 
1155 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1156 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1157 		link_peer_info = &mld_peer->link_peers[i];
1158 		if (!link_peer_info->is_valid) {
1159 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1160 				     link_peer->mac_addr.raw,
1161 				     QDF_MAC_ADDR_SIZE);
1162 			link_peer_info->is_valid = true;
1163 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1164 			link_peer_info->chip_id =
1165 				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
1166 			mld_peer->num_links++;
1167 			break;
1168 		}
1169 	}
1170 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1171 
1172 	if (i == DP_MAX_MLO_LINKS)
1173 		dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
1174 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1175 }
1176 
1177 /**
1178  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1179  * @mld_peer: MLD dp peer pointer
1180  * @link_peer: link dp peer pointer
1181  *
1182  * Return: number of links left after deletion
1183  */
1184 static inline
1185 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1186 				  struct dp_peer *link_peer)
1187 {
1188 	int i;
1189 	struct dp_peer_link_info *link_peer_info;
1190 	uint8_t num_links;
1191 
1192 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1193 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1194 		link_peer_info = &mld_peer->link_peers[i];
1195 		if (link_peer_info->is_valid &&
1196 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1197 					&link_peer_info->mac_addr)) {
1198 			link_peer_info->is_valid = false;
1199 			mld_peer->num_links--;
1200 			break;
1201 		}
1202 	}
1203 	num_links = mld_peer->num_links;
1204 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1205 
1206 	if (i == DP_MAX_MLO_LINKS)
1207 		dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
1208 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1209 
1210 	return num_links;
1211 }
1212 
1213 /**
1214  * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
1215 					   increase link peers ref_cnt
1216  * @soc: dp_soc handle
1217  * @mld_peer: dp mld peer pointer
1218  * @mld_link_peers: structure that hold links peers ponter array and number
1219  * @mod_id: id of module requesting reference
1220  *
1221  * Return: None
1222  */
1223 static inline
1224 void dp_get_link_peers_ref_from_mld_peer(
1225 				struct dp_soc *soc,
1226 				struct dp_peer *mld_peer,
1227 				struct dp_mld_link_peers *mld_link_peers,
1228 				enum dp_mod_id mod_id)
1229 {
1230 	struct dp_peer *peer;
1231 	uint8_t i = 0, j = 0;
1232 	struct dp_peer_link_info *link_peer_info;
1233 
1234 	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
1235 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1236 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
1237 		link_peer_info = &mld_peer->link_peers[i];
1238 		if (link_peer_info->is_valid) {
1239 			peer = dp_link_peer_hash_find_by_chip_id(
1240 						soc,
1241 						link_peer_info->mac_addr.raw,
1242 						true,
1243 						link_peer_info->vdev_id,
1244 						link_peer_info->chip_id,
1245 						mod_id);
1246 			if (peer)
1247 				mld_link_peers->link_peers[j++] = peer;
1248 		}
1249 	}
1250 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1251 
1252 	mld_link_peers->num_links = j;
1253 }
1254 
1255 /**
1256  * dp_release_link_peers_ref() - release all link peers reference
1257  * @mld_link_peers: structure that hold links peers ponter array and number
1258  * @mod_id: id of module requesting reference
1259  *
1260  * Return: None.
1261  */
1262 static inline
1263 void dp_release_link_peers_ref(
1264 			struct dp_mld_link_peers *mld_link_peers,
1265 			enum dp_mod_id mod_id)
1266 {
1267 	struct dp_peer *peer;
1268 	uint8_t i;
1269 
1270 	for (i = 0; i < mld_link_peers->num_links; i++) {
1271 		peer = mld_link_peers->link_peers[i];
1272 		if (peer)
1273 			dp_peer_unref_delete(peer, mod_id);
1274 		mld_link_peers->link_peers[i] = NULL;
1275 	}
1276 
1277 	 mld_link_peers->num_links = 0;
1278 }
1279 
1280 /**
1281  * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
1282 				   for processing
1283  * @soc: soc handle
1284  * @peer_mac_addr: peer mac address
1285  * @mac_addr_is_aligned: is mac addr alligned
1286  * @vdev_id: vdev_id
1287  * @mod_id: id of module requesting reference
1288  *
1289  * for MLO connection, get corresponding MLD peer,
1290  * otherwise get link peer for non-MLO case.
1291  *
1292  * return: peer in success
1293  *         NULL in failure
1294  */
1295 static inline
1296 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
1297 					       uint8_t *peer_mac,
1298 					       int mac_addr_is_aligned,
1299 					       uint8_t vdev_id,
1300 					       enum dp_mod_id mod_id)
1301 {
1302 	struct dp_peer *ta_peer = NULL;
1303 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
1304 						      peer_mac, 0, vdev_id,
1305 						      mod_id);
1306 
1307 	if (peer) {
1308 		/* mlo connection link peer, get mld peer with reference */
1309 		if (IS_MLO_DP_LINK_PEER(peer)) {
1310 			/* increase mld peer ref_cnt */
1311 			if (QDF_STATUS_SUCCESS ==
1312 			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1313 				ta_peer = peer->mld_peer;
1314 			else
1315 				ta_peer = NULL;
1316 
1317 			/* relese peer reference that added by hash find */
1318 			dp_peer_unref_delete(peer, mod_id);
1319 		} else {
1320 		/* mlo MLD peer or non-mlo link peer */
1321 			ta_peer = peer;
1322 		}
1323 	}
1324 
1325 	return ta_peer;
1326 }
1327 
1328 /**
1329  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1330  * @soc		: core DP soc context
1331  * @peer_id	: peer id from peer object can be retrieved
1332  * @mod_id      : ID ot module requesting reference
1333  *
1334  * for MLO connection, get corresponding MLD peer,
1335  * otherwise get link peer for non-MLO case.
1336  *
1337  * return: peer in success
1338  *         NULL in failure
1339  */
1340 static inline
1341 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1342 					   uint16_t peer_id,
1343 					   enum dp_mod_id mod_id)
1344 {
1345 	struct dp_peer *ta_peer = NULL;
1346 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1347 
1348 	if (peer) {
1349 		/* mlo connection link peer, get mld peer with reference */
1350 		if (IS_MLO_DP_LINK_PEER(peer)) {
1351 			/* increase mld peer ref_cnt */
1352 			if (QDF_STATUS_SUCCESS ==
1353 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1354 				ta_peer = peer->mld_peer;
1355 			else
1356 				ta_peer = NULL;
1357 
1358 			/* relese peer reference that added by hash find */
1359 			dp_peer_unref_delete(peer, mod_id);
1360 		} else {
1361 		/* mlo MLD peer or non-mlo link peer */
1362 			ta_peer = peer;
1363 		}
1364 	}
1365 
1366 	return ta_peer;
1367 }
1368 
1369 /**
1370  * dp_peer_mlo_delete() - peer MLO related delete operation
1371  * @soc: Soc handle
1372  * @peer: DP peer handle
1373  * Return: None
1374  */
1375 static inline
1376 void dp_peer_mlo_delete(struct dp_soc *soc,
1377 			struct dp_peer *peer)
1378 {
1379 	/* MLO connection link peer */
1380 	if (IS_MLO_DP_LINK_PEER(peer)) {
1381 		/* if last link peer deletion, delete MLD peer */
1382 		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
1383 			dp_peer_delete(soc, peer->mld_peer, NULL);
1384 	}
1385 }
1386 
1387 /**
1388  * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
1389  * @soc: Soc handle
1390  * @vdev_id: Vdev ID
1391  * @peer_setup_info: peer setup information for MLO
1392  */
1393 QDF_STATUS dp_peer_mlo_setup(
1394 			struct dp_soc *soc,
1395 			struct dp_peer *peer,
1396 			uint8_t vdev_id,
1397 			struct cdp_peer_setup_info *setup_info);
1398 
1399 #else
/* Non-MLO build: peer type is unused and no peer is an MLO link/MLD peer */
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
1403 
/* Non-MLO build: target peer is simply the hash-found peer */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}
1415 
/* Non-MLO build: target peer is the peer looked up by id */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
1423 
/* Non-MLO build: nothing to set up, always succeeds */
static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}
1433 
/* Non-MLO build: MLD link-peer bookkeeping stubs, all no-ops */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_soc *soc,
			struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
1460 
/* Non-MLO build: single SOC, chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Non-MLO build: chip_id is ignored, fall back to the regular hash find */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1478 #endif /* WLAN_FEATURE_11BE_MLO */
1479 
1480 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
1481 /**
1482  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
1483  * @soc: Soc handle
1484  * @peer: DP peer handle for ML peer
1485  * @peer_id: peer_id
1486  * Return: None
1487  */
1488 void dp_mlo_partner_chips_map(struct dp_soc *soc,
1489 			      struct dp_peer *peer,
1490 			      uint16_t peer_id);
1491 
1492 /**
1493  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
1494  * @soc: Soc handle
1495  * @peer_id: peer_id
1496  * Return: None
1497  */
1498 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
1499 				uint16_t peer_id);
1500 #else
/* Single-chip build: no partner SOC mapping is needed */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
1511 #endif
1512 
1513 static inline
1514 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
1515 {
1516 	uint8_t i;
1517 
1518 	if (IS_MLO_DP_MLD_PEER(peer)) {
1519 		dp_peer_info("skip for mld peer");
1520 		return QDF_STATUS_SUCCESS;
1521 	}
1522 
1523 	if (peer->rx_tid) {
1524 		QDF_BUG(0);
1525 		dp_peer_err("peer rx_tid mem already exist");
1526 		return QDF_STATUS_E_FAILURE;
1527 	}
1528 
1529 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
1530 				      sizeof(struct dp_rx_tid));
1531 
1532 	if (!peer->rx_tid) {
1533 		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
1534 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1535 		return QDF_STATUS_E_NOMEM;
1536 	}
1537 
1538 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
1539 	for (i = 0; i < DP_MAX_TIDS; i++)
1540 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
1541 
1542 	return QDF_STATUS_SUCCESS;
1543 }
1544 
1545 static inline
1546 void dp_peer_rx_tids_destroy(struct dp_peer *peer)
1547 {
1548 	uint8_t i;
1549 
1550 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1551 		for (i = 0; i < DP_MAX_TIDS; i++)
1552 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
1553 
1554 		qdf_mem_free(peer->rx_tid);
1555 	}
1556 
1557 	peer->rx_tid = NULL;
1558 }
1559 #endif /* _DP_PEER_H_ */
1560