xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision d94f0fb619d3da5ae22f9943f88d4634e2d28581)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 
26 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
27 #include "hal_reo.h"
28 #endif
29 
/* Sentinel peer id used when no valid peer mapping exists */
#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

/* Timeout (msec) waiting for firmware peer stats completion */
#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

/* Peer hash table load factors — NOTE(review): exact sizing formula lives
 * in dp_peer.c; confirm before relying on these values.
 */
#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/* Per-severity trace wrappers bound to the DP peer module id */
#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
49 
#ifdef REO_QDESC_HISTORY
/* Kind of event recorded in the REO queue-descriptor history */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

/* One record of the REO queue-descriptor history */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;	/* DMA address of the queue desc */
	uint64_t ts;			/* timestamp of the event */
	enum reo_qdesc_event_type type;	/* update-callback or free */
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];	/* owning peer's MAC */
};
#endif

/* Context carried through AST entry deletion walks.
 * NOTE(review): 'age' presumably marks the ageout path — confirm at callers.
 */
struct ast_del_ctxt {
	bool age;
	int del_count;	/* number of AST entries deleted so far */
};
68 
/* Callback signature used by the dp_*_iterate_peer*() helpers below */
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
79 
80 /**
81  * dp_peer_get_ref() - Returns peer object given the peer id
82  *
83  * @soc		: core DP soc context
84  * @peer	: DP peer
85  * @mod_id	: id of module requesting the reference
86  *
87  * Return:	QDF_STATUS_SUCCESS if reference held successfully
88  *		else QDF_STATUS_E_INVAL
89  */
90 static inline
91 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
92 			   struct dp_peer *peer,
93 			   enum dp_mod_id mod_id)
94 {
95 	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
96 		return QDF_STATUS_E_INVAL;
97 
98 	if (mod_id > DP_MOD_ID_RX)
99 		qdf_atomic_inc(&peer->mod_refs[mod_id]);
100 
101 	return QDF_STATUS_SUCCESS;
102 }
103 
104 /**
105  * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
106  *
107  * @soc		: core DP soc context
108  * @peer_id	: peer id from peer object can be retrieved
109  * @mod_id	: module id
110  *
111  * Return: struct dp_peer*: Pointer to DP peer object
112  */
113 static inline struct dp_peer *
114 __dp_peer_get_ref_by_id(struct dp_soc *soc,
115 			uint16_t peer_id,
116 			enum dp_mod_id mod_id)
117 
118 {
119 	struct dp_peer *peer;
120 
121 	qdf_spin_lock_bh(&soc->peer_map_lock);
122 	peer = (peer_id >= soc->max_peer_id) ? NULL :
123 				soc->peer_id_to_obj_map[peer_id];
124 	if (!peer ||
125 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
126 		qdf_spin_unlock_bh(&soc->peer_map_lock);
127 		return NULL;
128 	}
129 
130 	qdf_spin_unlock_bh(&soc->peer_map_lock);
131 	return peer;
132 }
133 
134 /**
135  * dp_peer_get_ref_by_id() - Returns peer object given the peer id
136  *                        if peer state is active
137  *
138  * @soc		: core DP soc context
139  * @peer_id	: peer id from peer object can be retrieved
140  * @mod_id      : ID ot module requesting reference
141  *
142  * Return: struct dp_peer*: Pointer to DP peer object
143  */
144 static inline
145 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
146 				      uint16_t peer_id,
147 				      enum dp_mod_id mod_id)
148 {
149 	struct dp_peer *peer;
150 
151 	qdf_spin_lock_bh(&soc->peer_map_lock);
152 	peer = (peer_id >= soc->max_peer_id) ? NULL :
153 				soc->peer_id_to_obj_map[peer_id];
154 
155 	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
156 	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
157 		qdf_spin_unlock_bh(&soc->peer_map_lock);
158 		return NULL;
159 	}
160 
161 	qdf_spin_unlock_bh(&soc->peer_map_lock);
162 
163 	return peer;
164 }
165 
166 /**
167  * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
168  *
169  * @soc		: core DP soc context
170  * @peer_id	: peer id from peer object can be retrieved
171  * @handle	: reference handle
172  * @mod_id      : ID ot module requesting reference
173  *
174  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
175  */
176 static inline struct dp_txrx_peer *
177 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
178 			   uint16_t peer_id,
179 			   dp_txrx_ref_handle *handle,
180 			   enum dp_mod_id mod_id)
181 
182 {
183 	struct dp_peer *peer;
184 
185 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
186 	if (!peer)
187 		return NULL;
188 
189 	if (!peer->txrx_peer) {
190 		dp_peer_unref_delete(peer, mod_id);
191 		return NULL;
192 	}
193 
194 	*handle = (dp_txrx_ref_handle)peer;
195 	return peer->txrx_peer;
196 }
197 
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* Rx-frame caching compiled out: flushing is a no-op */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
212 
/**
 * dp_clear_peer_internal() - Mark a peer disconnected and flush its
 * cached rx frames
 * @soc: core DP soc context (unused in this helper)
 * @peer: DP peer being cleared
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* State write serialized with readers via peer_info_lock */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop = true: cached frames are discarded, not delivered */
	dp_rx_flush_rx_cached(peer, true);
}
222 
223 /**
224  * dp_vdev_iterate_peer() - API to iterate through vdev peer list
225  *
226  * @vdev	: DP vdev context
227  * @func	: function to be called for each peer
228  * @arg		: argument need to be passed to func
229  * @mod_id	: module_id
230  *
231  * Return: void
232  */
233 static inline void
234 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
235 		     enum dp_mod_id mod_id)
236 {
237 	struct dp_peer *peer;
238 	struct dp_peer *tmp_peer;
239 	struct dp_soc *soc = NULL;
240 
241 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
242 		return;
243 
244 	soc = vdev->pdev->soc;
245 
246 	qdf_spin_lock_bh(&vdev->peer_list_lock);
247 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
248 			   peer_list_elem,
249 			   tmp_peer) {
250 		if (dp_peer_get_ref(soc, peer, mod_id) ==
251 					QDF_STATUS_SUCCESS) {
252 			(*func)(soc, peer, arg);
253 			dp_peer_unref_delete(peer, mod_id);
254 		}
255 	}
256 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
257 }
258 
259 /**
260  * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
261  *
262  * @pdev	: DP pdev context
263  * @func	: function to be called for each peer
264  * @arg		: argument need to be passed to func
265  * @mod_id	: module_id
266  *
267  * Return: void
268  */
269 static inline void
270 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
271 		     enum dp_mod_id mod_id)
272 {
273 	struct dp_vdev *vdev;
274 
275 	if (!pdev)
276 		return;
277 
278 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
279 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
280 		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
281 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
282 }
283 
284 /**
285  * dp_soc_iterate_peer() - API to iterate through all peers of soc
286  *
287  * @soc		: DP soc context
288  * @func	: function to be called for each peer
289  * @arg		: argument need to be passed to func
290  * @mod_id	: module_id
291  *
292  * Return: void
293  */
294 static inline void
295 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
296 		    enum dp_mod_id mod_id)
297 {
298 	struct dp_pdev *pdev;
299 	int i;
300 
301 	if (!soc)
302 		return;
303 
304 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
305 		pdev = soc->pdev_list[i];
306 		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
307 	}
308 }
309 
310 /**
311  * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
312  *
313  * This API will cache the peers in local allocated memory and calls
314  * iterate function outside the lock.
315  *
316  * As this API is allocating new memory it is suggested to use this
317  * only when lock cannot be held
318  *
319  * @vdev	: DP vdev context
320  * @func	: function to be called for each peer
321  * @arg		: argument need to be passed to func
322  * @mod_id	: module_id
323  *
324  * Return: void
325  */
326 static inline void
327 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
328 			       dp_peer_iter_func *func,
329 			       void *arg,
330 			       enum dp_mod_id mod_id)
331 {
332 	struct dp_peer *peer;
333 	struct dp_peer *tmp_peer;
334 	struct dp_soc *soc = NULL;
335 	struct dp_peer **peer_array = NULL;
336 	int i = 0;
337 	uint32_t num_peers = 0;
338 
339 	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
340 		return;
341 
342 	num_peers = vdev->num_peers;
343 
344 	soc = vdev->pdev->soc;
345 
346 	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
347 	if (!peer_array)
348 		return;
349 
350 	qdf_spin_lock_bh(&vdev->peer_list_lock);
351 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
352 			   peer_list_elem,
353 			   tmp_peer) {
354 		if (i >= num_peers)
355 			break;
356 
357 		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
358 			peer_array[i] = peer;
359 			i = (i + 1);
360 		}
361 	}
362 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
363 
364 	for (i = 0; i < num_peers; i++) {
365 		peer = peer_array[i];
366 
367 		if (!peer)
368 			continue;
369 
370 		(*func)(soc, peer, arg);
371 		dp_peer_unref_delete(peer, mod_id);
372 	}
373 
374 	qdf_mem_free(peer_array);
375 }
376 
377 /**
378  * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
379  *
380  * This API will cache the peers in local allocated memory and calls
381  * iterate function outside the lock.
382  *
383  * As this API is allocating new memory it is suggested to use this
384  * only when lock cannot be held
385  *
386  * @pdev	: DP pdev context
387  * @func	: function to be called for each peer
388  * @arg		: argument need to be passed to func
389  * @mod_id	: module_id
390  *
391  * Return: void
392  */
393 static inline void
394 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
395 			       dp_peer_iter_func *func,
396 			       void *arg,
397 			       enum dp_mod_id mod_id)
398 {
399 	struct dp_peer *peer;
400 	struct dp_peer *tmp_peer;
401 	struct dp_soc *soc = NULL;
402 	struct dp_vdev *vdev = NULL;
403 	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
404 	int i = 0;
405 	int j = 0;
406 	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
407 
408 	if (!pdev || !pdev->soc)
409 		return;
410 
411 	soc = pdev->soc;
412 
413 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
414 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
415 		num_peers[i] = vdev->num_peers;
416 		peer_array[i] = qdf_mem_malloc(num_peers[i] *
417 					       sizeof(struct dp_peer *));
418 		if (!peer_array[i])
419 			break;
420 
421 		qdf_spin_lock_bh(&vdev->peer_list_lock);
422 		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
423 				   peer_list_elem,
424 				   tmp_peer) {
425 			if (j >= num_peers[i])
426 				break;
427 
428 			if (dp_peer_get_ref(soc, peer, mod_id) ==
429 					QDF_STATUS_SUCCESS) {
430 				peer_array[i][j] = peer;
431 
432 				j = (j + 1);
433 			}
434 		}
435 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
436 		i = (i + 1);
437 	}
438 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
439 
440 	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
441 		if (!peer_array[i])
442 			break;
443 
444 		for (j = 0; j < num_peers[i]; j++) {
445 			peer = peer_array[i][j];
446 
447 			if (!peer)
448 				continue;
449 
450 			(*func)(soc, peer, arg);
451 			dp_peer_unref_delete(peer, mod_id);
452 		}
453 
454 		qdf_mem_free(peer_array[i]);
455 	}
456 }
457 
458 /**
459  * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
460  *
461  * This API will cache the peers in local allocated memory and calls
462  * iterate function outside the lock.
463  *
464  * As this API is allocating new memory it is suggested to use this
465  * only when lock cannot be held
466  *
467  * @soc		: DP soc context
468  * @func	: function to be called for each peer
469  * @arg		: argument need to be passed to func
470  * @mod_id	: module_id
471  *
472  * Return: void
473  */
474 static inline void
475 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
476 			      dp_peer_iter_func *func,
477 			      void *arg,
478 			      enum dp_mod_id mod_id)
479 {
480 	struct dp_pdev *pdev;
481 	int i;
482 
483 	if (!soc)
484 		return;
485 
486 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
487 		pdev = soc->pdev_list[i];
488 		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
489 	}
490 }
491 
#ifdef DP_PEER_STATE_DEBUG
/*
 * Debug flavour: log the invalid peer state transition and trigger
 * QDF_ASSERT so the violation is caught immediately.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/*
 * Production flavour: only log the invalid transition, do not assert.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
515 
516 /**
517  * dp_peer_state_cmp() - compare dp peer state
518  *
519  * @peer	: DP peer
520  * @state	: state
521  *
522  * Return: true if state matches with peer state
523  *	   false if it does not match
524  */
525 static inline bool
526 dp_peer_state_cmp(struct dp_peer *peer,
527 		  enum dp_peer_state state)
528 {
529 	bool is_status_equal = false;
530 
531 	qdf_spin_lock_bh(&peer->peer_state_lock);
532 	is_status_equal = (peer->peer_state == state);
533 	qdf_spin_unlock_bh(&peer->peer_state_lock);
534 
535 	return is_status_equal;
536 }
537 
538 /**
539  * dp_peer_update_state() - update dp peer state
540  *
541  * @soc		: core DP soc context
542  * @peer	: DP peer
543  * @state	: new state
544  *
545  * Return: None
546  */
547 static inline void
548 dp_peer_update_state(struct dp_soc *soc,
549 		     struct dp_peer *peer,
550 		     enum dp_peer_state state)
551 {
552 	uint8_t peer_state;
553 
554 	qdf_spin_lock_bh(&peer->peer_state_lock);
555 	peer_state = peer->peer_state;
556 
557 	switch (state) {
558 	case DP_PEER_STATE_INIT:
559 		DP_PEER_STATE_ASSERT
560 			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
561 			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
562 		break;
563 
564 	case DP_PEER_STATE_ACTIVE:
565 		DP_PEER_STATE_ASSERT(peer, state,
566 				     (peer_state == DP_PEER_STATE_INIT));
567 		break;
568 
569 	case DP_PEER_STATE_LOGICAL_DELETE:
570 		DP_PEER_STATE_ASSERT(peer, state,
571 				     (peer_state == DP_PEER_STATE_ACTIVE) ||
572 				     (peer_state == DP_PEER_STATE_INIT));
573 		break;
574 
575 	case DP_PEER_STATE_INACTIVE:
576 		DP_PEER_STATE_ASSERT
577 			(peer, state,
578 			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
579 		break;
580 
581 	case DP_PEER_STATE_FREED:
582 		if (peer->sta_self_peer)
583 			DP_PEER_STATE_ASSERT
584 			(peer, state, (peer_state == DP_PEER_STATE_INIT));
585 		else
586 			DP_PEER_STATE_ASSERT
587 				(peer, state,
588 				 (peer_state == DP_PEER_STATE_INACTIVE) ||
589 				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
590 		break;
591 
592 	default:
593 		qdf_spin_unlock_bh(&peer->peer_state_lock);
594 		dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
595 			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
596 		return;
597 	}
598 	peer->peer_state = state;
599 	qdf_spin_unlock_bh(&peer->peer_state_lock);
600 	dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
601 		peer_state, state,
602 		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
603 }
604 
605 void dp_print_ast_stats(struct dp_soc *soc);
606 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
607 				  uint16_t hw_peer_id, uint8_t vdev_id,
608 				  uint8_t *peer_mac_addr, uint16_t ast_hash,
609 				  uint8_t is_wds);
610 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
611 			      uint8_t vdev_id, uint8_t *peer_mac_addr,
612 			      uint8_t is_wds, uint32_t free_wds_count);
613 
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow override info
 *
 * associate the ML peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif
641 
642 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
643 			   enum cdp_sec_type sec_type, int is_unicast,
644 			   u_int32_t *michael_key, u_int32_t *rx_pn);
645 
646 QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
647 				   uint8_t tid, uint16_t win_sz);
648 
649 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
650 		uint16_t peer_id, uint8_t *peer_mac);
651 
652 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
653 			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
654 			   uint32_t flags);
655 
656 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
657 
658 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
659 			       struct dp_ast_entry *ast_entry);
660 
661 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
662 			struct dp_ast_entry *ast_entry,	uint32_t flags);
663 
664 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
665 						     uint8_t *ast_mac_addr,
666 						     uint8_t pdev_id);
667 
668 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
669 						     uint8_t *ast_mac_addr,
670 						     uint8_t vdev_id);
671 
672 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
673 					       uint8_t *ast_mac_addr);
674 
675 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
676 				struct dp_ast_entry *ast_entry);
677 
678 
679 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
680 				struct dp_ast_entry *ast_entry);
681 
682 void dp_peer_ast_set_type(struct dp_soc *soc,
683 				struct dp_ast_entry *ast_entry,
684 				enum cdp_txrx_ast_entry_type type);
685 
686 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
687 			      struct dp_ast_entry *ast_entry,
688 			      struct dp_peer *peer);
689 
690 #ifdef WLAN_FEATURE_MULTI_AST_DEL
691 void dp_peer_ast_send_multi_wds_del(
692 		struct dp_soc *soc, uint8_t vdev_id,
693 		struct peer_del_multi_wds_entries *wds_list);
694 #endif
695 
696 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
697 			   struct cdp_soc *dp_soc,
698 			   void *cookie,
699 			   enum cdp_ast_free_status status);
700 
701 void dp_peer_ast_hash_remove(struct dp_soc *soc,
702 			     struct dp_ast_entry *ase);
703 
704 void dp_peer_free_ast_entry(struct dp_soc *soc,
705 			    struct dp_ast_entry *ast_entry);
706 
707 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
708 			      struct dp_ast_entry *ast_entry,
709 			      struct dp_peer *peer);
710 
711 /**
712  * dp_peer_mec_detach_entry() - Detach the MEC entry
713  * @soc: SoC handle
714  * @mecentry: MEC entry of the node
715  * @ptr: pointer to free list
716  *
717  * The MEC entry is detached from MEC table and added to free_list
718  * to free the object outside lock
719  *
720  * Return: None
721  */
722 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
723 			      void *ptr);
724 
725 /**
726  * dp_peer_mec_free_list() - free the MEC entry from free_list
727  * @soc: SoC handle
728  * @ptr: pointer to free list
729  *
730  * Return: None
731  */
732 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
733 
734 /**
735  * dp_peer_mec_add_entry()
736  * @soc: SoC handle
737  * @vdev: vdev to which mec node belongs
738  * @mac_addr: MAC address of mec node
739  *
740  * This function allocates and adds MEC entry to MEC table.
741  * It assumes caller has taken the mec lock to protect the access to these
742  * tables
743  *
744  * Return: QDF_STATUS
745  */
746 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
747 				 struct dp_vdev *vdev,
748 				 uint8_t *mac_addr);
749 
750 /**
751  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
752  * within pdev
753  * @soc: SoC handle
754  *
755  * It assumes caller has taken the mec_lock to protect the access to
756  * MEC hash table
757  *
758  * Return: MEC entry
759  */
760 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
761 						     uint8_t pdev_id,
762 						     uint8_t *mec_mac_addr);
763 
/*
 * Assert helper for AST bookkeeping: dumps AST stats before asserting.
 * NOTE: the expansion references a variable named 'soc' which must be in
 * scope at every call site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
771 
772 /**
773  * dp_peer_update_inactive_time - Update inactive time for peer
774  * @pdev: pdev object
775  * @tag_type: htt_tlv_tag type
776  * #tag_buf: buf message
777  */
778 void
779 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
780 			     uint32_t *tag_buf);
781 
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * No-op stub: multipass support is compiled out.
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * No-op stub: always reports success when multipass is compiled out.
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * No-op stub: multipass support is compiled out.
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * No-op stub: multipass support is compiled out.
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
845 
846 
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * No-op stub: peer multi-queue support is compiled out.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * No-op stub: peer multi-queue support is compiled out.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info);
#endif
881 
882 /*
883  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
884  * after deleting the entries (ie., setting valid=0)
885  *
886  * @soc: DP SOC handle
887  * @cb_ctxt: Callback context
888  * @reo_status: REO command status
889  */
890 void dp_rx_tid_delete_cb(struct dp_soc *soc,
891 			 void *cb_ctxt,
892 			 union hal_reo_status *reo_status);
893 
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Peer delay-stats stubs: feature compiled out, alloc reports success */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
919 
#ifdef WLAN_PEER_JITTER
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);

void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);

void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Peer jitter-stats stubs: feature compiled out, alloc reports success */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
947 
#ifndef CONFIG_SAWF_DEF_QUEUES
/* SAWF default-queue stubs: feature compiled out, both report success */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

#endif

#ifndef CONFIG_SAWF
/* SAWF stats stubs: feature compiled out, both report success */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
978 
979 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
980 					   struct dp_vdev *vdev,
981 					   enum dp_mod_id mod_id);
982 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
983 						struct dp_vdev *vdev,
984 						enum dp_mod_id mod_id);
985 
986 void dp_peer_ast_table_detach(struct dp_soc *soc);
987 void dp_peer_find_map_detach(struct dp_soc *soc);
988 void dp_soc_wds_detach(struct dp_soc *soc);
989 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
990 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
991 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
992 void dp_soc_wds_attach(struct dp_soc *soc);
993 void dp_peer_mec_hash_detach(struct dp_soc *soc);
994 void dp_peer_ast_hash_detach(struct dp_soc *soc);
995 
#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* Then delete every entry linked on the peer's AST list */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
/* AST feature compiled out: nothing to delete */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
1030 
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* MEC feature compiled out: lock management and flush are no-ops */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
1068 
1069 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
1070 /**
1071  * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
1072  * @soc : dp_soc handle
1073  * @peer: peer
1074  *
1075  * This function is used to send cache flush cmd to reo and
1076  * to register the callback to handle the dumping of the reo
1077  * queue stas from DDR
1078  *
1079  * Return: none
1080  */
1081 void dp_send_cache_flush_for_rx_tid(
1082 	struct dp_soc *soc, struct dp_peer *peer);
1083 
1084 /**
1085  * dp_get_rx_reo_queue_info() - Handler to get rx tid info
1086  * @soc : cdp_soc_t handle
1087  * @vdev_id: vdev id
1088  *
1089  * Handler to get rx tid info from DDR after h/w cache is
1090  * invalidated first using the cache flush cmd.
1091  *
1092  * Return: none
1093  */
1094 void dp_get_rx_reo_queue_info(
1095 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
1096 
1097 /**
1098  * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
1099  * @soc : dp_soc handle
1100  * @cb_ctxt - callback context
1101  * @reo_status: vdev id
1102  *
1103  * This is the callback function registered after sending the reo cmd
1104  * to flush the h/w cache and invalidate it. In the callback the reo
1105  * queue desc info is dumped from DDR.
1106  *
1107  * Return: none
1108  */
1109 void dp_dump_rx_reo_queue_info(
1110 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
1111 
1112 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
1113 
/* REO queue dump support disabled: nothing to fetch from DDR */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
1118 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
1119 
1120 static inline int dp_peer_find_mac_addr_cmp(
1121 	union dp_align_mac_addr *mac_addr1,
1122 	union dp_align_mac_addr *mac_addr2)
1123 {
1124 		/*
1125 		 * Intentionally use & rather than &&.
1126 		 * because the operands are binary rather than generic boolean,
1127 		 * the functionality is equivalent.
1128 		 * Using && has the advantage of short-circuited evaluation,
1129 		 * but using & has the advantage of no conditional branching,
1130 		 * which is a more significant benefit.
1131 		 */
1132 	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1133 		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1134 }
1135 
1136 /**
1137  * dp_peer_delete() - delete DP peer
1138  *
1139  * @soc: Datatpath soc
1140  * @peer: Datapath peer
1141  * @arg: argument to iter function
1142  *
1143  * Return: void
1144  */
1145 void dp_peer_delete(struct dp_soc *soc,
1146 		    struct dp_peer *peer,
1147 		    void *arg);
1148 
1149 #ifdef WLAN_FEATURE_11BE_MLO
1150 
/* txrx peer belongs to an MLO connection (mld back-pointer is set) */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* legacy (non-MLO) peer: link-type peer with no mld peer attached */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* MLO link peer: link-type peer attached to an mld peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* MLO mld peer: the logical multi-link peer itself */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get mld peer from link peer (NULL for non-MLO peers) */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)
1170 
1171 #ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_mlo_get_chip_id() - Get the chip id of this soc in the MLO group
 * @soc: Datapath soc handle
 *
 * Return: chip id
 */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/**
 * dp_link_peer_hash_find_by_chip_id() - Hash-find a link peer on a
 *					 specific chip
 * @soc: Datapath soc handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac address aligned
 * @vdev_id: vdev id
 * @chip_id: chip id hosting the link peer
 * @mod_id: id of module requesting reference
 *
 * Return: peer with reference held, or NULL if not found
 */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
1181 #else
/* single-chip build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* single-chip build: chip_id is irrelevant, use the regular hash lookup */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
1199 #endif
1200 
1201 /**
1202  * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1203 				 increase mld peer ref_cnt
1204  * @link_peer: link peer pointer
1205  * @mld_peer: mld peer pointer
1206  *
1207  * Return: none
1208  */
1209 static inline
1210 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
1211 			       struct dp_peer *mld_peer)
1212 {
1213 	/* increase mld_peer ref_cnt */
1214 	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
1215 	link_peer->mld_peer = mld_peer;
1216 }
1217 
1218 /**
1219  * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1220 				 decrease mld peer ref_cnt
1221  * @link_peer: link peer pointer
1222  *
1223  * Return: None
1224  */
1225 static inline
1226 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
1227 {
1228 	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
1229 	link_peer->mld_peer = NULL;
1230 }
1231 
1232 /**
1233  * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1234  * @mld_peer: mld peer pointer
1235  *
1236  * Return: None
1237  */
1238 static inline
1239 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1240 {
1241 	int i;
1242 
1243 	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1244 	mld_peer->num_links = 0;
1245 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1246 		mld_peer->link_peers[i].is_valid = false;
1247 }
1248 
1249 /**
1250  * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
1251  * @mld_peer: mld peer pointer
1252  *
1253  * Return: None
1254  */
1255 static inline
1256 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
1257 {
1258 	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
1259 }
1260 
1261 /**
1262  * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1263  * @mld_peer: mld dp peer pointer
1264  * @link_peer: link dp peer pointer
1265  *
1266  * Return: None
1267  */
1268 static inline
1269 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1270 			       struct dp_peer *link_peer)
1271 {
1272 	int i;
1273 	struct dp_peer_link_info *link_peer_info;
1274 
1275 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1276 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1277 		link_peer_info = &mld_peer->link_peers[i];
1278 		if (!link_peer_info->is_valid) {
1279 			qdf_mem_copy(link_peer_info->mac_addr.raw,
1280 				     link_peer->mac_addr.raw,
1281 				     QDF_MAC_ADDR_SIZE);
1282 			link_peer_info->is_valid = true;
1283 			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1284 			link_peer_info->chip_id =
1285 				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
1286 			mld_peer->num_links++;
1287 			break;
1288 		}
1289 	}
1290 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1291 
1292 	if (i == DP_MAX_MLO_LINKS)
1293 		dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
1294 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1295 }
1296 
1297 /**
1298  * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1299  * @mld_peer: MLD dp peer pointer
1300  * @link_peer: link dp peer pointer
1301  *
1302  * Return: number of links left after deletion
1303  */
1304 static inline
1305 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1306 				  struct dp_peer *link_peer)
1307 {
1308 	int i;
1309 	struct dp_peer_link_info *link_peer_info;
1310 	uint8_t num_links;
1311 
1312 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1313 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1314 		link_peer_info = &mld_peer->link_peers[i];
1315 		if (link_peer_info->is_valid &&
1316 		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1317 					&link_peer_info->mac_addr)) {
1318 			link_peer_info->is_valid = false;
1319 			mld_peer->num_links--;
1320 			break;
1321 		}
1322 	}
1323 	num_links = mld_peer->num_links;
1324 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1325 
1326 	if (i == DP_MAX_MLO_LINKS)
1327 		dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
1328 		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
1329 
1330 	return num_links;
1331 }
1332 
1333 /**
1334  * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
1335 					   increase link peers ref_cnt
1336  * @soc: dp_soc handle
1337  * @mld_peer: dp mld peer pointer
1338  * @mld_link_peers: structure that hold links peers ponter array and number
1339  * @mod_id: id of module requesting reference
1340  *
1341  * Return: None
1342  */
1343 static inline
1344 void dp_get_link_peers_ref_from_mld_peer(
1345 				struct dp_soc *soc,
1346 				struct dp_peer *mld_peer,
1347 				struct dp_mld_link_peers *mld_link_peers,
1348 				enum dp_mod_id mod_id)
1349 {
1350 	struct dp_peer *peer;
1351 	uint8_t i = 0, j = 0;
1352 	struct dp_peer_link_info *link_peer_info;
1353 
1354 	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
1355 	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1356 	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
1357 		link_peer_info = &mld_peer->link_peers[i];
1358 		if (link_peer_info->is_valid) {
1359 			peer = dp_link_peer_hash_find_by_chip_id(
1360 						soc,
1361 						link_peer_info->mac_addr.raw,
1362 						true,
1363 						link_peer_info->vdev_id,
1364 						link_peer_info->chip_id,
1365 						mod_id);
1366 			if (peer)
1367 				mld_link_peers->link_peers[j++] = peer;
1368 		}
1369 	}
1370 	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1371 
1372 	mld_link_peers->num_links = j;
1373 }
1374 
1375 /**
1376  * dp_release_link_peers_ref() - release all link peers reference
1377  * @mld_link_peers: structure that hold links peers ponter array and number
1378  * @mod_id: id of module requesting reference
1379  *
1380  * Return: None.
1381  */
1382 static inline
1383 void dp_release_link_peers_ref(
1384 			struct dp_mld_link_peers *mld_link_peers,
1385 			enum dp_mod_id mod_id)
1386 {
1387 	struct dp_peer *peer;
1388 	uint8_t i;
1389 
1390 	for (i = 0; i < mld_link_peers->num_links; i++) {
1391 		peer = mld_link_peers->link_peers[i];
1392 		if (peer)
1393 			dp_peer_unref_delete(peer, mod_id);
1394 		mld_link_peers->link_peers[i] = NULL;
1395 	}
1396 
1397 	 mld_link_peers->num_links = 0;
1398 }
1399 
1400 /**
1401  * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
1402  * @soc: Datapath soc handle
1403  * @peer_id: peer id
1404  * @lmac_id: lmac id to find the link peer on given lmac
1405  *
1406  * Return: peer_id of link peer if found
1407  *         else return HTT_INVALID_PEER
1408  */
1409 static inline
1410 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
1411 					uint8_t lmac_id)
1412 {
1413 	uint8_t i;
1414 	struct dp_peer *peer;
1415 	struct dp_peer *link_peer;
1416 	struct dp_soc *link_peer_soc;
1417 	struct dp_mld_link_peers link_peers_info;
1418 	uint16_t link_peer_id = HTT_INVALID_PEER;
1419 
1420 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
1421 
1422 	if (!peer)
1423 		return HTT_INVALID_PEER;
1424 
1425 	if (IS_MLO_DP_MLD_PEER(peer)) {
1426 		/* get link peers with reference */
1427 		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
1428 						    DP_MOD_ID_CDP);
1429 
1430 		for (i = 0; i < link_peers_info.num_links; i++) {
1431 			link_peer = link_peers_info.link_peers[i];
1432 			link_peer_soc = link_peer->vdev->pdev->soc;
1433 			if ((link_peer_soc == soc) &&
1434 			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
1435 				link_peer_id = link_peer->peer_id;
1436 				break;
1437 			}
1438 		}
1439 		/* release link peers reference */
1440 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
1441 	} else {
1442 		link_peer_id = peer_id;
1443 	}
1444 
1445 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1446 
1447 	return link_peer_id;
1448 }
1449 
1450 /**
1451  * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
1452 				   for processing
1453  * @soc: soc handle
1454  * @peer_mac_addr: peer mac address
1455  * @mac_addr_is_aligned: is mac addr alligned
1456  * @vdev_id: vdev_id
1457  * @mod_id: id of module requesting reference
1458  *
1459  * for MLO connection, get corresponding MLD peer,
1460  * otherwise get link peer for non-MLO case.
1461  *
1462  * return: peer in success
1463  *         NULL in failure
1464  */
1465 static inline
1466 struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
1467 					       uint8_t *peer_mac,
1468 					       int mac_addr_is_aligned,
1469 					       uint8_t vdev_id,
1470 					       enum dp_mod_id mod_id)
1471 {
1472 	struct dp_peer *ta_peer = NULL;
1473 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
1474 						      peer_mac, 0, vdev_id,
1475 						      mod_id);
1476 
1477 	if (peer) {
1478 		/* mlo connection link peer, get mld peer with reference */
1479 		if (IS_MLO_DP_LINK_PEER(peer)) {
1480 			/* increase mld peer ref_cnt */
1481 			if (QDF_STATUS_SUCCESS ==
1482 			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1483 				ta_peer = peer->mld_peer;
1484 			else
1485 				ta_peer = NULL;
1486 
1487 			/* relese peer reference that added by hash find */
1488 			dp_peer_unref_delete(peer, mod_id);
1489 		} else {
1490 		/* mlo MLD peer or non-mlo link peer */
1491 			ta_peer = peer;
1492 		}
1493 	}
1494 
1495 	return ta_peer;
1496 }
1497 
1498 /**
1499  * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
1500  * @soc		: core DP soc context
1501  * @peer_id	: peer id from peer object can be retrieved
1502  * @mod_id      : ID ot module requesting reference
1503  *
1504  * for MLO connection, get corresponding MLD peer,
1505  * otherwise get link peer for non-MLO case.
1506  *
1507  * return: peer in success
1508  *         NULL in failure
1509  */
1510 static inline
1511 struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
1512 					   uint16_t peer_id,
1513 					   enum dp_mod_id mod_id)
1514 {
1515 	struct dp_peer *ta_peer = NULL;
1516 	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1517 
1518 	if (peer) {
1519 		/* mlo connection link peer, get mld peer with reference */
1520 		if (IS_MLO_DP_LINK_PEER(peer)) {
1521 			/* increase mld peer ref_cnt */
1522 			if (QDF_STATUS_SUCCESS ==
1523 				dp_peer_get_ref(soc, peer->mld_peer, mod_id))
1524 				ta_peer = peer->mld_peer;
1525 			else
1526 				ta_peer = NULL;
1527 
1528 			/* relese peer reference that added by hash find */
1529 			dp_peer_unref_delete(peer, mod_id);
1530 		} else {
1531 		/* mlo MLD peer or non-mlo link peer */
1532 			ta_peer = peer;
1533 		}
1534 	}
1535 
1536 	return ta_peer;
1537 }
1538 
1539 /**
1540  * dp_peer_mlo_delete() - peer MLO related delete operation
1541  * @peer: DP peer handle
1542  * Return: None
1543  */
1544 static inline
1545 void dp_peer_mlo_delete(struct dp_peer *peer)
1546 {
1547 	struct dp_peer *ml_peer;
1548 	struct dp_soc *soc;
1549 
1550 	/* MLO connection link peer */
1551 	if (IS_MLO_DP_LINK_PEER(peer)) {
1552 		ml_peer = peer->mld_peer;
1553 		soc = ml_peer->vdev->pdev->soc;
1554 
1555 		/* if last link peer deletion, delete MLD peer */
1556 		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
1557 			dp_peer_delete(soc, peer->mld_peer, NULL);
1558 	}
1559 }
1560 
1561 /**
1562  * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
1563  * @soc: Soc handle
1564  * @vdev_id: Vdev ID
1565  * @peer_setup_info: peer setup information for MLO
1566  */
1567 QDF_STATUS dp_peer_mlo_setup(
1568 			struct dp_soc *soc,
1569 			struct dp_peer *peer,
1570 			uint8_t vdev_id,
1571 			struct cdp_peer_setup_info *setup_info);
1572 
1573 /**
1574  * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
1575  * @peer: datapath peer
1576  *
1577  * Return: MLD peer in case of MLO Link peer
1578  *	   Peer itself in other cases
1579  */
1580 static inline
1581 struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
1582 {
1583 	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
1584 }
1585 
1586 /**
1587  * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
1588  *					peer id
1589  * @soc: core DP soc context
1590  * @peer_id: peer id
1591  * @mod_id: ID of module requesting reference
1592  *
1593  * Return: primary link peer for the MLO peer
1594  *	   legacy peer itself in case of legacy peer
1595  */
1596 static inline
1597 struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
1598 					       uint16_t peer_id,
1599 					       enum dp_mod_id mod_id)
1600 {
1601 	uint8_t i;
1602 	struct dp_mld_link_peers link_peers_info;
1603 	struct dp_peer *peer;
1604 	struct dp_peer *link_peer;
1605 	struct dp_peer *primary_peer = NULL;
1606 
1607 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1608 
1609 	if (!peer)
1610 		return NULL;
1611 
1612 	if (IS_MLO_DP_MLD_PEER(peer)) {
1613 		/* get link peers with reference */
1614 		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
1615 						    mod_id);
1616 
1617 		for (i = 0; i < link_peers_info.num_links; i++) {
1618 			link_peer = link_peers_info.link_peers[i];
1619 			if (link_peer->primary_link) {
1620 				primary_peer = link_peer;
1621 				/*
1622 				 * Take additional reference over
1623 				 * primary link peer.
1624 				 */
1625 				dp_peer_get_ref(NULL, primary_peer, mod_id);
1626 				break;
1627 			}
1628 		}
1629 		/* release link peers reference */
1630 		dp_release_link_peers_ref(&link_peers_info, mod_id);
1631 		dp_peer_unref_delete(peer, mod_id);
1632 	} else {
1633 		primary_peer = peer;
1634 	}
1635 
1636 	return primary_peer;
1637 }
1638 
1639 /**
1640  * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
1641  * @peer: Datapath peer
1642  *
1643  * Return: dp_txrx_peer from MLD peer if peer type is link peer
1644  *	   dp_txrx_peer from peer itself for other cases
1645  */
1646 static inline
1647 struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
1648 {
1649 	return IS_MLO_DP_LINK_PEER(peer) ?
1650 				peer->mld_peer->txrx_peer : peer->txrx_peer;
1651 }
1652 
1653 /**
1654  * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
1655  * @peer: Datapath peer
1656  *
1657  * Return: true if peer is primary link peer or legacy peer
1658  *	   false otherwise
1659  */
1660 static inline
1661 bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
1662 {
1663 	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
1664 		return true;
1665 	else if (IS_DP_LEGACY_PEER(peer))
1666 		return true;
1667 	else
1668 		return false;
1669 }
1670 
1671 /**
1672  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
1673  *
1674  * @soc		: core DP soc context
1675  * @peer_id	: peer id from peer object can be retrieved
1676  * @handle	: reference handle
1677  * @mod_id      : ID ot module requesting reference
1678  *
1679  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
1680  */
1681 static inline struct dp_txrx_peer *
1682 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
1683 			       uint16_t peer_id,
1684 			       dp_txrx_ref_handle *handle,
1685 			       enum dp_mod_id mod_id)
1686 
1687 {
1688 	struct dp_peer *peer;
1689 	struct dp_txrx_peer *txrx_peer;
1690 
1691 	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
1692 	if (!peer)
1693 		return NULL;
1694 
1695 	txrx_peer = dp_get_txrx_peer(peer);
1696 	if (txrx_peer) {
1697 		*handle = (dp_txrx_ref_handle)peer;
1698 		return txrx_peer;
1699 	}
1700 
1701 	dp_peer_unref_delete(peer, mod_id);
1702 	return NULL;
1703 }
1704 
1705 #else
1706 
/* WLAN_FEATURE_11BE_MLO disabled: every peer is treated as legacy */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
1715 
/* non-MLO build: the target peer is simply the hash-find result */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

/* non-MLO build: the target peer is the peer itself */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

/* non-MLO build: nothing to set up, always succeeds */
static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}
1745 
/* non-MLO build: the following MLO bookkeeping hooks are all no-ops */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
1771 
/* non-MLO build: single chip, id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* non-MLO build: chip_id is ignored, regular hash lookup */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

/* non-MLO build: the target peer is the peer itself */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}
1795 
/* non-MLO build: every peer acts as its own primary link */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
1815 
1816 /**
1817  * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
1818  *
1819  * @soc		: core DP soc context
1820  * @peer_id	: peer id from peer object can be retrieved
1821  * @handle	: reference handle
1822  * @mod_id      : ID ot module requesting reference
1823  *
1824  * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
1825  */
1826 static inline struct dp_txrx_peer *
1827 dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
1828 			       uint16_t peer_id,
1829 			       dp_txrx_ref_handle *handle,
1830 			       enum dp_mod_id mod_id)
1831 
1832 {
1833 	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
1834 }
1835 
1836 static inline
1837 uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
1838 					uint8_t lmac_id)
1839 {
1840 	return peer_id;
1841 }
1842 #endif /* WLAN_FEATURE_11BE_MLO */
1843 
1844 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
1845 /**
1846  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
1847  * @soc: Soc handle
1848  * @peer: DP peer handle for ML peer
1849  * @peer_id: peer_id
1850  * Return: None
1851  */
1852 void dp_mlo_partner_chips_map(struct dp_soc *soc,
1853 			      struct dp_peer *peer,
1854 			      uint16_t peer_id);
1855 
1856 /**
1857  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
1858  * @soc: Soc handle
1859  * @peer_id: peer_id
1860  * Return: None
1861  */
1862 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
1863 				uint16_t peer_id);
1864 #else
/* single-chip or non-MLO build: no partner SOC mapping to maintain */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
1875 #endif
1876 
1877 static inline
1878 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
1879 {
1880 	uint8_t i;
1881 
1882 	if (IS_MLO_DP_MLD_PEER(peer)) {
1883 		dp_peer_info("skip for mld peer");
1884 		return QDF_STATUS_SUCCESS;
1885 	}
1886 
1887 	if (peer->rx_tid) {
1888 		QDF_BUG(0);
1889 		dp_peer_err("peer rx_tid mem already exist");
1890 		return QDF_STATUS_E_FAILURE;
1891 	}
1892 
1893 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
1894 				      sizeof(struct dp_rx_tid));
1895 
1896 	if (!peer->rx_tid) {
1897 		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
1898 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1899 		return QDF_STATUS_E_NOMEM;
1900 	}
1901 
1902 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
1903 	for (i = 0; i < DP_MAX_TIDS; i++)
1904 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
1905 
1906 	return QDF_STATUS_SUCCESS;
1907 }
1908 
/**
 * dp_peer_rx_tids_destroy() - Free the rx_tid array of a peer
 * @peer: Datapath peer
 *
 * Destroys the per-TID locks and frees the array allocated by
 * dp_peer_rx_tids_create(), then clears the pointer.
 *
 * Return: None
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	/*
	 * For MLO link peers only the pointer is cleared — presumably
	 * rx_tid is owned elsewhere (mld peer) in that case; TODO confirm
	 * the ownership against dp_peer_rx_tids_create() callers.
	 */
	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}
1923 
1924 static inline
1925 void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
1926 {
1927 	uint8_t i;
1928 
1929 	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
1930 		     sizeof(struct dp_rx_tid_defrag));
1931 
1932 	for (i = 0; i < DP_MAX_TIDS; i++)
1933 		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
1934 }
1935 
1936 static inline
1937 void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
1938 {
1939 	uint8_t i;
1940 
1941 	for (i = 0; i < DP_MAX_TIDS; i++)
1942 		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
1943 }
1944 
1945 #ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_init() - Initialize the cached rx buffer queue
 * @txrx_peer: Datapath txrx peer
 *
 * Creates the bufq lock and the cached frame list, bounded by
 * DP_RX_CACHED_BUFQ_THRESH.
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

/**
 * dp_peer_rx_bufq_resources_deinit() - Destroy the cached rx buffer queue
 * @txrx_peer: Datapath txrx peer
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
1961 
1962 #else
/* PEER_CACHE_RX_PKTS disabled: no cached buffer queue to manage */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
1972 #endif
1973 
1974 #ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - Delete the peer's entry from the
 *					  shared REO qref table
 * @soc: Datapath soc handle
 * @peer: Datapath peer
 *
 * Return: None
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* shared REO qref table disabled: nothing to delete */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
1980 #endif
1981 #endif /* _DP_PEER_H_ */
1982