/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif
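
/*
 * Illustrative sketch (not part of this header): filling in a
 * reo_qdesc_event record. The history ring and index below are assumptions
 * made for the example; only struct reo_qdesc_event and the QDF helpers
 * come from the driver.
 *
 *	static struct reo_qdesc_event qdesc_hist[32];
 *	static qdf_atomic_t qdesc_hist_idx;
 *
 *	static void record_qdesc_free(struct dp_peer *peer,
 *				      qdf_dma_addr_t qdesc_addr)
 *	{
 *		uint32_t idx = qdf_atomic_inc_return(&qdesc_hist_idx) & 31;
 *		struct reo_qdesc_event *evt = &qdesc_hist[idx];
 *
 *		evt->qdesc_addr = qdesc_addr;
 *		evt->ts = qdf_get_log_timestamp();
 *		evt->type = REO_QDESC_FREE;
 *		qdf_mem_copy(evt->peer_mac, peer->mac_addr.raw,
 *			     QDF_MAC_ADDR_SIZE);
 *	}
 */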

struct ast_del_ctxt {
	bool age;
	int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_peer_get_ref() - Take a reference on the given peer object
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @mod_id	: id of module requesting the reference
 *
 * Return:	QDF_STATUS_SUCCESS if reference held successfully
 *		else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
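
/*
 * Note: every successful dp_peer_get_ref() must be balanced by a
 * dp_peer_unref_delete() with the same mod_id, e.g.:
 *
 *	if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *		...access peer...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 *
 * DP_MOD_ID_CDP is only an example module id from enum dp_mod_id.
 */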

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object is retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object is retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}
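
/*
 * Usage sketch (illustrative): look up an active peer by id, use it, then
 * drop the reference. DP_MOD_ID_CDP is an example module id from
 * enum dp_mod_id.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *
 *	...access peer fields...
 *
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */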

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
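
/*
 * Illustrative sketch (not part of this header): a dp_peer_iter_func that
 * counts authorized peers on a vdev. It assumes the authorize flag in
 * struct dp_peer. The callback runs under vdev->peer_list_lock with a
 * reference held on the peer, so it must not sleep.
 *
 *	static void count_auth_peer(struct dp_soc *soc, struct dp_peer *peer,
 *				    void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		if (peer->authorize)
 *			(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, count_auth_peer, &count, DP_MOD_ID_CDP);
 */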

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the peer list lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the lock held.
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i++;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
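
/*
 * Design note: the _lock_safe variants exist for callbacks that cannot run
 * under the peer list spinlock (e.g. callbacks that sleep or take locks of
 * their own). The peer array is a snapshot: peers added after
 * vdev->num_peers is sampled are skipped, and each cached peer remains
 * valid only because a reference is held until its callback returns.
 * An illustrative call (dp_flush_peer_cb is hypothetical):
 *
 *	dp_vdev_iterate_peer_lock_safe(vdev, dp_flush_peer_cb, NULL,
 *				       DP_MOD_ID_CDP);
 */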

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the peer list lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the lock held.
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* the cache index restarts for each vdev's peer array */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;

				j++;
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i++;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the peer list lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the lock held.
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer	: DP peer
 * @state	: state to compare with
 *
 * Return: true if state matches the peer state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @state	: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
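
/*
 * Peer state machine implied by the asserts above (sketch):
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *	INIT -> LOGICAL_DELETE -> FREED       (peer torn down early)
 *	INIT -> FREED                         (STA self peer only)
 *
 * Illustrative usage:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	...
 *	if (dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE))
 *		...peer can be looked up via dp_peer_get_ref_by_id()...
 */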

void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from the MEC table and added to the free list
 * so that the object can be freed outside the lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entries from the free list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - Add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds a MEC entry to the MEC table.
 * It assumes the caller has taken the MEC lock to protect access to the
 * table
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);
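
/*
 * Usage sketch (illustrative): serialize MEC table access with the MEC
 * spinlock before adding an entry. soc->mec_lock is assumed here to be the
 * lock created by dp_peer_mec_spinlock_create().
 *
 *	QDF_STATUS status;
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_peer_err("MEC entry add failed");
 */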

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 * within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of the MEC node
 *
 * It assumes the caller has taken the mec_lock to protect access to
 * the MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc); \
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 *
 * Return: None
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params() - Set Rx & Tx capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 * Return: None
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete the peer self ast entry. This is done to handle scenarios
	 * where the peer is freed before the peer map is received (for
	 * example, in case of auth disallow due to ACL); in such cases the
	 * self ast is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send the cache flush cmd to REO and
 * to register the callback that handles dumping of the REO queue
 * stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);

#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */