xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

/**
 * dp_peer_iter_func() - Callback signature used by the peer iterator
 * APIs below; invoked once per peer
 * @soc: core DP soc context
 * @peer: DP peer
 * @arg: opaque argument supplied by the caller of the iterator
 */
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

/**
 * dp_peer_unref_delete() - Release a reference on the peer taken earlier
 * with the same module id; the peer is freed when the last reference
 * is dropped
 * @peer: DP peer
 * @id: id of the module releasing the reference
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_peer_get_ref() - Take a reference on the given peer object
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @mod_id	: id of module requesting the reference
 *
 * Return:	QDF_STATUS_SUCCESS if reference held successfully
 *		else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	/* per-module reference counts are not tracked for the data-path
	 * module ids, which only use the global ref_cnt
	 */
	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id;
 *			       unlike dp_peer_get_ref_by_id(), the peer
 *			       state is not checked
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object, or NULL if the id
 *	   is invalid or no reference could be taken
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id,
 *			     provided the peer has not entered logical
 *			     delete
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object, or NULL if the peer
 *	   is not found, is being deleted, or no reference could be taken
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}
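
/*
 * Typical usage (a hypothetical sketch; DP_MOD_ID_CDP and the error
 * handling are illustrative): every successful dp_peer_get_ref_by_id()
 * must be paired with a dp_peer_unref_delete() using the same module id.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_INVAL;
 *
 *	(use peer here; it cannot be freed while the reference is held)
 *
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */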

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

/**
 * dp_clear_peer_internal() - Mark the peer as disconnected and flush its
 *			      cached rx frames
 * @soc: core DP soc context
 * @peer: DP peer
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
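
/*
 * Example callback (a hypothetical sketch; dp_peer_count_cb and its
 * counter argument are illustrative, not part of this API): the iterator
 * holds a reference around each invocation, so @peer stays valid for the
 * duration of the call.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer,
 *				     void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */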

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through the peer list
 * of a vdev without holding the peer list lock across the callback
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, use it only when the iterate
 * function cannot be called while the peer list lock is held
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
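
/*
 * Note: unlike dp_vdev_iterate_peer(), the callback here runs outside
 * vdev->peer_list_lock, so it may itself take peer list locks or perform
 * work that is unsafe under the lock. A hypothetical sketch (the callback
 * name is illustrative):
 *
 *	static void dp_peer_disconnect_cb(struct dp_soc *soc,
 *					  struct dp_peer *peer,
 *					  void *arg)
 *	{
 *		dp_clear_peer_internal(soc, peer);
 *	}
 *
 *	dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_disconnect_cb, NULL,
 *				       DP_MOD_ID_CDP);
 */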

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of
 * pdev without holding the peer list locks across the callback
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, use it only when the iterate
 * function cannot be called while the peer list lock is held
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* restart the peer index for this vdev's array */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;

				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 * without holding the peer list locks across the callback
 *
 * This API caches the peers in locally allocated memory and invokes the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, use it only when the iterate
 * function cannot be called while the peer list lock is held
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: id of module requesting the reference
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do {  \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer	: DP peer
 * @state	: state to compare against
 *
 * Return: true if the given state matches the peer's current state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}
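
/*
 * Example (a hypothetical sketch; the error value is illustrative):
 * reject an operation unless the peer has reached the ACTIVE state.
 *
 *	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE))
 *		return QDF_STATUS_E_PERM;
 */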

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @state	: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
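
/*
 * Summary of the transitions permitted by the assertions above (derived
 * from dp_peer_update_state(); FREED is reachable directly from INIT only
 * for the STA self peer):
 *
 *	INIT           <- any state except ACTIVE and LOGICAL_DELETE
 *	ACTIVE         <- INIT
 *	LOGICAL_DELETE <- INIT or ACTIVE
 *	INACTIVE       <- LOGICAL_DELETE
 *	FREED          <- LOGICAL_DELETE or INACTIVE (INIT for self peer)
 */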

void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
		uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
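
/*
 * Example (a hypothetical sketch; assumes a local variable named soc,
 * which the macro body references): dump the AST stats and assert when a
 * lookup unexpectedly fails.
 *
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
 *	DP_AST_ASSERT(ast_entry);
 */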

/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from the multipass peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params() - Set Rx & Tx capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture params are updated for the
 *	   peer, error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush the reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id in tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/*
	 * Delete the peer self ast entry. This is done to handle scenarios
	 * where the peer is freed before the peer map is received (for
	 * example, when authentication is disallowed due to ACL); in such
	 * cases the self ast is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#endif /* _DP_PEER_H_ */