/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Take a reference on the given peer
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @mod_id	: id of module requesting the reference
 *
 * Return:	QDF_STATUS_SUCCESS if the reference was taken successfully,
 *		else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
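
/*
 * A minimal usage sketch (illustrative only, not part of the upstream API):
 * dp_peer_get_ref() is a "try-get" that fails once the reference count has
 * already dropped to zero, so it may only be called while the peer pointer
 * is otherwise guaranteed valid (e.g. under soc->peer_map_lock or while
 * holding another reference).  DP_MOD_ID_CDP is assumed here to be a valid
 * dp_mod_id from dp_types.h.
 */
static inline QDF_STATUS
dp_peer_get_ref_usage_sketch(struct dp_soc *soc, struct dp_peer *peer)
{
	/* caller already holds a reference or soc->peer_map_lock */
	if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_INVAL;

	/* ... use the peer safely here ... */

	/* every successful dp_peer_get_ref() needs a matching unref */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}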

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *                           if peer state is active
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}
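
/*
 * A minimal lookup sketch (illustrative only, not part of the upstream API):
 * translate a peer id to a referenced peer object, use it, and drop the
 * reference.  DP_MOD_ID_CDP is assumed here to be a valid dp_mod_id from
 * dp_types.h.
 */
static inline bool
dp_peer_id_active_sketch(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer;
	bool active;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (!peer)
		return false;

	active = (peer->peer_state == DP_PEER_STATE_ACTIVE);
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return active;
}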

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

/**
 * dp_clear_peer_internal() - Mark peer as disconnected and flush any
 *                            cached rx frames
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
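
/*
 * A minimal iteration sketch (illustrative only, not part of the upstream
 * API): dp_peer_iter_func callbacks run with a reference held on the peer
 * and with vdev->peer_list_lock held, so they must not sleep or take that
 * lock again.  A caller would do, e.g.:
 *	uint32_t n = 0;
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_sketch, &n, DP_MOD_ID_CDP);
 */
static inline void
dp_peer_count_sketch(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	uint32_t *count = (uint32_t *)arg;

	(*count)++;
}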

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * Since this API allocates new memory, use it only when the lock cannot
 * be held across the callback.
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i++;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
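
/*
 * A sketch of when the _lock_safe variant matters (illustrative only, not
 * part of the upstream API): since the callback runs after peer_list_lock
 * has been dropped, it may block or take peer locks of its own.
 * dp_rx_flush_rx_cached(), declared above, stands in here for such a
 * callback body.
 */
static inline void
dp_peer_flush_sketch(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	/* safe only because the lock_safe iterator calls us unlocked */
	dp_rx_flush_rx_cached(peer, false);
}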

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * Since this API allocates new memory, use it only when the lock cannot
 * be held across the callback.
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* restart the index for each vdev's own peer array */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j++;
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i++;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * Since this API allocates new memory, use it only when the lock cannot
 * be held across the callback.
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @state	: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		QDF_ASSERT
			((peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		QDF_ASSERT(peer_state == DP_PEER_STATE_INIT);
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		QDF_ASSERT((peer_state == DP_PEER_STATE_ACTIVE) ||
			   (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		QDF_ASSERT(peer_state == DP_PEER_STATE_LOGICAL_DELETE);
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			QDF_ASSERT(peer_state ==
					DP_PEER_STATE_INIT);
		else
			QDF_ASSERT((peer_state ==
					DP_PEER_STATE_INACTIVE) ||
				   (peer_state ==
					DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		QDF_ASSERT(0);
		break;
	}
	qdf_info("Updating peer state from %u to %u mac %pM\n",
		 peer_state, state, peer->mac_addr.raw);
	peer->peer_state = state;
}
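
/*
 * The asserts above encode the expected peer state machine.  As a sketch
 * (illustrative only), a normal peer lifetime walks the states in order:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 *
 * while a STA self peer goes straight from INIT to FREED.
 */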

void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
		uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
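
/*
 * Note that DP_AST_ASSERT() expands to dp_print_ast_stats(soc), so it relies
 * on a local 'struct dp_soc *soc' being in scope at the call site.  A minimal
 * usage sketch (illustrative only, not part of the upstream API):
 */
static inline void
dp_ast_assert_sketch(struct dp_soc *soc, struct dp_ast_entry *ase)
{
	/* dumps AST stats before tripping QDF_BUG when 'ase' is NULL */
	DP_AST_ASSERT(ase);
}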

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		    bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		    struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);
#endif /* _DP_PEER_H_ */