xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_txrx_wds.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "enet.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_mon.h"
#endif
#include "dp_txrx_wds.h"

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	5000
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
#define DP_MAX_VLAN_IDS 4096
#define DP_INVALID_AST_IDX 0xffff
#define DP_INVALID_FLOW_PRIORITY 0xff
#define DP_PEER_AST0_FLOW_MASK 0x4
#define DP_PEER_AST1_FLOW_MASK 0x8
#define DP_PEER_AST2_FLOW_MASK 0x1
#define DP_PEER_AST3_FLOW_MASK 0x2
#define DP_MAX_AST_INDEX_PER_PEER 4
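
/*
 * Illustration (comment only, not compiled): dp_peer_get_flowid_from_flowmask()
 * further below resolves the per-AST flow masks above to flow queue ids, e.g.
 *
 *	uint8_t mask = DP_PEER_AST0_FLOW_MASK;	// 0x4
 *	int fq = dp_peer_get_flowid_from_flowmask(peer, mask);
 *	// fq == DP_PEER_AST_FLOWQ_UDP
 */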

#ifdef WLAN_FEATURE_MULTI_AST_DEL

void dp_peer_free_peer_ase_list(struct dp_soc *soc,
				struct peer_del_multi_wds_entries *wds_list)
{
	struct peer_wds_entry_list *wds_entry, *tmp_entry;

	TAILQ_FOREACH_SAFE(wds_entry, &wds_list->ase_list,
			   ase_list_elem, tmp_entry) {
		dp_peer_debug("type: %d mac_addr: " QDF_MAC_ADDR_FMT,
			      wds_entry->type,
			      QDF_MAC_ADDR_REF(wds_entry->dest_addr));
		TAILQ_REMOVE(&wds_list->ase_list, wds_entry, ase_list_elem);
		wds_list->num_entries--;
		qdf_mem_free(wds_entry);
	}
}

static void
dp_pdev_build_peer_ase_list(struct dp_soc *soc, struct dp_peer *peer,
			    void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct peer_del_multi_wds_entries *list = arg;
	struct peer_wds_entry_list *wds_entry;

	if (!soc || !peer || !arg) {
		dp_peer_err("Invalid input");
		return;
	}

	list->vdev_id = peer->vdev->vdev_id;
	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			ase->is_active = false;
			continue;
		}

		if (ase->delete_in_progress) {
			dp_info_rl("Del set addr:" QDF_MAC_ADDR_FMT " type:%d",
				   QDF_MAC_ADDR_REF(ase->mac_addr.raw),
				   ase->type);
			continue;
		}

		if (ase->is_mapped)
			soc->ast_table[ase->ast_idx] = NULL;

		if (!ase->next_hop) {
			dp_peer_unlink_ast_entry(soc, ase, peer);
			continue;
		}

		wds_entry = (struct peer_wds_entry_list *)
			    qdf_mem_malloc(sizeof(*wds_entry));
		if (!wds_entry) {
			dp_peer_err("%pK: fail to allocate wds_entry", soc);
			dp_peer_free_peer_ase_list(soc, list);
			return;
		}

		DP_STATS_INC(soc, ast.aged_out, 1);
		ase->delete_in_progress = true;
		wds_entry->dest_addr = ase->mac_addr.raw;
		wds_entry->type = ase->type;

		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE))
			wds_entry->delete_in_fw = false;
		else
			wds_entry->delete_in_fw = true;

		dp_peer_debug("ase->type: %d pdev: %u vdev: %u mac_addr: " QDF_MAC_ADDR_FMT " next_hop: %u peer: %u",
			      ase->type, ase->pdev_id, ase->vdev_id,
			      QDF_MAC_ADDR_REF(ase->mac_addr.raw),
			      ase->next_hop, ase->peer_id);
		TAILQ_INSERT_TAIL(&list->ase_list, wds_entry, ase_list_elem);
		list->num_entries++;
	}
	dp_peer_info("Total num of entries :%d", list->num_entries);
}

static void
dp_peer_age_multi_ast_entries(struct dp_soc *soc, void *arg,
			      enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_pdev *pdev = NULL;
	struct peer_del_multi_wds_entries wds_list = {0};

	TAILQ_INIT(&wds_list.ase_list);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, dp_pdev_build_peer_ase_list,
				     &wds_list, mod_id);
		if (wds_list.num_entries > 0) {
			dp_peer_ast_send_multi_wds_del(soc, wds_list.vdev_id,
						       &wds_list);
			dp_peer_free_peer_ase_list(soc, &wds_list);
		} else {
			dp_peer_debug("No AST entries for pdev:%u",
				      pdev->pdev_id);
		}
	}
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */

static void
dp_peer_age_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct ast_del_ctxt *del_ctxt = (struct ast_del_ctxt *)arg;

	if ((del_ctxt->del_count >= soc->max_ast_ageout_count) &&
	    !del_ctxt->age) {
		return;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		/*
		 * Do not expire static ast entries and HM WDS entries
		 */
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			if (del_ctxt->age)
				ase->is_active = FALSE;

			continue;
		}

		if (del_ctxt->del_count < soc->max_ast_ageout_count) {
			DP_STATS_INC(soc, ast.aged_out, 1);
			dp_peer_del_ast(soc, ase);
			del_ctxt->del_count++;
		} else {
			soc->pending_ageout = true;
			if (!del_ctxt->age)
				break;
		}
	}
}
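
/*
 * Aging note (illustrative): WDS/DA AST entries age out over two timer
 * passes. An "age" pass only demotes an active entry (is_active = FALSE);
 * the entry is deleted on a later pass if no traffic re-activated it.
 * At most soc->max_ast_ageout_count entries are deleted per pass; any
 * remainder is deferred to the next pass via soc->pending_ageout.
 */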

static void
dp_peer_age_mec_entries(struct dp_soc *soc)
{
	uint32_t index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		/*
		 * Expire MEC entries that saw no traffic since the
		 * previous aging pass.
		 */
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
				if (mecentry->is_active) {
					mecentry->is_active = FALSE;
					continue;
				}
				dp_peer_mec_detach_entry(soc, mecentry,
							 &free_list);
			}
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	dp_peer_mec_free_list(soc, &free_list);
}

#ifdef WLAN_FEATURE_MULTI_AST_DEL
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);

		if (soc->multi_peer_grp_cmd_supported)
			dp_peer_age_multi_ast_entries(soc, NULL, DP_MOD_ID_AST);
		else
			dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
					    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#else
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);
		dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
				    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */
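
/*
 * Timing note (illustrative): the timer fires every
 * DP_AST_AGING_TIMER_DEFAULT_MS (5 s), but AST entries are only
 * demoted/deleted once every DP_WDS_AST_AGING_TIMER_CNT expirations
 * (del_ctxt.age), or sooner when a previous pass left work pending
 * (soc->pending_ageout). MEC entries are examined on every expiration
 * unless NSS offload owns the MEC timeout.
 */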

#ifndef IPA_WDS_EASYMESH_FEATURE
/*
 * dp_soc_wds_attach() - Setup WDS timer and AST table
 * @soc:		Datapath SOC handle
 *
 * Return: None
 */
void dp_soc_wds_attach(struct dp_soc *soc)
{
	if (soc->ast_offload_support)
		return;

	soc->wds_ast_aging_timer_cnt = 0;
	soc->pending_ageout = false;
	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
		       dp_ast_aging_timer_fn, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
}

/*
 * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
void dp_soc_wds_detach(struct dp_soc *soc)
{
	qdf_timer_stop(&soc->ast_aging_timer);
	qdf_timer_free(&soc->ast_aging_timer);
}
#else
void dp_soc_wds_attach(struct dp_soc *soc)
{
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp vdev handle
 * @status: Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to Host
 *
 * Return: none
 */
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	QDF_STATUS add_mec_status;
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;

	if (!vdev->mec_enabled)
		return;

	/* MEC required only in STA mode */
	if (vdev->opmode != wlan_op_mode_sta)
		return;

	soc = vdev->pdev->soc;

	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
					status[(QDF_MAC_ADDR_SIZE - 2) + i];

	dp_peer_debug("%pK: MEC add for mac_addr "QDF_MAC_ADDR_FMT,
		      soc, QDF_MAC_ADDR_REF(mac_addr));

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) {
		add_mec_status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
		dp_peer_debug("%pK: MEC add status %d", vdev, add_mec_status);
	}
}
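
/*
 * Worked example (comment only): the HTT status words carry the
 * destination MAC in reverse byte order starting at offset
 * QDF_MAC_ADDR_SIZE - 2 (= 4), so with
 *
 *	status[4..9] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }
 *
 * the loop above yields mac_addr = 11:22:33:44:55:66.
 */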

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *			This is a WAR for HK 1.0 and will
 *			be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_txrx_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_txrx_peer *ta_txrx_peer,
	       qdf_nbuf_t nbuf)
{
	struct dp_peer *base_peer;
	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_txrx_peer))
		return;

	if (qdf_unlikely(ta_txrx_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
			 !qdf_nbuf_is_da_mcbc(nbuf))) {
		base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						  DP_MOD_ID_AST);

		if (base_peer) {
			dp_peer_add_ast(soc,
					base_peer,
					qdf_nbuf_data(nbuf),
					CDP_TXRX_AST_TYPE_DA,
					DP_AST_FLAGS_HM);

			dp_peer_unref_delete(base_peer, DP_MOD_ID_AST);
		}
	}
}

/**
 * dp_txrx_set_wds_rx_policy() - API to store datapath
 *                            config parameters
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of datapath vdev handle
 * @val: WDS rx policy bitmap (WDS_POLICY_RX_*)
 *
 * Return: status
 */
#ifdef WDS_VENDOR_EXTENSION
QDF_STATUS
dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  u_int32_t val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_MISC);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("vdev is NULL for vdev_id %d"), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);

	if (peer) {
		peer->txrx_peer->wds_ecm.wds_rx_filter = 1;
		peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr =
			(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
		peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr =
			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
	return QDF_STATUS_SUCCESS;
}
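
/*
 * Usage sketch (comment only, illustrative): enable 4-address rx for
 * both unicast and multicast on a vdev's BSS peer:
 *
 *	dp_txrx_set_wds_rx_policy(soc_hdl, vdev_id,
 *				  WDS_POLICY_RX_UCAST_4ADDR |
 *				  WDS_POLICY_RX_MCAST_4ADDR);
 */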

/**
 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
 *
 * @soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @wds_tx_ucast: policy for unicast transmission
 * @wds_tx_mcast: policy for multicast transmission
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
				  uint8_t *peer_mac, int wds_tx_ucast,
				  int wds_tx_mcast)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
						       peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL for mac %pM vdev_id %d"),
			  peer_mac, vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return QDF_STATUS_E_INVAL;
	}

	if (wds_tx_ucast || wds_tx_mcast) {
		peer->txrx_peer->wds_enabled = 1;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
	} else {
		peer->txrx_peer->wds_enabled = 0;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = 0;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = 0;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "Policy Update set to :\n");
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_enabled %d\n", peer->txrx_peer->wds_enabled);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_ecm.wds_tx_ucast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_ecm.wds_tx_mcast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr);

	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return QDF_STATUS_SUCCESS;
}

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;
	hal_soc_handle_t hal_soc = vdev->pdev->soc->hal_soc;
	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);

	if (vdev->opmode == wlan_op_mode_ap) {
		bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
		/* if wds policy check is not enabled on this vdev, accept all frames */
		if (bss_peer && !bss_peer->txrx_peer->wds_ecm.wds_rx_filter) {
			dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
			return 1;
		}
		rx_policy_ucast = bss_peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr;
		dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
	} else {             /* sta mode */
		if (!txrx_peer->wds_ecm.wds_rx_filter)
			return 1;

		rx_policy_ucast = txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = txrx_peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                       self
	 * peer-             rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00  x1     0      bad frame, won't see it
	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00  1x     0      bad frame, won't see it
	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00  x0     0      bad frame, won't see it
	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode.
	 * 0     x     x     01  xx     1
	 * 0     x     x     10  xx     0
	 * 0     x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(hal_soc, rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(hal_soc, rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!txrx_peer->wds_enabled && rx_3addr && to_ds) ||
		    (txrx_peer->wds_enabled && !rx_mcast &&
		    (rx_4addr == rx_policy_ucast)) ||
		    (txrx_peer->wds_enabled && rx_mcast &&
		    (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {           /* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
				(rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
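
/*
 * Worked example (one row of the table above): a 4-address unicast frame
 * from a wds peer has fr_ds = to_ds = 1, so rx_3addr = 0 and rx_4addr = 1.
 * With rx_policy_ucast = 1 the AP branch matches
 * (rx_4addr == rx_policy_ucast) and the frame is accepted; with
 * rx_policy_ucast = 0 it is dropped.
 */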
#endif

/**
 * dp_tx_add_groupkey_metadata - Add group key in metadata
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor
 * @group_key: Group key index programmed in metadata
 *
 * Return: void
 */
#ifdef QCA_MULTIPASS_SUPPORT
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint16_t group_key)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	/*
	 * When attempting to send a multicast packet with multi-passphrase,
	 * host shall add the HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
	 * (see htt.h), carrying the group key index in the "key_flags" field
	 * with "valid_key_flags" set to 1, i.e. key_flags = group_key_ix.
	 */
	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0], 1);
	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}

/**
 * dp_tx_remove_vlan_tag - Remove 4 bytes of vlan tag
 * @vdev: DP vdev handle
 * @nbuf: network buffer to strip the tag from
 *
 * Return: void
 */
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct vlan_ethhdr veth_hdr;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;

	/*
	 * Extract VLAN header of 4 bytes:
	 * Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], EtherType[2], Payload}
	 * Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 08 00 45 00 00...
	 * After Removal  : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 00...
	 */
	qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
	qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
	veh = (struct vlan_ethhdr *)nbuf->data;
	qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
}

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
/**
 * dp_tx_need_mcast_reinject - If frame needs to be processed in reinject path
 * @vdev: DP vdev handle
 *
 * Return: true if reinject handling is required else false
 */
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
	if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
		return true;

	return false;
}
#else
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
	return false;
}

#endif
/**
 * dp_tx_need_multipass_process - If frame needs multipass phrase processing
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @buf: network buffer
 * @vlan_id: vlan id of frame
 *
 * Return: DP_VLAN_UNTAGGED, DP_VLAN_TAGGED_MULTICAST or
 *	   DP_VLAN_TAGGED_UNICAST
 */
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			   qdf_nbuf_t buf, uint16_t *vlan_id)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
	struct vlan_ethhdr *veh = NULL;
	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
			(htons(eh->ether_type) != ETH_P_8021Q));

	if (qdf_unlikely(not_vlan))
		return DP_VLAN_UNTAGGED;

	veh = (struct vlan_ethhdr *)eh;
	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);

	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
		/* look for handling of multicast packets in reinject path */
		if (dp_tx_need_mcast_reinject(vdev))
			return DP_VLAN_UNTAGGED;

		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
			      mpass_peer_list_elem) {
			if (*vlan_id == txrx_peer->vlan_id) {
				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
				return DP_VLAN_TAGGED_MULTICAST;
			}
		}
		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
		return DP_VLAN_UNTAGGED;
	}

	peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
				      DP_MOD_ID_TX_MULTIPASS);

	if (qdf_unlikely(peer == NULL))
		return DP_VLAN_UNTAGGED;

	/*
	 * Do not drop the frame when vlan_id doesn't match.
	 * Send the frame as it is.
	 */
	if (*vlan_id == peer->txrx_peer->vlan_id) {
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
		return DP_VLAN_TAGGED_UNICAST;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
	return DP_VLAN_UNTAGGED;
}

/**
 * dp_tx_multipass_process - Process vlan frames in tx path
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor
 *
 * Return: false if the frame must be dropped, true if it can be transmitted
 */
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	uint16_t vlan_id = 0;
	uint16_t group_key = 0;
	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
	qdf_nbuf_t nbuf_copy = NULL;

	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(
					msdu_info->meta_data[0])) {
		return true;
	}

	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);

	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
		return true;

	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
		dp_tx_remove_vlan_tag(vdev, nbuf);
		return true;
	}

	/* AP can have classic clients, special clients &
	 * classic repeaters.
	 * 1. Classic clients & special client:
	 *	Remove vlan header, find corresponding group key
	 *	index, fill in metaheader and enqueue multicast
	 *	frame to TCL.
	 * 2. Classic repeater:
	 *	Pass through to classic repeater with vlan tag
	 *	intact without any group key index. Hardware
	 *	will know which key to use to send frame to
	 *	repeater.
	 */
	nbuf_copy = qdf_nbuf_copy(nbuf);

	/*
	 * Send multicast frame to special peers even
	 * if pass through to classic repeater fails.
	 */
	if (nbuf_copy) {
		struct dp_tx_msdu_info_s msdu_info_copy;

		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
					msdu_info_copy.meta_data[0], 1);
		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
						   &msdu_info_copy,
						   HTT_INVALID_PEER, NULL);
		if (nbuf_copy) {
			qdf_nbuf_free(nbuf_copy);
			qdf_err("nbuf_copy send failed");
		}
	}

	group_key = vdev->iv_vlan_map[vlan_id];

	/*
	 * If group key is not installed, drop the frame.
	 */
	if (!group_key)
		return false;

	dp_tx_remove_vlan_tag(vdev, nbuf);
	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
	msdu_info->exception_fw = 1;
	return true;
}
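
/*
 * Flow summary (illustrative): for a vlan-tagged frame this function either
 *  - strips the tag and returns true (unicast to a multipass peer),
 *  - clones the frame for classic repeaters, then strips the tag, stamps
 *    the group key metadata and returns true (multicast), or
 *  - returns false so the caller drops the frame (multicast whose vlan has
 *    no group key installed).
 * Untagged frames and frames for non-multipass peers pass through unchanged.
 */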

/**
 * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
 * @txrx_peer: DP txrx peer handle
 * @nbuf: skb
 * @tid: traffic priority
 *
 * Return: bool: true in case of success else false
 * Success is considered if:
 *  i. If frame has vlan header
 *  ii. If the frame comes from a different peer and does not need multipass
 *      processing
 * Failure is considered if:
 *  i. Frame comes from multipass peer but doesn't contain vlan header.
 *  In failure case, drop such frames.
 */
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	struct vlan_ethhdr *vethhdrp;

	if (qdf_unlikely(!txrx_peer->vlan_id))
		return true;

	vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
	/*
	 * h_vlan_proto is expected to be 0x8100 and h_vlan_TCI zero,
	 * since the tag is zero-padded by the sender. Return false if
	 * the frame does not carry this tag so that the caller drops it.
	 */
	if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
	    qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
		return false;

	vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
		(txrx_peer->vlan_id & VLAN_VID_MASK));

	if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
		dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);

	return true;
}
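
/*
 * Worked example (comment only): rebuilding the TCI for tid 5 and
 * vlan_id 100 with VLAN_PRIO_SHIFT = 13:
 *
 *	h_vlan_TCI = htons(((5 & 0x7) << VLAN_PRIO_SHIFT) | 100);
 *	// TCI = 0xa064
 */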

#endif /* QCA_MULTIPASS_SUPPORT */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_MULTIPASS_SUPPORT

/**
 * dp_peer_multipass_list_remove: remove peer from list
 * @peer: pointer to peer
 *
 * return: void
 */
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_txrx_peer *tpeer = NULL;
	bool found = 0;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
		if (tpeer == peer->txrx_peer) {
			found = 1;
			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
				     mpass_peer_list_elem);
			break;
		}
	}

	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (found)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_peer_multipass_list_add: add to new multipass list
 * @soc: soc handle
 * @peer_mac: mac address
 * @vdev_id: vdev id for peer
 * @vlan_id: vlan_id
 *
 * return: void
 */
static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
				       uint8_t vdev_id, uint16_t vlan_id)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_TX_MULTIPASS);

	if (qdf_unlikely(!peer)) {
		qdf_err("NULL peer");
		return;
	}

	if (qdf_unlikely(!peer->txrx_peer))
		goto fail;

	/* If peer already exists in vdev multipass list, do not add it.
	 * This may happen if key install comes twice or re-key
	 * happens for a peer.
	 */
	if (peer->txrx_peer->vlan_id) {
		dp_debug("peer already added to vdev multipass list "
			 "MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 peer->txrx_peer->vlan_id);
		goto fail;
	}

	/*
	 * Ref_cnt is incremented inside dp_peer_get_tgt_peer_hash_find().
	 * Decrement it when the element is deleted from the list.
	 */
	peer->txrx_peer->vlan_id = vlan_id;
	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
			  peer->txrx_peer,
			  mpass_peer_list_elem);
	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	return;

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: vdev id for peer
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
		uint8_t vdev_id, uint8_t *peer_mac,
		uint16_t vlan_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
				      DP_MOD_ID_TX_MULTIPASS);

	if (!vdev)
		return;

	if (vdev->multipass_en)
		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);

	/* Release the vdev reference unconditionally so it is not
	 * leaked when multipass is disabled on the vdev.
	 */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc_hdl: pointer to soc
 * @vdev_id: id of vdev
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_MULTIPASS);
	QDF_STATUS status;

	if (!vdev || !vdev->multipass_en) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!vdev->iv_vlan_map) {
		uint16_t vlan_map_size = sizeof(uint16_t) * DP_MAX_VLAN_IDS;

		vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);

		if (!vdev->iv_vlan_map) {
			QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
			status = QDF_STATUS_E_NOMEM;
			goto fail;
		}

		/*
		 * 0 is an invalid group key.
		 * Initialize the array with invalid group keys.
		 */
		qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
	}

	if (vlan_id >= DP_MAX_VLAN_IDS) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	vdev->iv_vlan_map[vlan_id] = group_key;
	status = QDF_STATUS_SUCCESS;
fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
	return status;
}
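
/*
 * Usage sketch (comment only, illustrative; the actual caller and ordering
 * live in the control path): after a vlan group key is installed for a
 * multipass vdev:
 *
 *	// map vlan 100 to hw group key index 2 on this vdev
 *	dp_set_vlan_groupkey(soc_hdl, vdev_id, 100, 2);
 *	// tag the client peer so its traffic is classified to vlan 100
 *	dp_peer_set_vlan_id(soc_hdl, vdev_id, peer_mac, 100);
 */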

/**
 * dp_tx_vdev_multipass_deinit: teardown multipass state for vdev
 * @vdev: pointer to vdev
 *
 * return: void
 */
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
		qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (vdev->iv_vlan_map) {
		qdf_mem_free(vdev->iv_vlan_map);
		vdev->iv_vlan_map = NULL;
	}

	qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}

/**
 * dp_peer_multipass_list_init: initialize peer multipass list
 * @vdev: pointer to vdev
 *
 * return: void
 */
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/*
	 * vdev->iv_vlan_map is allocated when the first configuration command
	 * is issued to avoid unnecessary allocation for regular mode VAP.
	 */
	TAILQ_INIT(&vdev->mpass_peer_list);
	qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */

#ifdef QCA_PEER_MULTIQ_SUPPORT

/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	int i = 0;

	if (!peer)
		return;

	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		peer->peer_ast_flowq_idx[i].is_valid = false;
		peer->peer_ast_flowq_idx[i].valid_tid_mask = false;
		peer->peer_ast_flowq_idx[i].ast_idx = DP_INVALID_AST_IDX;
		peer->peer_ast_flowq_idx[i].flowQ = DP_INVALID_FLOW_PRIORITY;
	}
}

/**
 * dp_peer_get_flowid_from_flowmask() - get flow id from flow mask
 * @peer - dp peer handle
 * @mask - flow mask
 *
 * Return: flow id
 */
static int dp_peer_get_flowid_from_flowmask(struct dp_peer *peer,
		uint8_t mask)
{
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	if (mask & DP_PEER_AST0_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_UDP;
	else if (mask & DP_PEER_AST1_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_NON_UDP;
	else if (mask & DP_PEER_AST2_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_HI_PRIO;
	else if (mask & DP_PEER_AST3_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_LOW_PRIO;

	return DP_PEER_AST_FLOWQ_MAX;
}

/**
 * dp_peer_get_ast_valid() - get ast index valid from mask
 * @mask - mask for ast valid bits
 * @index - index for an ast
 *
 * Return - 1 if ast index is valid from mask else 0
 */
static inline bool dp_peer_get_ast_valid(uint8_t mask, uint16_t index)
{
	if (index == 0)
		return 1;
	return ((mask) & (1 << ((index) - 1)));
}
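
/*
 * Illustration (comment only): AST index 0 is always reported valid;
 * higher indices are gated by bit (index - 1) of the mask:
 *
 *	dp_peer_get_ast_valid(0x2, 0);	// true  (index 0 always valid)
 *	dp_peer_get_ast_valid(0x2, 2);	// true  (bit 1 set)
 *	dp_peer_get_ast_valid(0x2, 1);	// false (bit 0 clear)
 */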

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;
	uint8_t i;

	/*
	 * Ast flow override feature is supported
	 * only for connected client
	 */
	if (is_wds)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return;
	}

	/* Valid only in AP mode */
	if (peer->vdev->opmode != wlan_op_mode_ap) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer ast flow map valid only in AP mode\n",
				__func__);
		goto end;
	}

	/* Making sure the peer is for this mac address */
	if (!qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer mac address mismatch\n", __func__);
		goto end;
	}

	/* Ast entry flow mapping not valid for self peer map */
	if (qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Ast flow mapping not valid for self peer\n",
				__func__);
		goto end;
	}

	/* Fill up ast index <---> flow id mapping table for this peer */
	for (i = 0; i < DP_MAX_AST_INDEX_PER_PEER; i++) {
		/* Check if this ast index is valid */
		peer->peer_ast_flowq_idx[i].is_valid =
			dp_peer_get_ast_valid(ast_info->ast_valid_mask, i);
		if (!peer->peer_ast_flowq_idx[i].is_valid)
			continue;

		/* Get the flow queue id which is mapped to this ast index */
		peer->peer_ast_flowq_idx[i].flowQ =
			dp_peer_get_flowid_from_flowmask(peer,
					ast_info->ast_flow_mask[i]);
		/*
		 * Update tid valid mask only for the HI and LOW
		 * priority flow queues
		 */
		if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_HI_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_hi_pri_mask;
		} else if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_LOW_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_low_pri_mask;
		}

		/* Save the ast index for this entry */
		peer->peer_ast_flowq_idx[i].ast_idx = ast_info->ast_idx[i];
	}

	if (soc->cdp_soc.ol_ops->peer_ast_flowid_map) {
		soc->cdp_soc.ol_ops->peer_ast_flowid_map(
				soc->ctrl_psoc, peer->peer_id,
				peer->vdev->vdev_id, peer_mac_addr);
	}

end:
	/* Release peer reference */
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}

/**
 * dp_peer_find_ast_index_by_flowq_id() - API to get ast idx for a given flowid
 * @soc - soc handle
 * @vdev_id - vdev id of the peer
 * @peer_mac_addr - mac address of the peer
 * @flow_id - flow id to find ast index
 * @tid - tid to validate against the flow's valid tid mask
 *
 * Return: ast index for a given flow id, -1 for fail cases
 */
int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
		uint16_t vdev_id, uint8_t *peer_mac_addr,
		uint8_t flow_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	uint8_t i;
	uint16_t ast_index;

	if (flow_id >= DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid Flow ID %d\n", flow_id);
		return -1;
	}

	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
				peer_mac_addr, 0, vdev_id,
				DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	/*
	 * Loop over the ast entry <----> flow-id mapping to find
	 * which ast index entry has this flow queue id enabled.
	 */
	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		if (peer->peer_ast_flowq_idx[i].flowQ == flow_id)
			/*
			 * Found the matching index for this flow id
			 */
			break;
	}

	/*
	 * No match found for this flow id
	 */
	if (i == DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index not found for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	/* Check whether this ast entry is valid */
	if (!peer->peer_ast_flowq_idx[i].is_valid) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index is invalid for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	if (flow_id == DP_PEER_AST_FLOWQ_HI_PRIO ||
			flow_id == DP_PEER_AST_FLOWQ_LOW_PRIO) {
		/*
		 * check if this tid is valid for Hi
		 * and Low priority flow id
		 */
		if ((peer->peer_ast_flowq_idx[i].valid_tid_mask
					& (1 << tid))) {
			ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
			/* Release peer reference */
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return ast_index;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s: TID %d is not valid for flow %d\n",
					__func__, tid, flow_id);
			/*
			 * TID is not valid for this flow
			 * Return -1
			 */
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return -1;
		}
	}

	/*
	 * TID valid check not required for
	 * UDP/NON UDP flow id
	 */
	ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return ast_index;
}
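
/*
 * Usage sketch (comment only, illustrative): look up the ast index a tx
 * flow should use; tid matters only for the HI/LOW priority queues:
 *
 *	int idx = dp_peer_find_ast_index_by_flowq_id(cdp_soc, vdev_id,
 *			peer_mac, DP_PEER_AST_FLOWQ_HI_PRIO, tid);
 *	if (idx < 0)
 *		;	// no mapping, or tid not enabled for this queue
 */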
#endif

void dp_hmwds_ast_add_notify(struct dp_peer *peer,
			     uint8_t *mac_addr,
			     enum cdp_txrx_ast_entry_type type,
			     QDF_STATUS err,
			     bool is_peer_map)
{
	struct dp_vdev *dp_vdev = peer->vdev;
	struct dp_pdev *dp_pdev = dp_vdev->pdev;
	struct cdp_peer_hmwds_ast_add_status add_status;

	/* Ignore ast types other than HM */
	if ((type != CDP_TXRX_AST_TYPE_WDS_HM) &&
	    (type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return;

	/* An existing ast entry delete is in progress; the add will be
	 * retried once the delete completes, and status is sent then.
	 */
	if (err == QDF_STATUS_E_AGAIN)
		return;

	/* peer map pending, notify actual status
	 * when peer map is received.
	 */
	if (!is_peer_map && (err == QDF_STATUS_SUCCESS))
		return;

	qdf_mem_zero(&add_status, sizeof(add_status));
	add_status.vdev_id = dp_vdev->vdev_id;
	/* For type CDP_TXRX_AST_TYPE_WDS_HM_SEC dp_peer_add_ast()
	 * returns QDF_STATUS_E_FAILURE, as it is a host-only entry;
	 * treat that as success. QDF_STATUS_E_ALREADY means the entry
	 * already exists, which is also treated as success. Any other
	 * error code is an actual error.
	 */
	if (((type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	     (err == QDF_STATUS_E_FAILURE)) ||
	    (err == QDF_STATUS_E_ALREADY)) {
		err = QDF_STATUS_SUCCESS;
	}
	add_status.status = err;
	qdf_mem_copy(add_status.peer_mac, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(add_status.ast_mac, mac_addr,
		     QDF_MAC_ADDR_SIZE);
#ifdef WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_HMWDS_AST_ADD_STATUS, dp_pdev->soc,
			     (void *)&add_status, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);
#endif
}

#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @txrx_peer: dp txrx peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: tx completion latency
 *
 * This function indicates whether the buffer needs to be
 * sent to the stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *txrx_peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;
	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
	struct dp_peer *peer;

	if (qdf_unlikely(!dp_monitor_is_enable_tx_sniffer(pdev) &&
			 !dp_monitor_is_enable_mcopy_mode(pdev) &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!txrx_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
	 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
	 * for each MPDU
	 */
	if (dp_monitor_mcopy_check_deliver(pdev,
					   peer_id,
					   ppdu_id,
					   first_msdu) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_INVAL;

	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
		if (!netbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("No headroom"));
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, txrx_peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_COMP);
	if (peer) {
		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
	}
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = (uint32_t)time_latency;
	}

	return QDF_STATUS_SUCCESS;
}
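
/*
 * Layout note (illustrative): on success the nbuf handed to the stack is
 * prepended with a struct tx_capture_hdr carrying the ta/ra MAC addresses,
 * ppdu_id, peer_id and the first/last msdu flags, followed by the original
 * msdu payload; tsf and time_latency are filled in only when latency
 * capture is enabled.
 */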

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#endif
#endif