/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "enet.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_mon.h"
#endif
#include "dp_txrx_wds.h"

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	5000
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
#define DP_MAX_VLAN_IDS 4096
#define DP_INVALID_AST_IDX 0xffff
#define DP_INVALID_FLOW_PRIORITY 0xff
#define DP_PEER_AST0_FLOW_MASK 0x4
#define DP_PEER_AST1_FLOW_MASK 0x8
#define DP_PEER_AST2_FLOW_MASK 0x1
#define DP_PEER_AST3_FLOW_MASK 0x2
#define DP_MAX_AST_INDEX_PER_PEER 4
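
/*
 * Illustrative note (sketch, derived from the defines above): taken
 * together with dp_peer_get_flowid_from_flowmask() later in this file,
 * the per-AST flow masks decode as
 *
 *	mask & DP_PEER_AST0_FLOW_MASK (0x4) -> DP_PEER_AST_FLOWQ_UDP
 *	mask & DP_PEER_AST1_FLOW_MASK (0x8) -> DP_PEER_AST_FLOWQ_NON_UDP
 *	mask & DP_PEER_AST2_FLOW_MASK (0x1) -> DP_PEER_AST_FLOWQ_HI_PRIO
 *	mask & DP_PEER_AST3_FLOW_MASK (0x2) -> DP_PEER_AST_FLOWQ_LOW_PRIO
 */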

#ifdef WLAN_FEATURE_MULTI_AST_DEL

void dp_peer_free_peer_ase_list(struct dp_soc *soc,
				struct peer_del_multi_wds_entries *wds_list)
{
	struct peer_wds_entry_list *wds_entry, *tmp_entry;

	TAILQ_FOREACH_SAFE(wds_entry, &wds_list->ase_list,
			   ase_list_elem, tmp_entry) {
		dp_peer_debug("type: %d mac_addr: " QDF_MAC_ADDR_FMT,
			      wds_entry->type,
			      QDF_MAC_ADDR_REF(wds_entry->dest_addr));
		TAILQ_REMOVE(&wds_list->ase_list, wds_entry, ase_list_elem);
		wds_list->num_entries--;
		qdf_mem_free(wds_entry);
	}
}

static void
dp_pdev_build_peer_ase_list(struct dp_soc *soc, struct dp_peer *peer,
			    void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct peer_del_multi_wds_entries *list = arg;
	struct peer_wds_entry_list *wds_entry;

	if (!soc || !peer || !arg) {
		dp_peer_err("Invalid input");
		return;
	}

	list->vdev_id = peer->vdev->vdev_id;
	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			ase->is_active = false;
			continue;
		}

		if (ase->delete_in_progress) {
			dp_info_rl("Del set addr:" QDF_MAC_ADDR_FMT " type:%d",
				   QDF_MAC_ADDR_REF(ase->mac_addr.raw),
				   ase->type);
			continue;
		}

		if (ase->is_mapped)
			soc->ast_table[ase->ast_idx] = NULL;

		if (!ase->next_hop) {
			dp_peer_unlink_ast_entry(soc, ase, peer);
			continue;
		}

		wds_entry = (struct peer_wds_entry_list *)
			    qdf_mem_malloc(sizeof(*wds_entry));
		if (!wds_entry) {
			dp_peer_err("%pK: fail to allocate wds_entry", soc);
			dp_peer_free_peer_ase_list(soc, list);
			return;
		}

		DP_STATS_INC(soc, ast.aged_out, 1);
		ase->delete_in_progress = true;
		wds_entry->dest_addr = ase->mac_addr.raw;
		wds_entry->type = ase->type;

		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE))
			wds_entry->delete_in_fw = false;
		else
			wds_entry->delete_in_fw = true;

		dp_peer_debug("ase->type: %d pdev: %u vdev: %u mac_addr: " QDF_MAC_ADDR_FMT " next_hop: %u peer: %u",
			      ase->type, ase->pdev_id, ase->vdev_id,
			      QDF_MAC_ADDR_REF(ase->mac_addr.raw),
			      ase->next_hop, ase->peer_id);
		TAILQ_INSERT_TAIL(&list->ase_list, wds_entry, ase_list_elem);
		list->num_entries++;
	}
	dp_peer_info("Total num of entries: %d", list->num_entries);
}

static void
dp_peer_age_multi_ast_entries(struct dp_soc *soc, void *arg,
			      enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_pdev *pdev = NULL;
	struct peer_del_multi_wds_entries wds_list = {0};

	TAILQ_INIT(&wds_list.ase_list);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, dp_pdev_build_peer_ase_list,
				     &wds_list, mod_id);
		if (wds_list.num_entries > 0) {
			dp_peer_ast_send_multi_wds_del(soc, wds_list.vdev_id,
						       &wds_list);
			dp_peer_free_peer_ase_list(soc, &wds_list);
		} else {
			dp_peer_debug("No AST entries for pdev:%u",
				      pdev->pdev_id);
		}
	}
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */

static void
dp_peer_age_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct ast_del_ctxt *del_ctxt = (struct ast_del_ctxt *)arg;

	if ((del_ctxt->del_count >= soc->max_ast_ageout_count) &&
	    !del_ctxt->age) {
		return;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		/*
		 * Do not expire static ast entries and HM WDS entries
		 */
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			if (del_ctxt->age)
				ase->is_active = false;

			continue;
		}

		if (del_ctxt->del_count < soc->max_ast_ageout_count) {
			DP_STATS_INC(soc, ast.aged_out, 1);
			dp_peer_del_ast(soc, ase);
			del_ctxt->del_count++;
		} else {
			soc->pending_ageout = true;
			if (!del_ctxt->age)
				break;
		}
	}
}

static void
dp_peer_age_mec_entries(struct dp_soc *soc)
{
	uint32_t index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		/*
		 * Expire MEC entry every n sec.
		 */
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
				if (mecentry->is_active) {
					mecentry->is_active = false;
					continue;
				}
				dp_peer_mec_detach_entry(soc, mecentry,
							 &free_list);
			}
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	dp_peer_mec_free_list(soc, &free_list);
}

#ifdef WLAN_FEATURE_MULTI_AST_DEL
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);

		if (soc->multi_peer_grp_cmd_supported)
			dp_peer_age_multi_ast_entries(soc, NULL, DP_MOD_ID_AST);
		else
			dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
					    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#else
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);
		dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
				    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */

/**
 * dp_soc_wds_attach() - Setup WDS timer and AST table
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
void dp_soc_wds_attach(struct dp_soc *soc)
{
	if (soc->ast_offload_support)
		return;

	soc->wds_ast_aging_timer_cnt = 0;
	soc->pending_ageout = false;
	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
		       dp_ast_aging_timer_fn, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
}

/**
 * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
void dp_soc_wds_detach(struct dp_soc *soc)
{
	qdf_timer_stop(&soc->ast_aging_timer);
	qdf_timer_free(&soc->ast_aging_timer);
}
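
/*
 * Usage sketch (illustrative only, assuming a fully initialized soc):
 * the aging timer is armed at attach and re-arms itself from
 * dp_ast_aging_timer_fn() every DP_AST_AGING_TIMER_DEFAULT_MS while
 * cmn_init_done stays set.
 */
#if 0	/* example, not compiled */
	struct dp_soc *soc = example_soc;	/* hypothetical soc instance */

	dp_soc_wds_attach(soc);		/* init + arm the 5 s aging timer */
	/* ... datapath runs; inactive WDS/DA AST entries age out ... */
	dp_soc_wds_detach(soc);		/* stop and free the timer */
#endif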

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to DP vdev handle
 * @status: Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to host
 *
 * Return: none
 */
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	QDF_STATUS add_mec_status;
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;

	if (!vdev->mec_enabled)
		return;

	/* MEC required only in STA mode */
	if (vdev->opmode != wlan_op_mode_sta)
		return;

	soc = vdev->pdev->soc;

	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
					status[(QDF_MAC_ADDR_SIZE - 2) + i];

	dp_peer_debug("%pK: MEC add for mac_addr "QDF_MAC_ADDR_FMT,
		      soc, QDF_MAC_ADDR_REF(mac_addr));

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) {
		add_mec_status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
		dp_peer_debug("%pK: MEC add status %d", vdev, add_mec_status);
	}
}
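
/*
 * Worked example for the byte reversal above (illustrative): the HTT
 * completion carries the MEC address in reverse byte order starting at
 * status[QDF_MAC_ADDR_SIZE - 2], i.e. status[4]. Iteration i copies
 * status[4 + i] into mac_addr[5 - i], so for
 * status[4..9] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 } the loop yields
 * mac_addr[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }.
 */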

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *			This is a WAR for HK 1.0 and will
 *			be removed in HK 2.0
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_txrx_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 * Return: None
 */
void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_txrx_peer *ta_txrx_peer,
	       qdf_nbuf_t nbuf)
{
	struct dp_peer *base_peer;

	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_txrx_peer))
		return;

	if (qdf_unlikely(ta_txrx_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
			 !qdf_nbuf_is_da_mcbc(nbuf))) {
		base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						  DP_MOD_ID_AST);

		if (base_peer) {
			dp_peer_add_ast(soc,
					base_peer,
					qdf_nbuf_data(nbuf),
					CDP_TXRX_AST_TYPE_DA,
					DP_AST_FLAGS_HM);

			dp_peer_unref_delete(base_peer, DP_MOD_ID_AST);
		}
	}
}

/**
 * dp_txrx_set_wds_rx_policy() - API to store datapath
 *                            config parameters
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of datapath vdev handle
 * @val: WDS rx policy value
 *
 * Return: status
 */
#ifdef WDS_VENDOR_EXTENSION
QDF_STATUS
dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  u_int32_t val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_MISC);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("vdev is NULL for vdev_id %d"), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);

	if (peer) {
		peer->txrx_peer->wds_ecm.wds_rx_filter = 1;
		peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr =
			(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
		peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr =
			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
 * @soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @wds_tx_ucast: policy for unicast transmission
 * @wds_tx_mcast: policy for multicast transmission
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc,  uint8_t vdev_id,
				  uint8_t *peer_mac, int wds_tx_ucast,
				  int wds_tx_mcast)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
						       peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_AST);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL for mac %pM vdev_id %d"),
			  peer_mac, vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return QDF_STATUS_E_INVAL;
	}

	if (wds_tx_ucast || wds_tx_mcast) {
		peer->txrx_peer->wds_enabled = 1;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
	} else {
		peer->txrx_peer->wds_enabled = 0;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = 0;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = 0;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "Policy Update set to:\n");
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->txrx_peer->wds_enabled %d\n",
		  peer->txrx_peer->wds_enabled);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr);

	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return QDF_STATUS_SUCCESS;
}

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;
	hal_soc_handle_t hal_soc = vdev->pdev->soc->hal_soc;
	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);

	if (vdev->opmode == wlan_op_mode_ap) {
		bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
		if (!bss_peer)
			return 1;

		/* if wds policy check is not enabled on this vdev, accept all frames */
		if (!bss_peer->txrx_peer->wds_ecm.wds_rx_filter) {
			dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
			return 1;
		}
		rx_policy_ucast = bss_peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr;
		dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
	} else {             /* sta mode */
		if (!txrx_peer->wds_ecm.wds_rx_filter)
			return 1;

		rx_policy_ucast = txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = txrx_peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                       self
	 * peer-             rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00  x1     0      bad frame, won't see it
	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00  1x     0      bad frame, won't see it
	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00  x0     0      bad frame, won't see it
	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode
	 * 0     x     x     01  xx     1
	 * 0     x     x     10  xx     0
	 * 0     x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(hal_soc, rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(hal_soc, rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!txrx_peer->wds_enabled && rx_3addr && to_ds) ||
		    (txrx_peer->wds_enabled && !rx_mcast &&
		    (rx_4addr == rx_policy_ucast)) ||
		    (txrx_peer->wds_enabled && rx_mcast &&
		    (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {           /* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
				(rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
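
/*
 * Worked example against the table above (illustrative): a 4-address
 * WDS frame has both FromDS and ToDS set, so fr_ds = 1, to_ds = 1,
 * giving rx_3addr = 1 ^ 1 = 0 and rx_4addr = 1 & 1 = 1. For a
 * wds-enabled AP peer with rx_policy_ucast = 1 (accept ds-to-ds
 * unicast), a unicast frame (rx_mcast = 0) satisfies
 * rx_4addr == rx_policy_ucast and is accepted, i.e. the
 * "1 1 0 11 x1 1" row of the table.
 */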
#endif

/**
 * dp_tx_add_groupkey_metadata() - Add group key in metadata
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor
 * @group_key: Group key index programmed in metadata
 *
 * Return: void
 */
#ifdef QCA_MULTIPASS_SUPPORT
static
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint16_t group_key)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	/*
	 * When attempting to send a multicast packet with multi-passphrase,
	 * host shall add HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
	 * (ref htt.h), setting the group_id field in "key_flags" and also
	 * setting "valid_key_flags" to 1. Assign "key_flags = group_key_ix".
	 */
	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0], 1);
	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}
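
/*
 * Sketch (illustrative): effect of dp_tx_add_groupkey_metadata() on the
 * MSDU extension descriptor. Per the code above, meta_data[0] gets the
 * valid_key_flags bit and meta_data[2] carries the group key index in
 * its key_flags field; the exact bit positions come from the
 * HTT_TX_MSDU_EXT2_DESC_* macros in htt.h.
 */
#if 0	/* example, not compiled */
	struct dp_tx_msdu_info_s msdu_info = { 0 };

	dp_tx_add_groupkey_metadata(vdev, &msdu_info, /* group_key */ 2);
	/* msdu_info.meta_data[0]: valid_key_flags = 1 */
	/* msdu_info.meta_data[2]: key_flags = 2 */
#endif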

/**
 * dp_tx_remove_vlan_tag() - Remove 4 bytes of vlan tag
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: void
 */
static
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct vlan_ethhdr veth_hdr;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;

	/*
	 * Extract VLAN header of 4 bytes:
	 * Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], EtherType[2], Payload}
	 * Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 08 00 45 00 00...
	 * After Removal  : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 00...
	 */
	qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
	qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
	veh = (struct vlan_ethhdr *)nbuf->data;
	qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
}

/**
 * dp_tx_need_multipass_process() - If frame needs multipass phrase processing
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @buf: network buffer
 * @vlan_id: vlan id of frame
 *
 * Return: whether peer is special or classic
 */
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			   qdf_nbuf_t buf, uint16_t *vlan_id)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
	struct vlan_ethhdr *veh = NULL;
	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
			(htons(eh->ether_type) != ETH_P_8021Q));

	if (qdf_unlikely(not_vlan))
		return DP_VLAN_UNTAGGED;

	veh = (struct vlan_ethhdr *)eh;
	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);

	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
			      mpass_peer_list_elem) {
			if (*vlan_id == txrx_peer->vlan_id) {
				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
				return DP_VLAN_TAGGED_MULTICAST;
			}
		}
		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
		return DP_VLAN_UNTAGGED;
	}

	peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
				      DP_MOD_ID_TX_MULTIPASS);

	if (qdf_unlikely(!peer))
		return DP_VLAN_UNTAGGED;

	/*
	 * Do not drop the frame when vlan_id doesn't match.
	 * Send the frame as it is.
	 */
	if (*vlan_id == peer->txrx_peer->vlan_id) {
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
		return DP_VLAN_TAGGED_UNICAST;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
	return DP_VLAN_UNTAGGED;
}
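
/*
 * Classification summary for dp_tx_need_multipass_process() above
 * (illustrative): raw or non-802.1Q frames return DP_VLAN_UNTAGGED; a
 * tagged multicast returns DP_VLAN_TAGGED_MULTICAST only when some
 * multipass peer on the vdev owns the frame's vlan_id; a tagged
 * unicast returns DP_VLAN_TAGGED_UNICAST only when the destination
 * peer's vlan_id matches, and otherwise falls back to DP_VLAN_UNTAGGED
 * so the frame is sent unmodified.
 */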

/**
 * dp_tx_multipass_process() - Process vlan frames in tx path
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor
 *
 * Return: status whether frame needs to be dropped or transmitted
 */
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	uint16_t vlan_id = 0;
	uint16_t group_key = 0;
	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
	qdf_nbuf_t nbuf_copy = NULL;

	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0])) {
		return true;
	}

	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);

	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
		return true;

	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
		dp_tx_remove_vlan_tag(vdev, nbuf);
		return true;
	}

	/* AP can have classic clients, special clients &
	 * classic repeaters.
	 * 1. Classic clients & special client:
	 *	Remove vlan header, find corresponding group key
	 *	index, fill in metaheader and enqueue multicast
	 *	frame to TCL.
	 * 2. Classic repeater:
	 *	Pass through to classic repeater with vlan tag
	 *	intact without any group key index. Hardware
	 *	will know which key to use to send frame to
	 *	repeater.
	 */
	nbuf_copy = qdf_nbuf_copy(nbuf);

	/*
	 * Send multicast frame to special peers even
	 * if pass through to classic repeater fails.
	 */
	if (nbuf_copy) {
		struct dp_tx_msdu_info_s msdu_info_copy;

		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
						   &msdu_info_copy,
						   HTT_INVALID_PEER, NULL);
		if (nbuf_copy) {
			qdf_nbuf_free(nbuf_copy);
			qdf_err("nbuf_copy send failed");
		}
	}

	group_key = vdev->iv_vlan_map[vlan_id];

	/*
	 * If group key is not installed, drop the frame.
	 */
	if (!group_key)
		return false;

	dp_tx_remove_vlan_tag(vdev, nbuf);
	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
	msdu_info->exception_fw = 1;
	return true;
}

/**
 * dp_rx_multipass_process() - insert vlan tag on frames for traffic separation
 * @txrx_peer: DP txrx peer handle
 * @nbuf: skb
 * @tid: traffic priority
 *
 * Return: bool: true in case of success else false
 * Success is considered if:
 *  i. the frame has a vlan header
 *  ii. the frame comes from a different peer and doesn't need multipass processing
 * Failure is considered if:
 *  i. the frame comes from a multipass peer but doesn't contain a vlan header.
 *  In the failure case, drop such frames.
 */
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	struct vlan_ethhdr *vethhdrp;

	if (qdf_unlikely(!txrx_peer->vlan_id))
		return true;

	vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
	/*
	 * h_vlan_proto & h_vlan_TCI should be 0x8100 & zero respectively,
	 * as h_vlan_TCI is expected to be padded with 0.
	 * Return false if the frame doesn't carry this tag so that the
	 * caller will drop it.
	 */
	if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
	    qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
		return false;

	vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
		(txrx_peer->vlan_id & VLAN_VID_MASK));

	if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
		dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);

	return true;
}
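
/*
 * Worked example of the TCI rewrite above (illustrative): with tid = 5
 * and txrx_peer->vlan_id = 10, and the usual 802.1Q layout where
 * VLAN_PRIO_SHIFT is 13, h_vlan_TCI becomes
 * htons((5 << 13) | 10) = htons(0xa00a), i.e. PCP 5 and VID 10.
 */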

#endif /* QCA_MULTIPASS_SUPPORT */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_MULTIPASS_SUPPORT

/**
 * dp_peer_multipass_list_remove() - remove peer from list
 * @peer: pointer to peer
 *
 * Return: void
 */
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_txrx_peer *tpeer = NULL;
	bool found = false;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
		if (tpeer == peer->txrx_peer) {
			found = true;
			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
				     mpass_peer_list_elem);
			break;
		}
	}

	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (found)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_peer_multipass_list_add() - add peer to the vdev multipass list
 * @soc: soc handle
 * @peer_mac: mac address
 * @vdev_id: vdev id for peer
 * @vlan_id: vlan_id
 *
 * Return: void
 */
static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
				       uint8_t vdev_id, uint16_t vlan_id)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_TX_MULTIPASS);

	if (qdf_unlikely(!peer)) {
		qdf_err("NULL peer");
		return;
	}

	if (qdf_unlikely(!peer->txrx_peer))
		goto fail;

	/* If peer already exists in vdev multipass list, do not add it.
	 * This may happen if key install comes twice or re-key
	 * happens for a peer.
	 */
	if (peer->txrx_peer->vlan_id) {
		dp_debug("peer already added to vdev multipass list "
			 "MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 peer->txrx_peer->vlan_id);
		goto fail;
	}

	/*
	 * Ref_cnt is incremented inside dp_peer_get_tgt_peer_hash_find().
	 * Decrement it when the element is deleted from the list.
	 */
	peer->txrx_peer->vlan_id = vlan_id;
	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
			  peer->txrx_peer,
			  mpass_peer_list_elem);
	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	return;

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: vdev id for peer
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
		uint8_t vdev_id, uint8_t *peer_mac,
		uint16_t vlan_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(soc, vdev_id,
				      DP_MOD_ID_TX_MULTIPASS);

	if (vdev && vdev->multipass_en) {
		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
	}
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc_hdl: pointer to soc
 * @vdev_id: id of vdev
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_MULTIPASS);
	QDF_STATUS status;

	if (!vdev || !vdev->multipass_en) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!vdev->iv_vlan_map) {
		uint16_t vlan_map_size = (sizeof(uint16_t)) * DP_MAX_VLAN_IDS;

		vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
		if (!vdev->iv_vlan_map) {
			QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
			status = QDF_STATUS_E_NOMEM;
			goto fail;
		}

		/*
		 * 0 is an invalid group key.
		 * Initialize the array with invalid group keys.
		 */
		qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
	}

	if (vlan_id >= DP_MAX_VLAN_IDS) {
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	vdev->iv_vlan_map[vlan_id] = group_key;
	status = QDF_STATUS_SUCCESS;
fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
	return status;
}
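
/*
 * Usage sketch (illustrative only, hypothetical handles): multipass
 * setup first binds a group key index to a VLAN on the vdev, then tags
 * a peer with its VLAN so the Tx/Rx multipass paths can match it.
 */
#if 0	/* example, not compiled */
	/* program group key index 2 for VLAN 10 on this vdev */
	dp_set_vlan_groupkey(soc_hdl, vdev_id, /* vlan_id */ 10,
			     /* group_key */ 2);

	/* tag a client with VLAN 10 (vdev must have multipass_en set) */
	dp_peer_set_vlan_id(soc_hdl, vdev_id, peer_mac, /* vlan_id */ 10);
#endif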

/**
 * dp_tx_vdev_multipass_deinit() - release multipass resources for vdev
 * @vdev: pointer to vdev
 *
 * Return: void
 */
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
		qdf_err("Peers present in mpass list: %d", txrx_peer->peer_id);
	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (vdev->iv_vlan_map) {
		qdf_mem_free(vdev->iv_vlan_map);
		vdev->iv_vlan_map = NULL;
	}

	qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}

/**
 * dp_peer_multipass_list_init() - initialize peer multipass list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/*
	 * vdev->iv_vlan_map is allocated when the first configuration command
	 * is issued to avoid unnecessary allocation for regular mode VAP.
	 */
	TAILQ_INIT(&vdev->mpass_peer_list);
	qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */

#ifdef QCA_PEER_MULTIQ_SUPPORT

/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	int i = 0;

	if (!peer)
		return;

	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		peer->peer_ast_flowq_idx[i].is_valid = false;
		peer->peer_ast_flowq_idx[i].valid_tid_mask = false;
		peer->peer_ast_flowq_idx[i].ast_idx = DP_INVALID_AST_IDX;
		peer->peer_ast_flowq_idx[i].flowQ = DP_INVALID_FLOW_PRIORITY;
	}
}

/**
 * dp_peer_get_flowid_from_flowmask() - get flow id from flow mask
 * @peer: dp peer handle
 * @mask: flow mask
 *
 * Return: flow id
 */
static int dp_peer_get_flowid_from_flowmask(struct dp_peer *peer,
		uint8_t mask)
{
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	if (mask & DP_PEER_AST0_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_UDP;
	else if (mask & DP_PEER_AST1_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_NON_UDP;
	else if (mask & DP_PEER_AST2_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_HI_PRIO;
	else if (mask & DP_PEER_AST3_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_LOW_PRIO;

	return DP_PEER_AST_FLOWQ_MAX;
}

/**
 * dp_peer_get_ast_valid() - get ast index valid from mask
 * @mask: mask for ast valid bits
 * @index: index for an ast
 *
 * Return: 1 if ast index is valid from mask else 0
 */
static inline bool dp_peer_get_ast_valid(uint8_t mask, uint16_t index)
{
	if (index == 0)
		return 1;
	return ((mask) & (1 << ((index) - 1)));
}
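
/*
 * Worked example (illustrative): dp_peer_get_ast_valid() treats
 * index 0 as always valid; index i (i > 0) is valid when bit (i - 1)
 * of the mask is set. For mask = 0x5 (0b101), indices 0, 1 and 3 are
 * valid while index 2 is not.
 */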

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;
	uint8_t i;

	/*
	 * Ast flow override feature is supported
	 * only for connected client
	 */
	if (is_wds)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return;
	}

	/* Valid only in AP mode */
	if (peer->vdev->opmode != wlan_op_mode_ap) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer ast flow map valid only in AP mode\n", __func__);
		goto end;
	}

	/* Making sure the peer is for this mac address */
	if (!qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer mac address mismatch\n", __func__);
		goto end;
	}

	/* Ast entry flow mapping not valid for self peer map */
	if (qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Ast flow mapping not valid for self peer\n", __func__);
		goto end;
	}

	/* Fill up ast index <---> flow id mapping table for this peer */
	for (i = 0; i < DP_MAX_AST_INDEX_PER_PEER; i++) {
		/* Check if this ast index is valid */
		peer->peer_ast_flowq_idx[i].is_valid =
			dp_peer_get_ast_valid(ast_info->ast_valid_mask, i);
		if (!peer->peer_ast_flowq_idx[i].is_valid)
			continue;

		/* Get the flow queue id which is mapped to this ast index */
		peer->peer_ast_flowq_idx[i].flowQ =
			dp_peer_get_flowid_from_flowmask(peer,
					ast_info->ast_flow_mask[i]);
		/*
		 * Update tid valid mask only if flow id is HIGH or
		 * LOW priority
		 */
		if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_HI_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_hi_pri_mask;
		} else if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_LOW_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_low_pri_mask;
		}

		/* Save the ast index for this entry */
		peer->peer_ast_flowq_idx[i].ast_idx = ast_info->ast_idx[i];
	}

	if (soc->cdp_soc.ol_ops->peer_ast_flowid_map) {
		soc->cdp_soc.ol_ops->peer_ast_flowid_map(
				soc->ctrl_psoc, peer->peer_id,
				peer->vdev->vdev_id, peer_mac_addr);
	}

end:
	/* Release peer reference */
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}

/**
 * dp_peer_find_ast_index_by_flowq_id() - API to get ast idx for a given flowid
 * @soc: soc handle
 * @vdev_id: vdev id of the peer
 * @peer_mac_addr: mac address of the peer
 * @flow_id: flow id to find ast index
 * @tid: tid to validate against the flow's tid mask
 *
 * Return: ast index for a given flow id, -1 for fail cases
 */
int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
		uint16_t vdev_id, uint8_t *peer_mac_addr,
		uint8_t flow_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	uint8_t i;
	uint16_t ast_index;

	if (flow_id >= DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid Flow ID %d\n", flow_id);
		return -1;
	}

	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
				peer_mac_addr, 0, vdev_id,
				DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	/*
	 * Loop over the ast entry <----> flow-id mapping to find
	 * which ast index entry has this flow queue id enabled.
	 */
	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		if (peer->peer_ast_flowq_idx[i].flowQ == flow_id)
			/*
			 * Found the matching index for this flow id
			 */
			break;
	}

	/*
	 * No match found for this flow id
	 */
	if (i == DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index not found for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	/* Check whether this ast entry is valid */
	if (!peer->peer_ast_flowq_idx[i].is_valid) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index is invalid for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	if (flow_id == DP_PEER_AST_FLOWQ_HI_PRIO ||
			flow_id == DP_PEER_AST_FLOWQ_LOW_PRIO) {
		/*
		 * Check if this tid is valid for Hi
		 * and Low priority flow id
		 */
		if ((peer->peer_ast_flowq_idx[i].valid_tid_mask
					& (1 << tid))) {
			/* Release peer reference */
			ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return ast_index;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s: TID %d is not valid for flow %d\n",
					__func__, tid, flow_id);
			/*
			 * TID is not valid for this flow.
			 * Return -1.
			 */
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return -1;
		}
	}

	/*
	 * TID valid check not required for
	 * UDP/NON UDP flow id
	 */
	ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return ast_index;
}
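
/*
 * Usage sketch (illustrative only, hypothetical values): resolving the
 * AST index for a high-priority flow; a negative return means there is
 * no valid mapping for that flow/tid pair.
 */
#if 0	/* example, not compiled */
	int ast_idx;

	ast_idx = dp_peer_find_ast_index_by_flowq_id(soc_hdl, vdev_id,
						     peer_mac,
						     DP_PEER_AST_FLOWQ_HI_PRIO,
						     /* tid */ 3);
	if (ast_idx < 0)
		qdf_err("no AST index for hi-prio flow");
#endif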
#endif

void dp_hmwds_ast_add_notify(struct dp_peer *peer,
			     uint8_t *mac_addr,
			     enum cdp_txrx_ast_entry_type type,
			     QDF_STATUS err,
			     bool is_peer_map)
{
	struct dp_vdev *dp_vdev = peer->vdev;
	struct dp_pdev *dp_pdev = dp_vdev->pdev;
	struct cdp_peer_hmwds_ast_add_status add_status;

	/* Ignore ast types other than HM */
	if ((type != CDP_TXRX_AST_TYPE_WDS_HM) &&
	    (type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return;

	/* An existing ast delete is in progress; the add will be
	 * retried after the delete completes. Send status then.
	 */
	if (err == QDF_STATUS_E_AGAIN)
		return;

	/* peer map pending, notify actual status
	 * when peer map is received.
	 */
	if (!is_peer_map && (err == QDF_STATUS_SUCCESS))
		return;

	qdf_mem_zero(&add_status, sizeof(add_status));
	add_status.vdev_id = dp_vdev->vdev_id;
	/* For type CDP_TXRX_AST_TYPE_WDS_HM_SEC dp_peer_add_ast()
	 * returns QDF_STATUS_E_FAILURE as it is a host-only entry.
	 * In such cases set err as success. An err code of
	 * QDF_STATUS_E_ALREADY indicates the entry already exists;
	 * set err as success in that case too. Any other error code
	 * is an actual error.
	 */
	if (((type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	     (err == QDF_STATUS_E_FAILURE)) ||
	    (err == QDF_STATUS_E_ALREADY)) {
		err = QDF_STATUS_SUCCESS;
	}
	add_status.status = err;
	qdf_mem_copy(add_status.peer_mac, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(add_status.ast_mac, mac_addr,
		     QDF_MAC_ADDR_SIZE);
#ifdef WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_HMWDS_AST_ADD_STATUS, dp_pdev->soc,
			     (void *)&add_status, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);
#endif
}

#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @txrx_peer: dp txrx peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be captured, if enabled
 *
 * This function is used for indication whether buffer needs to be
 * sent to stack for freeing or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *txrx_peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;
	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
	struct dp_peer *peer;

	if (qdf_unlikely(!dp_monitor_is_enable_tx_sniffer(pdev) &&
			 !dp_monitor_is_enable_mcopy_mode(pdev) &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!txrx_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st MSDU
	 * per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver 1st MSDU
	 * for each MPDU
	 */
	if (dp_monitor_mcopy_check_deliver(pdev,
					   peer_id,
					   ppdu_id,
					   first_msdu) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_INVAL;

	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
		if (!netbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("No headroom"));
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, txrx_peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_COMP);
	if (peer) {
		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
	}
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = (uint32_t)time_latency;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 *
 * Return: void
 */
void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#endif