/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "enet.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_mon.h"
#endif
#include "dp_txrx_wds.h"

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	5000
#define DP_INVALID_AST_IDX 0xffff
#define DP_INVALID_FLOW_PRIORITY 0xff
#define DP_PEER_AST0_FLOW_MASK 0x4
#define DP_PEER_AST1_FLOW_MASK 0x8
#define DP_PEER_AST2_FLOW_MASK 0x1
#define DP_PEER_AST3_FLOW_MASK 0x2
#define DP_MAX_AST_INDEX_PER_PEER 4

#ifdef WLAN_FEATURE_MULTI_AST_DEL

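/**
 * dp_peer_free_peer_ase_list() - Free all WDS AST entries collected for
 *				  multi-entry deletion
 * @soc: SoC handle
 * @wds_list: list of collected WDS entries to be freed
 *
 * Return: None
 */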
void dp_peer_free_peer_ase_list(struct dp_soc *soc,
				struct peer_del_multi_wds_entries *wds_list)
{
	struct peer_wds_entry_list *wds_entry, *tmp_entry;

	TAILQ_FOREACH_SAFE(wds_entry, &wds_list->ase_list,
			   ase_list_elem, tmp_entry) {
		dp_peer_debug("type: %d mac_addr: " QDF_MAC_ADDR_FMT,
			      wds_entry->type,
			      QDF_MAC_ADDR_REF(wds_entry->dest_addr));
		TAILQ_REMOVE(&wds_list->ase_list, wds_entry, ase_list_elem);
		wds_list->num_entries--;
		qdf_mem_free(wds_entry);
	}
}

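/**
 * dp_pdev_build_peer_ase_list() - Collect a peer's aged-out WDS/DA AST
 *				   entries into a list for multi-entry
 *				   deletion
 * @soc: SoC handle
 * @peer: peer whose AST list is scanned
 * @arg: peer_del_multi_wds_entries list to fill
 *
 * Return: None
 */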
static void
dp_pdev_build_peer_ase_list(struct dp_soc *soc, struct dp_peer *peer,
			    void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct peer_del_multi_wds_entries *list = arg;
	struct peer_wds_entry_list *wds_entry;

	if (!soc || !peer || !arg) {
		dp_peer_err("Invalid input");
		return;
	}

	list->vdev_id = peer->vdev->vdev_id;
	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			ase->is_active = false;
			continue;
		}

		if (ase->delete_in_progress) {
			dp_info_rl("Del set addr:" QDF_MAC_ADDR_FMT " type:%d",
				   QDF_MAC_ADDR_REF(ase->mac_addr.raw),
				   ase->type);
			continue;
		}

		if (ase->is_mapped)
			soc->ast_table[ase->ast_idx] = NULL;

		if (!ase->next_hop) {
			dp_peer_unlink_ast_entry(soc, ase, peer);
			continue;
		}

		wds_entry = (struct peer_wds_entry_list *)
			    qdf_mem_malloc(sizeof(*wds_entry));
		if (!wds_entry) {
			dp_peer_err("%pK: fail to allocate wds_entry", soc);
			dp_peer_free_peer_ase_list(soc, list);
			return;
		}

		DP_STATS_INC(soc, ast.aged_out, 1);
		ase->delete_in_progress = true;
		wds_entry->dest_addr = ase->mac_addr.raw;
		wds_entry->type = ase->type;

		if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE))
			wds_entry->delete_in_fw = false;
		else
			wds_entry->delete_in_fw = true;

		dp_peer_debug("ase->type: %d pdev: %u vdev: %u mac_addr: " QDF_MAC_ADDR_FMT " next_hop: %u peer: %u",
			      ase->type, ase->pdev_id, ase->vdev_id,
			      QDF_MAC_ADDR_REF(ase->mac_addr.raw),
			      ase->next_hop, ase->peer_id);
		TAILQ_INSERT_TAIL(&list->ase_list, wds_entry, ase_list_elem);
		list->num_entries++;
	}
	dp_peer_info("Total num of entries :%d", list->num_entries);
}

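/**
 * dp_peer_age_multi_ast_entries() - Age out WDS/DA AST entries on all
 *				     pdevs, sending one multi-entry delete
 *				     per pdev
 * @soc: SoC handle
 * @arg: unused
 * @mod_id: module id requesting the peer iteration
 *
 * Return: None
 */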
static void
dp_peer_age_multi_ast_entries(struct dp_soc *soc, void *arg,
			      enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_pdev *pdev = NULL;
	struct peer_del_multi_wds_entries wds_list = {0};

	TAILQ_INIT(&wds_list.ase_list);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, dp_pdev_build_peer_ase_list,
				     &wds_list, mod_id);
		if (wds_list.num_entries > 0) {
			dp_peer_ast_send_multi_wds_del(soc, wds_list.vdev_id,
						       &wds_list);
			dp_peer_free_peer_ase_list(soc, &wds_list);
		} else {
			dp_peer_debug("No AST entries for pdev:%u",
				      pdev->pdev_id);
		}
	}
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */

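/**
 * dp_peer_age_ast_entries() - Age out a peer's inactive WDS/DA AST
 *			       entries, bounded by max_ast_ageout_count
 * @soc: SoC handle
 * @peer: peer whose AST list is aged
 * @arg: ast_del_ctxt carrying the age flag and running delete count
 *
 * Return: None
 */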
static void
dp_peer_age_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *temp_ase;
	struct ast_del_ctxt *del_ctxt = (struct ast_del_ctxt *)arg;

	if ((del_ctxt->del_count >= soc->max_ast_ageout_count) &&
	    !del_ctxt->age) {
		return;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		/*
		 * Do not expire static ast entries and HM WDS entries
		 */
		if (ase->type != CDP_TXRX_AST_TYPE_WDS &&
		    ase->type != CDP_TXRX_AST_TYPE_DA)
			continue;

		if (ase->is_active) {
			if (del_ctxt->age)
				ase->is_active = FALSE;

			continue;
		}

		if (del_ctxt->del_count < soc->max_ast_ageout_count) {
			DP_STATS_INC(soc, ast.aged_out, 1);
			dp_peer_del_ast(soc, ase);
			del_ctxt->del_count++;
		} else {
			soc->pending_ageout = true;
			if (!del_ctxt->age)
				break;
		}
	}
}

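/**
 * dp_peer_age_mec_entries() - Age out inactive MEC entries from every
 *			       hash bin; detached entries are freed
 *			       outside the MEC lock
 * @soc: SoC handle
 *
 * Return: None
 */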
static void
dp_peer_age_mec_entries(struct dp_soc *soc)
{
	uint32_t index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		/*
		 * Expire MEC entry every n sec.
		 */
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
				if (mecentry->is_active) {
					mecentry->is_active = FALSE;
					continue;
				}
				dp_peer_mec_detach_entry(soc, mecentry,
							 &free_list);
			}
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	dp_peer_mec_free_list(soc, &free_list);
}

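/**
 * dp_ast_aging_timer_fn() - Periodic AST aging timer callback; ages out
 *			     WDS/DA AST entries and, when NSS offload is
 *			     disabled, MEC entries
 * @soc_hdl: datapath SoC handle
 *
 * Return: None
 */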
#ifdef WLAN_FEATURE_MULTI_AST_DEL
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);

		if (soc->multi_peer_grp_cmd_supported)
			dp_peer_age_multi_ast_entries(soc, NULL, DP_MOD_ID_AST);
		else
			dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
					    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#else
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct ast_del_ctxt del_ctxt = {0};

	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
		del_ctxt.age = true;
		soc->wds_ast_aging_timer_cnt = 0;
	}

	if (soc->pending_ageout || del_ctxt.age) {
		soc->pending_ageout = false;

		/* AST list access lock */
		qdf_spin_lock_bh(&soc->ast_lock);
		dp_soc_iterate_peer(soc, dp_peer_age_ast_entries,
				    &del_ctxt, DP_MOD_ID_AST);
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	/*
	 * If NSS offload is enabled, the MEC timeout
	 * will be managed by NSS.
	 */
	if (qdf_atomic_read(&soc->mec_cnt) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
		dp_peer_age_mec_entries(soc);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->ast_aging_timer,
			      DP_AST_AGING_TIMER_DEFAULT_MS);
}
#endif /* WLAN_FEATURE_MULTI_AST_DEL */

#ifndef IPA_WDS_EASYMESH_FEATURE
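/**
 * dp_soc_wds_attach() - Initialize and start the AST aging timer, unless
 *			 AST handling is offloaded
 * @soc: SoC handle
 *
 * Return: None
 */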
void dp_soc_wds_attach(struct dp_soc *soc)
{
	if (soc->ast_offload_support)
		return;

	soc->wds_ast_aging_timer_cnt = 0;
	soc->pending_ageout = false;
	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
		       dp_ast_aging_timer_fn, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
	qdf_timer_stop(&soc->ast_aging_timer);
	qdf_timer_free(&soc->ast_aging_timer);
}
#else
void dp_soc_wds_attach(struct dp_soc *soc)
{
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

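/**
 * dp_tx_mec_handler() - Add a MEC entry from a TX completion MEC notify;
 *			 the MAC address is carried byte-reversed in the
 *			 completion status words
 * @vdev: vdev on which the MEC notify was received
 * @status: TX completion status carrying the MAC address
 *
 * Return: None
 */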
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	QDF_STATUS add_mec_status;
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;

	if (!vdev->mec_enabled)
		return;

	/* MEC required only in STA mode */
	if (vdev->opmode != wlan_op_mode_sta)
		return;

	soc = vdev->pdev->soc;

	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
					status[(QDF_MAC_ADDR_SIZE - 2) + i];

	dp_peer_debug("%pK: MEC add for mac_addr "QDF_MAC_ADDR_FMT,
		      soc, QDF_MAC_ADDR_REF(mac_addr));

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) {
		add_mec_status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
		dp_peer_debug("%pK: MEC add status %d", vdev, add_mec_status);
	}
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

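/**
 * dp_rx_da_learn() - Add a DA-based HM AST entry for frames with an
 *		      invalid DA, received in AP mode (DA WAR for targets
 *		      without AST override support)
 * @soc: SoC handle
 * @rx_tlv_hdr: RX TLV header of the frame
 * @ta_txrx_peer: transmitter peer
 * @nbuf: received frame
 *
 * Return: None
 */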
void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_txrx_peer *ta_txrx_peer,
	       qdf_nbuf_t nbuf)
{
	struct dp_peer *base_peer;

	/* For HKv2, DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_txrx_peer))
		return;

	if (qdf_unlikely(ta_txrx_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
			 !qdf_nbuf_is_da_mcbc(nbuf))) {
		base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
						  DP_MOD_ID_AST);

		if (base_peer) {
			dp_peer_add_ast(soc,
					base_peer,
					qdf_nbuf_data(nbuf),
					CDP_TXRX_AST_TYPE_DA,
					DP_AST_FLAGS_HM);

			dp_peer_unref_delete(base_peer, DP_MOD_ID_AST);
		}
	}
}

#ifdef WDS_VENDOR_EXTENSION
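/**
 * dp_txrx_set_wds_rx_policy() - Set the WDS RX policy on a vdev's BSS
 *				 peer
 * @soc_hdl: CDP SoC handle
 * @vdev_id: id of the vdev to configure
 * @val: WDS RX policy bitmap (WDS_POLICY_RX_UCAST_4ADDR /
 *	 WDS_POLICY_RX_MCAST_4ADDR)
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */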
QDF_STATUS
dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  u_int32_t val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_MISC);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("vdev is NULL for vdev_id %d"), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);

	if (peer) {
		peer->txrx_peer->wds_ecm.wds_rx_filter = 1;
		peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr =
			(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
		peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr =
			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
	return QDF_STATUS_SUCCESS;
}

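/**
 * dp_txrx_peer_wds_tx_policy_update() - Enable or disable 4-address TX
 *					 for a peer's unicast and
 *					 multicast frames
 * @soc: CDP SoC handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer to update
 * @wds_tx_ucast: non-zero to send unicast in 4-address format
 * @wds_tx_mcast: non-zero to send multicast in 4-address format
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */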
QDF_STATUS
dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
				  uint8_t *peer_mac, int wds_tx_ucast,
				  int wds_tx_mcast)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
						       peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL for mac " QDF_MAC_ADDR_FMT
			     " vdev_id %d"), QDF_MAC_ADDR_REF(peer_mac),
			     vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return QDF_STATUS_E_INVAL;
	}

	if (wds_tx_ucast || wds_tx_mcast) {
		peer->txrx_peer->wds_enabled = 1;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
	} else {
		peer->txrx_peer->wds_enabled = 0;
		peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = 0;
		peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = 0;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "Policy Update set to:\n");
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_enabled %d\n", peer->txrx_peer->wds_enabled);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_ecm.wds_tx_ucast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "peer->wds_ecm.wds_tx_mcast_4addr %d\n",
		  peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr);

	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return QDF_STATUS_SUCCESS;
}

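/**
 * dp_wds_rx_policy_check() - Apply the WDS RX policy to a received frame
 * @rx_tlv_hdr: RX TLV header of the frame
 * @vdev: vdev on which the frame was received
 * @txrx_peer: peer that transmitted the frame
 *
 * Return: 1 if the frame should be accepted, 0 to drop it
 */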
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;
	hal_soc_handle_t hal_soc = vdev->pdev->soc->hal_soc;
	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);

	if (vdev->opmode == wlan_op_mode_ap) {
		bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
		/* if wds policy check is not enabled on this vdev,
		 * accept all frames
		 */
		if (bss_peer && !bss_peer->txrx_peer->wds_ecm.wds_rx_filter) {
			dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
			return 1;
		}
		rx_policy_ucast = bss_peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr;
		dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
	} else {             /* sta mode */
		if (!txrx_peer->wds_ecm.wds_rx_filter)
			return 1;

		rx_policy_ucast = txrx_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = txrx_peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                       self
	 * peer-             rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00  x1     0      bad frame, won't see it
	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00  1x     0      bad frame, won't see it
	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00  x0     0      bad frame, won't see it
	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in AP mode.
	 * 0     x     x     01  xx     1
	 * 0     x     x     10  xx     0
	 * 0     x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(hal_soc, rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(hal_soc, rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!txrx_peer->wds_enabled && rx_3addr && to_ds) ||
		    (txrx_peer->wds_enabled && !rx_mcast &&
		    (rx_4addr == rx_policy_ucast)) ||
		    (txrx_peer->wds_enabled && rx_mcast &&
		    (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {           /* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
				(rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_MULTIQ_SUPPORT

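/**
 * dp_peer_reset_flowq_map() - Invalidate all AST-index-to-flow-queue
 *			       mappings of a peer
 * @peer: dp peer handle
 *
 * Return: None
 */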
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	int i = 0;

	if (!peer)
		return;

	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		peer->peer_ast_flowq_idx[i].is_valid = false;
		peer->peer_ast_flowq_idx[i].valid_tid_mask = false;
		peer->peer_ast_flowq_idx[i].ast_idx = DP_INVALID_AST_IDX;
		peer->peer_ast_flowq_idx[i].flowQ = DP_INVALID_FLOW_PRIORITY;
	}
}

/**
 * dp_peer_get_flowid_from_flowmask() - get flow id from flow mask
 * @peer: dp peer handle
 * @mask: flow mask
 *
 * Return: flow id
 */
static int dp_peer_get_flowid_from_flowmask(struct dp_peer *peer,
		uint8_t mask)
{
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	if (mask & DP_PEER_AST0_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_UDP;
	else if (mask & DP_PEER_AST1_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_NON_UDP;
	else if (mask & DP_PEER_AST2_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_HI_PRIO;
	else if (mask & DP_PEER_AST3_FLOW_MASK)
		return DP_PEER_AST_FLOWQ_LOW_PRIO;

	return DP_PEER_AST_FLOWQ_MAX;
}

/**
 * dp_peer_get_ast_valid() - get ast index valid from mask
 * @mask: mask for ast valid bits
 * @index: index for an ast
 *
 * Return: 1 if ast index is valid from mask else 0
 */
static inline bool dp_peer_get_ast_valid(uint8_t mask, uint16_t index)
{
	if (index == 0)
		return 1;
	return ((mask) & (1 << ((index) - 1)));
}

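/**
 * dp_peer_ast_index_flow_queue_map_create() - Build the AST index to
 *					       flow queue mapping table
 *					       for a connected peer
 * @soc_hdl: datapath SoC handle
 * @is_wds: true if the peer map event is for a WDS peer
 * @peer_id: id of the peer being mapped
 * @peer_mac_addr: MAC address of the peer
 * @ast_info: AST index and flow override info from the peer map event
 *
 * Return: None
 */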
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;
	uint8_t i;

	/*
	 * Ast flow override feature is supported
	 * only for connected clients
	 */
	if (is_wds)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return;
	}

	/* Valid only in AP mode */
	if (peer->vdev->opmode != wlan_op_mode_ap) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer ast flow map valid only in AP mode\n", __func__);
		goto end;
	}

	/* Making sure the peer is for this mac address */
	if (!qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Peer mac address mismatch\n", __func__);
		goto end;
	}

	/* Ast entry flow mapping not valid for self peer map */
	if (qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
				(struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Ast flow mapping not valid for self peer\n", __func__);
		goto end;
	}

	/* Fill up ast index <---> flow id mapping table for this peer */
	for (i = 0; i < DP_MAX_AST_INDEX_PER_PEER; i++) {
		/* Check if this ast index is valid */
		peer->peer_ast_flowq_idx[i].is_valid =
			dp_peer_get_ast_valid(ast_info->ast_valid_mask, i);
		if (!peer->peer_ast_flowq_idx[i].is_valid)
			continue;

		/* Get the flow queue id which is mapped to this ast index */
		peer->peer_ast_flowq_idx[i].flowQ =
			dp_peer_get_flowid_from_flowmask(peer,
					ast_info->ast_flow_mask[i]);
		/*
		 * Update tid valid mask only if flow id is HIGH or
		 * LOW priority
		 */
		if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_HI_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_hi_pri_mask;
		} else if (peer->peer_ast_flowq_idx[i].flowQ ==
				DP_PEER_AST_FLOWQ_LOW_PRIO) {
			peer->peer_ast_flowq_idx[i].valid_tid_mask =
				ast_info->tid_valid_low_pri_mask;
		}

		/* Save the ast index for this entry */
		peer->peer_ast_flowq_idx[i].ast_idx = ast_info->ast_idx[i];
	}

	if (soc->cdp_soc.ol_ops->peer_ast_flowid_map) {
		soc->cdp_soc.ol_ops->peer_ast_flowid_map(
				soc->ctrl_psoc, peer->peer_id,
				peer->vdev->vdev_id, peer_mac_addr);
	}

end:
	/* Release peer reference */
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
}

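/**
 * dp_peer_find_ast_index_by_flowq_id() - Look up the AST index mapped to
 *					  a flow queue id for a peer
 * @soc: CDP SoC handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac_addr: MAC address of the peer
 * @flow_id: flow queue id to look up
 * @tid: TID, validated against the TID mask for HI/LOW priority flows
 *
 * Return: AST index on success, -1 on failure
 */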
int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
		uint16_t vdev_id, uint8_t *peer_mac_addr,
		uint8_t flow_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	uint8_t i;
	uint16_t ast_index;

	if (flow_id >= DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid Flow ID %d\n", flow_id);
		return -1;
	}

	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
				peer_mac_addr, 0, vdev_id,
				DP_MOD_ID_AST);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Invalid peer\n", __func__);
		return -1;
	}

	 /*
	  * Loop over the ast entry <----> flow-id mapping to find
	  * which ast index entry has this flow queue id enabled.
	  */
	for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
		if (peer->peer_ast_flowq_idx[i].flowQ == flow_id)
			/*
			 * Found the matching index for this flow id
			 */
			break;
	}

	/*
	 * No match found for this flow id
	 */
	if (i == DP_PEER_AST_FLOWQ_MAX) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index not found for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	/* Check whether this ast entry is valid */
	if (!peer->peer_ast_flowq_idx[i].is_valid) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: ast index is invalid for flow %d\n", __func__, flow_id);
		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
		return -1;
	}

	if (flow_id == DP_PEER_AST_FLOWQ_HI_PRIO ||
			flow_id == DP_PEER_AST_FLOWQ_LOW_PRIO) {
		/*
		 * check if this tid is valid for Hi
		 * and Low priority flow id
		 */
		if ((peer->peer_ast_flowq_idx[i].valid_tid_mask
					& (1 << tid))) {
			ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
			/* Release peer reference */
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return ast_index;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s: TID %d is not valid for flow %d\n",
					__func__, tid, flow_id);
			/*
			 * TID is not valid for this flow
			 * Return -1
			 */
			dp_peer_unref_delete(peer, DP_MOD_ID_AST);
			return -1;
		}
	}

	/*
	 * TID valid check not required for
	 * UDP/NON UDP flow id
	 */
	ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	return ast_index;
}
#endif

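/**
 * dp_hmwds_ast_add_notify() - Send a WDI event with the add status of an
 *			       HM WDS AST entry
 * @peer: peer the AST entry was added for
 * @mac_addr: MAC address of the AST entry
 * @type: AST entry type
 * @err: status returned by the AST add operation
 * @is_peer_map: true if called in the peer map path
 *
 * Return: None
 */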
void dp_hmwds_ast_add_notify(struct dp_peer *peer,
			     uint8_t *mac_addr,
			     enum cdp_txrx_ast_entry_type type,
			     QDF_STATUS err,
			     bool is_peer_map)
{
	struct dp_vdev *dp_vdev = peer->vdev;
	struct dp_pdev *dp_pdev = dp_vdev->pdev;
	struct cdp_peer_hmwds_ast_add_status add_status;

	/* Ignore ast types other than HM */
	if ((type != CDP_TXRX_AST_TYPE_WDS_HM) &&
	    (type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return;

	/* An existing ast entry delete is in progress; the add will be
	 * retried once the delete completes, and status is sent then.
	 */
	if (err == QDF_STATUS_E_AGAIN)
		return;

	/* Peer map is pending; the actual status is notified
	 * when the peer map is received.
	 */
	if (!is_peer_map && (err == QDF_STATUS_SUCCESS))
		return;

	qdf_mem_zero(&add_status, sizeof(add_status));
	add_status.vdev_id = dp_vdev->vdev_id;
	/* For type CDP_TXRX_AST_TYPE_WDS_HM_SEC, dp_peer_add_ast()
	 * returns QDF_STATUS_E_FAILURE because it is a host-only entry;
	 * treat that as success. QDF_STATUS_E_ALREADY means the entry
	 * already exists, so treat it as success too. Any other error
	 * code is a real error.
	 */
	if (((type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	     (err == QDF_STATUS_E_FAILURE)) ||
	    (err == QDF_STATUS_E_ALREADY)) {
		err = QDF_STATUS_SUCCESS;
	}
	add_status.status = err;
	qdf_mem_copy(add_status.peer_mac, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(add_status.ast_mac, mac_addr,
		     QDF_MAC_ADDR_SIZE);
#ifdef WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_HMWDS_AST_ADD_STATUS, dp_pdev->soc,
			     (void *)&add_status, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);
#endif
}

#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO
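/**
 * dp_get_completion_indication_for_stack() - Prepend a tx_capture_hdr to
 *					      a completed TX frame so it
 *					      can be delivered to the
 *					      stack for tx sniffer, mcopy
 *					      or latency capture
 * @soc: SoC handle
 * @pdev: pdev on which the frame was transmitted
 * @txrx_peer: peer the frame was sent to
 * @ts: HAL TX completion status of the frame
 * @netbuf: completed TX frame
 * @time_latency: latency value recorded for the frame
 *
 * Return: QDF_STATUS_SUCCESS if the frame should be delivered to the
 *	   stack, error code otherwise
 */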
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *txrx_peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;
	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
	struct dp_peer *peer;

	if (qdf_unlikely(!dp_monitor_is_enable_tx_sniffer(pdev) &&
			 !dp_monitor_is_enable_mcopy_mode(pdev) &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!txrx_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("txrx_peer is NULL"));
		return QDF_STATUS_E_INVAL;
	}

	/* If mcopy is enabled and mcopy_mode is M_COPY, deliver the 1st
	 * MSDU per PPDU. If mcopy_mode is M_COPY_EXTENDED, deliver the
	 * 1st MSDU of each MPDU.
	 */
	if (dp_monitor_mcopy_check_deliver(pdev,
					   peer_id,
					   ppdu_id,
					   first_msdu) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_INVAL;

	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
		if (!netbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("No headroom"));
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, txrx_peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_COMP);
	if (peer) {
		qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
	}
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = (uint32_t)time_latency;
	}

	return QDF_STATUS_SUCCESS;
}

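/**
 * dp_send_completion_to_stack() - Deliver a completed TX frame to the
 *				   stack through the WDI_EVENT_TX_DATA
 *				   event
 * @soc: SoC handle
 * @pdev: pdev on which the frame was transmitted
 * @peer_id: id of the peer the frame was sent to
 * @ppdu_id: PPDU id of the frame
 * @netbuf: frame with the prepended tx_capture_hdr
 *
 * Return: None
 */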
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#endif
#endif