xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 
33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
38 #else
39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
43 #endif
44 
45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
46 #ifdef WLAN_MCAST_MLO
47 /* MLO peer id for reinject */
48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
49 #define MAX_GSN_NUM 0x0FFF
50 
51 #ifdef QCA_MULTIPASS_SUPPORT
52 #define INVALID_VLAN_ID         0xFFFF
53 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
54 /**
55  * struct dp_mlo_mpass_buf - Multipass buffer
56  * @vlan_id: vlan_id of frame
57  * @nbuf: pointer to skb buf
58  */
59 struct dp_mlo_mpass_buf {
60 	uint16_t vlan_id;
61 	qdf_nbuf_t  nbuf;
62 };
63 #endif
64 #endif
65 #endif
66 
67 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
68 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
69 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
70 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
71 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
72 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
73 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
74 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
75 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
76 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
77 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
78 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
79 
80 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
81 
82 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
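/**
 * dp_tx_comp_get_peer_id() - extract the SW peer id from a TX completion
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion descriptor
 *
 * With the reduced peer-id field width, the completion carries an
 * ml_peer_valid bit alongside the peer id; fold it back in at
 * soc->peer_id_shift to rebuild the full SW peer id.
 *
 * Return: SW peer id
 */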
83 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
84 					      void *tx_comp_hal_desc)
85 {
86 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
87 	struct dp_tx_comp_peer_id *tx_peer_id =
88 			(struct dp_tx_comp_peer_id *)&peer_id;
89 
90 	return (tx_peer_id->peer_id |
91 	        (tx_peer_id->ml_peer_valid << soc->peer_id_shift));
92 }
93 #else
94 /* Combine ml_peer_valid and peer_id field */
95 #define DP_BE_TX_COMP_PEER_ID_MASK	0x00003fff
96 #define DP_BE_TX_COMP_PEER_ID_SHIFT	0
97 
98 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
99 					      void *tx_comp_hal_desc)
100 {
101 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
102 
103 	return ((peer_id & DP_BE_TX_COMP_PEER_ID_MASK) >>
104 		DP_BE_TX_COMP_PEER_ID_SHIFT);
105 }
106 #endif
107 
108 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
109 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
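/**
 * dp_tx_comp_get_params_from_hal_desc_be() - get SW TX desc from HAL desc
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: pointer to be filled with the SW TX descriptor
 *
 * Use the HW cookie-converted VA when the conversion-done bit is set,
 * otherwise fall back to a SW cookie-to-VA lookup, and record the peer id
 * reported in the completion on the descriptor.
 *
 * Return: None
 */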
110 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
111 					    void *tx_comp_hal_desc,
112 					    struct dp_tx_desc_s **r_tx_desc)
113 {
114 	uint32_t tx_desc_id;
115 
116 	if (qdf_likely(
117 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
118 		/* HW cookie conversion done */
119 		*r_tx_desc = (struct dp_tx_desc_s *)
120 				hal_tx_comp_get_desc_va(tx_comp_hal_desc);
121 	} else {
122 		/* SW do cookie conversion to VA */
123 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
124 		*r_tx_desc =
125 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
126 	}
127 
128 	if (*r_tx_desc)
129 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
130 							       tx_comp_hal_desc);
131 }
132 #else
133 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
134 					    void *tx_comp_hal_desc,
135 					    struct dp_tx_desc_s **r_tx_desc)
136 {
137 	*r_tx_desc = (struct dp_tx_desc_s *)
138 			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
139 
140 	if (*r_tx_desc)
141 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
142 							       tx_comp_hal_desc);
143 }
144 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
145 #else
146 
147 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
148 					    void *tx_comp_hal_desc,
149 					    struct dp_tx_desc_s **r_tx_desc)
150 {
151 	uint32_t tx_desc_id;
152 
153 	/* SW do cookie conversion to VA */
154 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
155 	*r_tx_desc =
156 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
157 
158 	if (*r_tx_desc)
159 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
160 							       tx_comp_hal_desc);
161 }
162 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
163 
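/**
 * dp_tx_process_mec_notify_be() - handle a FW2WBM MEC notify completion
 * @soc: DP soc handle
 * @status: HTT completion status words
 *
 * Pull the vdev id out of the HTT status words and forward the MEC
 * notification to dp_tx_mec_handler() under a vdev reference.
 *
 * Return: None
 */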
164 static inline
165 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
166 {
167 	struct dp_vdev *vdev;
168 	uint8_t vdev_id;
169 	uint32_t *htt_desc = (uint32_t *)status;
170 
171 	qdf_assert_always(!soc->mec_fw_offload);
172 
173 	/*
174 	 * Get vdev id from HTT status word in case of MEC
175 	 * notification
176 	 */
177 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
178 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
179 		return;
180 
181 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
182 				     DP_MOD_ID_HTT_COMP);
183 	if (!vdev)
184 		return;
185 	dp_tx_mec_handler(vdev, status);
186 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
187 }
188 
189 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
190 				     struct dp_tx_desc_s *tx_desc,
191 				     uint8_t *status,
192 				     uint8_t ring_id)
193 {
194 	uint8_t tx_status;
195 	struct dp_pdev *pdev;
196 	struct dp_vdev *vdev = NULL;
197 	struct hal_tx_completion_status ts = {0};
198 	uint32_t *htt_desc = (uint32_t *)status;
199 	struct dp_txrx_peer *txrx_peer;
200 	dp_txrx_ref_handle txrx_ref_handle = NULL;
201 	struct cdp_tid_tx_stats *tid_stats = NULL;
202 	struct htt_soc *htt_handle;
203 	uint8_t vdev_id;
204 
205 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
206 	htt_handle = (struct htt_soc *)soc->htt_handle;
207 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
208 
209 	/*
210 	 * There can be a scenario where the WBM descriptor enqueued via
211 	 * TQM2WBM is consumed first and the TQM completion happens before
212 	 * the MEC notification comes from FW2WBM. Avoid accessing any field
213 	 * of the tx descriptor in case of a MEC notify.
214 	 */
215 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
216 		return dp_tx_process_mec_notify_be(soc, status);
217 
218 	/*
219 	 * If the descriptor is already freed in vdev_detach,
220 	 * continue to next descriptor
221 	 */
222 	if (qdf_unlikely(!tx_desc->flags)) {
223 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
224 				   tx_desc->id);
225 		return;
226 	}
227 
228 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
229 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
230 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
231 		goto release_tx_desc;
232 	}
233 
234 	pdev = tx_desc->pdev;
235 
236 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
237 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
238 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
239 		goto release_tx_desc;
240 	}
241 
242 	qdf_assert(tx_desc->pdev);
243 
244 	vdev_id = tx_desc->vdev_id;
245 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
246 				     DP_MOD_ID_HTT_COMP);
247 
248 	if (qdf_unlikely(!vdev)) {
249 		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
250 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
251 		goto release_tx_desc;
252 	}
253 
254 	switch (tx_status) {
255 	case HTT_TX_FW2WBM_TX_STATUS_OK:
256 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
257 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
258 	{
259 		uint8_t tid;
260 
261 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
262 			ts.peer_id =
263 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
264 						htt_desc[3]);
265 			ts.tid =
266 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
267 						htt_desc[3]);
268 		} else {
269 			ts.peer_id = HTT_INVALID_PEER;
270 			ts.tid = HTT_INVALID_TID;
271 		}
272 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
273 		ts.ppdu_id =
274 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
275 					htt_desc[2]);
276 		ts.ack_frame_rssi =
277 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
278 					htt_desc[2]);
279 
280 		ts.tsf = htt_desc[4];
281 		ts.first_msdu = 1;
282 		ts.last_msdu = 1;
283 		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
284 			     HAL_TX_TQM_RR_FRAME_ACKED :
285 			     HAL_TX_TQM_RR_REM_CMD_REM);
286 		tid = ts.tid;
287 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
288 			tid = CDP_MAX_DATA_TIDS - 1;
289 
290 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
291 
292 		if (qdf_unlikely(pdev->delay_stats_flag) ||
293 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
294 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
295 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
296 			tid_stats->htt_status_cnt[tx_status]++;
297 
298 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
299 						       &txrx_ref_handle,
300 						       DP_MOD_ID_HTT_COMP);
301 		if (qdf_likely(txrx_peer))
302 			dp_tx_update_peer_basic_stats(
303 						txrx_peer,
304 						qdf_nbuf_len(tx_desc->nbuf),
305 						tx_status,
306 						pdev->enhanced_stats_en);
307 
308 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
309 					     ring_id);
310 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
311 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
312 
313 		if (qdf_likely(txrx_peer))
314 			dp_txrx_peer_unref_delete(txrx_ref_handle,
315 						  DP_MOD_ID_HTT_COMP);
316 
317 		break;
318 	}
319 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
320 	{
321 		uint8_t reinject_reason;
322 
323 		reinject_reason =
324 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
325 								htt_desc[1]);
326 		dp_tx_reinject_handler(soc, vdev, tx_desc,
327 				       status, reinject_reason);
328 		break;
329 	}
330 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
331 	{
332 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
333 		break;
334 	}
335 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
336 	{
337 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
338 		goto release_tx_desc;
339 	}
340 	default:
341 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
342 			       tx_status);
343 		goto release_tx_desc;
344 	}
345 
346 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
347 	return;
348 
349 release_tx_desc:
350 	dp_tx_comp_free_buf(soc, tx_desc);
351 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
352 	if (vdev)
353 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
354 }
355 
356 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
357 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
358 /**
359  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
360  * @soc: DP soc structure pointer
361  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
362  *
363  * Return: RBM ID corresponding to TCL ring_id
364  */
365 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
366 					  uint8_t ring_id)
367 {
368 	return 0;
369 }
370 #else
371 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
372 					  uint8_t ring_id)
373 {
374 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
375 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
376 }
377 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
378 #else
379 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
380 					  uint8_t tcl_index)
381 {
382 	uint8_t rbm;
383 
384 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
385 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
386 	return rbm;
387 }
388 #endif
389 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
390 
391 /**
392  * dp_tx_set_min_rates_for_critical_frames() - sets min-rates for critical pkts
393  * @soc: DP soc structure pointer
394  * @hal_tx_desc: HAL descriptor where fields are set
395  * @nbuf: skb to be considered for min rates
396  *
397  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
398  * and uses it to determine if the frame is critical. For a critical frame,
399  * flow override bits are set to classify the frame into HW's high priority
400  * queue. The HW will pick pre-configured min rates for such packets.
401  *
402  * Return: None
403  */
404 static void
405 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
406 					uint32_t *hal_tx_desc,
407 					qdf_nbuf_t nbuf)
408 {
409 /*
410  * Critical frames should be queued to the high priority queue for the TID on
411  * which they are sent out (for the concerned peer).
412  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
413  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
414  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
415  * HOL queue.
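 * For example, who_classify_info_sel = 1 and flow_override = 0 gives
 * htt_msdu_idx = (2 * 1) + 0 = 2, i.e. the HOL queue.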
416  */
417 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
418 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
419 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
420 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
421 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
422 						TX_SEMI_HARD_NOTIFY_E);
423 	}
424 }
425 #else
426 static inline void
427 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
428 					uint32_t *hal_tx_desc_cached,
429 					qdf_nbuf_t nbuf)
430 {
431 }
432 #endif
433 
434 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
435 	defined(WLAN_MCAST_MLO)
436 #ifdef QCA_MULTIPASS_SUPPORT
437 /**
438  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
439  * @be_vdev: Handle to DP be_vdev structure
440  * @ptnr_vdev: DP ptnr_vdev handle
441  * @arg: pointer to dp_mlo_mpass_buf
442  *
443  * Return: None
444  */
445 static void
446 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
447 				 struct dp_vdev *ptnr_vdev,
448 				 void *arg)
449 {
450 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
451 	struct dp_txrx_peer *txrx_peer = NULL;
452 	struct vlan_ethhdr *veh = NULL;
453 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
454 	uint16_t vlan_id = 0;
455 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
456 			(htons(eh->ether_type) != ETH_P_8021Q));
457 
458 	if (qdf_unlikely(not_vlan))
459 		return;
460 	veh = (struct vlan_ethhdr *)eh;
461 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
462 
463 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
464 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
465 		      mpass_peer_list_elem) {
466 		if (vlan_id == txrx_peer->vlan_id) {
467 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
468 			ptr->vlan_id = vlan_id;
469 			return;
470 		}
471 	}
472 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
473 }
474 
475 /**
476  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
477  * @be_vdev: Handle to DP be_vdev structure
478  * @ptnr_vdev: DP ptnr_vdev handle
479  * @arg: pointer to dp_mlo_mpass_buf
480  *
481  * Return: None
482  */
483 static void
484 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
485 			       struct dp_vdev *ptnr_vdev,
486 			       void *arg)
487 {
488 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
489 	struct dp_tx_msdu_info_s msdu_info;
490 	struct dp_vdev_be *be_ptnr_vdev = NULL;
491 	qdf_nbuf_t  nbuf_clone;
492 	uint16_t group_key = 0;
493 
494 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
495 	if (be_vdev != be_ptnr_vdev) {
496 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
497 		if (qdf_unlikely(!nbuf_clone)) {
498 			dp_tx_debug("nbuf clone failed");
499 			return;
500 		}
501 	} else {
502 		nbuf_clone = ptr->nbuf;
503 	}
504 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
505 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
506 	msdu_info.gsn = be_vdev->seq_num;
507 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
508 
509 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
510 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
511 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
512 						msdu_info.meta_data[0], 1);
513 	} else {
514 		/* return when vlan map is not initialized */
515 		if (!ptnr_vdev->iv_vlan_map)
516 			return;
517 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
518 
519 		/*
520 		 * If group key is not installed, drop the frame.
521 		 */
522 
523 		if (!group_key)
524 			return;
525 
526 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
527 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
528 		msdu_info.exception_fw = 1;
529 	}
530 
531 	nbuf_clone = dp_tx_send_msdu_single(
532 					ptnr_vdev,
533 					nbuf_clone,
534 					&msdu_info,
535 					DP_MLO_MCAST_REINJECT_PEER_ID,
536 					NULL);
537 	if (qdf_unlikely(nbuf_clone)) {
538 		dp_info("pkt send failed");
539 		qdf_nbuf_free(nbuf_clone);
540 		return;
541 	}
542 }
543 
544 /**
545  * dp_tx_mlo_mcast_multipass_handler() - check if frame needs multipass processing
546  * @soc: DP soc handle
547  * @vdev: DP vdev handle
548  * @nbuf: nbuf to be enqueued
549  *
550  * Return: true if handling is done else false
551  */
552 static bool
553 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
554 				  struct dp_vdev *vdev,
555 				  qdf_nbuf_t nbuf)
556 {
557 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
558 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
559 	qdf_nbuf_t nbuf_copy = NULL;
560 	struct dp_mlo_mpass_buf mpass_buf;
561 
562 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
563 	mpass_buf.vlan_id = INVALID_VLAN_ID;
564 	mpass_buf.nbuf = nbuf;
565 
566 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
567 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
568 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
569 					    dp_tx_mlo_mcast_multipass_lookup,
570 					    &mpass_buf, DP_MOD_ID_TX);
571 		/*
572 		 * Do not drop the frame when vlan_id doesn't match.
573 		 * Send the frame as it is.
574 		 */
575 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
576 			return false;
577 	}
578 
579 	/* AP can have classic clients, special clients &
580 	 * classic repeaters.
581 	 * 1. Classic clients & special client:
582 	 *	Remove vlan header, find corresponding group key
583 	 *	index, fill in metaheader and enqueue multicast
584 	 *	frame to TCL.
585 	 * 2. Classic repeater:
586 	 *	Pass through to classic repeater with vlan tag
587 	 *	intact without any group key index. Hardware
588 	 *	will know which key to use to send frame to
589 	 *	repeater.
590 	 */
591 	nbuf_copy = qdf_nbuf_copy(nbuf);
592 
593 	/*
594 	 * Send multicast frame to special peers even
595 	 * if pass through to classic repeater fails.
596 	 */
597 	if (nbuf_copy) {
598 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
599 
600 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
601 		mpass_buf_copy.nbuf = nbuf_copy;
602 		/* send frame on partner vdevs */
603 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
604 					    dp_tx_mlo_mcast_multipass_send,
605 					    &mpass_buf_copy, DP_MOD_ID_TX);
606 
607 		/* send frame on mcast primary vdev */
608 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
609 
610 		if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
611 			be_vdev->seq_num = 0;
612 		else
613 			be_vdev->seq_num++;
614 	}
615 
616 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
617 				    dp_tx_mlo_mcast_multipass_send,
618 				    &mpass_buf, DP_MOD_ID_TX);
619 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
620 
621 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
622 		be_vdev->seq_num = 0;
623 	else
624 		be_vdev->seq_num++;
625 
626 	return true;
627 }
628 #else
629 static bool
630 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
631 				  qdf_nbuf_t nbuf)
632 {
633 	return false;
634 }
635 #endif
636 
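/**
 * dp_tx_mcast_mlo_reinject_routing_set() - set MLO mcast reinject routing
 * @soc: DP soc handle
 * @arg: pointer to a flag; non-zero routes reinjection via TQM notify,
 *       zero routes it via FW notify
 *
 * Return: None
 */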
637 void dp_tx_mcast_mlo_reinject_routing_set(struct dp_soc *soc, void *arg)
638 {
639 	hal_soc_handle_t hal_soc = soc->hal_soc;
640 	uint8_t *cmd = (uint8_t *)arg;
641 
642 	if (*cmd)
643 		hal_tx_mcast_mlo_reinject_routing_set(
644 					hal_soc,
645 					HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY);
646 	else
647 		hal_tx_mcast_mlo_reinject_routing_set(
648 					hal_soc,
649 					HAL_TX_MCAST_MLO_REINJECT_FW_NOTIFY);
650 }
651 
652 void
653 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
654 			 struct dp_vdev *ptnr_vdev,
655 			 void *arg)
656 {
657 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
658 	qdf_nbuf_t  nbuf_clone;
659 	struct dp_vdev_be *be_ptnr_vdev = NULL;
660 	struct dp_tx_msdu_info_s msdu_info;
661 
662 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
663 	if (be_vdev != be_ptnr_vdev) {
664 		nbuf_clone = qdf_nbuf_clone(nbuf);
665 		if (qdf_unlikely(!nbuf_clone)) {
666 			dp_tx_debug("nbuf clone failed");
667 			return;
668 		}
669 	} else {
670 		nbuf_clone = nbuf;
671 	}
672 
673 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
674 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
675 	msdu_info.gsn = be_vdev->seq_num;
676 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
677 
678 	nbuf_clone = dp_tx_send_msdu_single(
679 					ptnr_vdev,
680 					nbuf_clone,
681 					&msdu_info,
682 					DP_MLO_MCAST_REINJECT_PEER_ID,
683 					NULL);
684 	if (qdf_unlikely(nbuf_clone)) {
685 		dp_info("pkt send failed");
686 		qdf_nbuf_free(nbuf_clone);
687 		return;
688 	}
689 }
690 
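/*
 * In the MLO mcast path the TCL descriptor carries the vdev id from
 * msdu_info (the partner vdev being enqueued on); the non-MLO variant
 * further below uses vdev->vdev_id directly.
 */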
691 static inline void
692 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
693 			      struct dp_vdev *vdev,
694 			      struct dp_tx_msdu_info_s *msdu_info)
695 {
696 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
697 }
698 
699 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
700 				struct dp_vdev *vdev,
701 				qdf_nbuf_t nbuf)
702 {
703 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
704 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
705 
706 	if (qdf_unlikely(vdev->multipass_en) &&
707 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
708 		return;
709 	/* send frame on partner vdevs */
710 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
711 				    dp_tx_mlo_mcast_pkt_send,
712 				    nbuf, DP_MOD_ID_REINJECT);
713 
714 	/* send frame on mcast primary vdev */
715 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
716 
717 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
718 		be_vdev->seq_num = 0;
719 	else
720 		be_vdev->seq_num++;
721 }
722 #else
723 static inline void
724 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
725 			      struct dp_vdev *vdev,
726 			      struct dp_tx_msdu_info_s *msdu_info)
727 {
728 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
729 }
730 #endif
731 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
732 	!defined(WLAN_MCAST_MLO)
733 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
734 				struct dp_vdev *vdev,
735 				qdf_nbuf_t nbuf)
736 {
737 }
738 #endif
739 
740 #ifdef CONFIG_SAWF
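/**
 * dp_sawf_config_be() - apply SAWF queue selection to the TCL descriptor
 * @soc: DP soc handle
 * @hal_tx_desc_cached: cached TCL descriptor to update
 * @fw_metadata: pointer to the TCL command metadata to update
 * @nbuf: skb carrying the SAWF queue id
 *
 * When SAWF is enabled, derive the service queue id from the nbuf and set
 * the HLOS TID, flow override and who_classify_info_sel fields accordingly.
 *
 * Return: None
 */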
741 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
742 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
743 {
744 	uint8_t q_id = 0;
745 
746 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
747 		return;
748 
749 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
750 	q_id = dp_sawf_queue_id_get(nbuf);
751 
752 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
753 		return;
754 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, DP_TX_HLOS_TID_GET(q_id));
755 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
756 					     DP_TX_FLOW_OVERRIDE_ENABLE);
757 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
758 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
759 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
760 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
761 }
762 
763 #else
764 
765 static inline
766 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
767 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
768 {
769 }
770 
771 static inline
772 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
773 					 struct dp_tx_desc_s *tx_desc)
774 {
775 	return QDF_STATUS_SUCCESS;
776 }
777 
778 static inline
779 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
780 					      struct dp_tx_desc_s *tx_desc)
781 {
782 	return QDF_STATUS_SUCCESS;
783 }
784 #endif
785 
786 QDF_STATUS
787 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
788 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
789 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
790 		    struct dp_tx_msdu_info_s *msdu_info)
791 {
792 	void *hal_tx_desc;
793 	uint32_t *hal_tx_desc_cached;
794 	int coalesce = 0;
795 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
796 	uint8_t ring_id = tx_q->ring_id;
797 	uint8_t tid = msdu_info->tid;
798 	struct dp_vdev_be *be_vdev;
799 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
800 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
801 	hal_ring_handle_t hal_ring_hdl = NULL;
802 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
803 
804 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
805 
806 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
807 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
808 		return QDF_STATUS_E_RESOURCES;
809 	}
810 
811 	if (qdf_unlikely(tx_exc_metadata)) {
812 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
813 				   CDP_INVALID_TX_ENCAP_TYPE) ||
814 				   (tx_exc_metadata->tx_encap_type ==
815 				    vdev->tx_encap_type));
816 
817 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
818 			qdf_assert_always((tx_exc_metadata->sec_type ==
819 					   CDP_INVALID_SEC_TYPE) ||
820 					   tx_exc_metadata->sec_type ==
821 					   vdev->sec_type);
822 	}
823 
824 	hal_tx_desc_cached = (void *)cached_desc;
825 
826 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
827 		dp_sawf_config_be(soc, hal_tx_desc_cached,
828 				  &fw_metadata, tx_desc->nbuf);
829 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
830 	}
831 
832 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
833 				    tx_desc->dma_addr, bm_id, tx_desc->id,
834 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
835 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
836 				   vdev->lmac_id);
837 
838 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
839 					vdev->bss_ast_idx);
840 	/*
841 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
842 	 * so there is no explicit field used for DSCP_TID_TABLE_NUM.
843 	 */
844 
845 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
846 				      (vdev->bss_ast_hash & 0xF));
847 
848 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
849 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
850 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
851 
852 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
853 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
854 
855 	/* verify checksum offload configuration */
856 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
857 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
858 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
859 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
860 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
861 	}
862 
863 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, be_vdev->bank_id);
864 
865 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
866 
867 	if (tid != HTT_TX_EXT_TID_INVALID)
868 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
869 
870 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
871 						tx_desc->nbuf);
872 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
873 
874 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
875 
876 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
877 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
878 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
879 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
880 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
881 		return status;
882 	}
883 
884 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
885 	if (qdf_unlikely(!hal_tx_desc)) {
886 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
887 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
888 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
889 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
890 		goto ring_access_fail;
891 	}
892 
893 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
894 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
895 
896 	/* Sync cached descriptor with HW */
897 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
898 
899 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
900 					    msdu_info, ring_id);
901 
902 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
903 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
904 	dp_tx_update_stats(soc, tx_desc, ring_id);
905 	status = QDF_STATUS_SUCCESS;
906 
907 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
908 				 hal_ring_hdl, soc);
909 
910 ring_access_fail:
911 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
912 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
913 			     qdf_get_log_timestamp(), tx_desc->nbuf);
914 	return status;
915 }
916 
917 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
918 {
919 	int i, num_tcl_banks;
920 
921 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
922 
923 	qdf_assert_always(num_tcl_banks);
924 	be_soc->num_bank_profiles = num_tcl_banks;
925 
926 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
927 					       sizeof(*be_soc->bank_profiles));
928 	if (!be_soc->bank_profiles) {
929 		dp_err("unable to allocate memory for DP TX Profiles!");
930 		return QDF_STATUS_E_NOMEM;
931 	}
932 
933 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
934 
935 	for (i = 0; i < num_tcl_banks; i++) {
936 		be_soc->bank_profiles[i].is_configured = false;
937 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
938 	}
939 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
940 	return QDF_STATUS_SUCCESS;
941 }
942 
943 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
944 {
945 	qdf_mem_free(be_soc->bank_profiles);
946 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
947 }
948 
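/**
 * dp_tx_get_vdev_bank_config() - derive the TCL bank config for a vdev
 * @be_vdev: BE vdev handle
 * @bank_config: bank configuration to fill
 *
 * Populate encap/encrypt type, address search flags, mesh enable,
 * DSCP-TID map id, vdev id check and pmac id from the vdev so that a
 * matching (or new) TCL bank profile can be selected.
 *
 * Return: None
 */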
949 static
950 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
951 				union hal_tx_bank_config *bank_config)
952 {
953 	struct dp_vdev *vdev = &be_vdev->vdev;
954 
955 	bank_config->epd = 0;
956 
957 	bank_config->encap_type = vdev->tx_encap_type;
958 
959 	/* Only valid for raw frames. Needs work for RAW mode */
960 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
961 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
962 	} else {
963 		bank_config->encrypt_type = 0;
964 	}
965 
966 	bank_config->src_buffer_swap = 0;
967 	bank_config->link_meta_swap = 0;
968 
969 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
970 	    vdev->opmode == wlan_op_mode_sta) {
971 		bank_config->index_lookup_enable = 1;
972 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
973 		bank_config->addrx_en = 0;
974 		bank_config->addry_en = 0;
975 	} else {
976 		bank_config->index_lookup_enable = 0;
977 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
978 		bank_config->addrx_en =
979 			(vdev->hal_desc_addr_search_flags &
980 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
981 		bank_config->addry_en =
982 			(vdev->hal_desc_addr_search_flags &
983 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
984 	}
985 
986 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
987 
988 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
989 
990 	/* Disabling vdev id check for now. Needs revisit. */
991 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
992 
993 	bank_config->pmac_id = vdev->lmac_id;
994 }
995 
996 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
997 			   struct dp_vdev_be *be_vdev)
998 {
999 	char *temp_str = "";
1000 	bool found_match = false;
1001 	int bank_id = DP_BE_INVALID_BANK_ID;
1002 	int i;
1003 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1004 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1005 	union hal_tx_bank_config vdev_config = {0};
1006 
1007 	/* convert vdev params into hal_tx_bank_config */
1008 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1009 
1010 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1011 	/* go over all banks and find a matching/unconfigured/unused bank */
1012 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1013 		if (be_soc->bank_profiles[i].is_configured &&
1014 		    (be_soc->bank_profiles[i].bank_config.val ^
1015 						vdev_config.val) == 0) {
1016 			found_match = true;
1017 			break;
1018 		}
1019 
1020 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1021 		    !be_soc->bank_profiles[i].is_configured)
1022 			unconfigured_slot = i;
1023 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1024 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1025 			zero_ref_count_slot = i;
1026 	}
1027 
1028 	if (found_match) {
1029 		temp_str = "matching";
1030 		bank_id = i;
1031 		goto inc_ref_and_return;
1032 	}
1033 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1034 		temp_str = "unconfigured";
1035 		bank_id = unconfigured_slot;
1036 		goto configure_and_return;
1037 	}
1038 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1039 		temp_str = "zero_ref_count";
1040 		bank_id = zero_ref_count_slot;
1041 	}
1042 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1043 		dp_alert("unable to find TX bank!");
1044 		QDF_BUG(0);
1045 		return bank_id;
1046 	}
1047 
1048 configure_and_return:
1049 	be_soc->bank_profiles[bank_id].is_configured = true;
1050 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1051 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1052 				      &be_soc->bank_profiles[bank_id].bank_config,
1053 				      bank_id);
1054 inc_ref_and_return:
1055 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1056 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1057 
1058 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1059 		temp_str, bank_id, vdev_config.val,
1060 		be_soc->bank_profiles[bank_id].bank_config.val,
1061 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1062 
1063 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1064 		be_soc->bank_profiles[bank_id].bank_config.epd,
1065 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1066 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1067 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1068 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1069 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1070 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1071 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1072 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1073 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1074 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1075 
1076 	return bank_id;
1077 }
1078 
1079 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
1080 			    struct dp_vdev_be *be_vdev)
1081 {
1082 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1083 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
1084 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1085 }
1086 
1087 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
1088 			       struct dp_vdev_be *be_vdev)
1089 {
1090 	dp_tx_put_bank_profile(be_soc, be_vdev);
1091 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1092 }
1093 
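/*
 * Walk the TX descriptor freelist for this pool and publish each
 * descriptor VA into the cookie-conversion secondary page tables (SPT),
 * assigning every descriptor the HW cookie-conversion id generated from
 * its page index and entry index.
 */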
1094 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1095 				   uint32_t num_elem,
1096 				   uint8_t pool_id)
1097 {
1098 	struct dp_tx_desc_pool_s *tx_desc_pool;
1099 	struct dp_hw_cookie_conversion_t *cc_ctx;
1100 	struct dp_soc_be *be_soc;
1101 	struct dp_spt_page_desc *page_desc;
1102 	struct dp_tx_desc_s *tx_desc;
1103 	uint32_t ppt_idx = 0;
1104 	uint32_t avail_entry_index = 0;
1105 
1106 	if (!num_elem) {
1107 		dp_err("desc_num 0 !!");
1108 		return QDF_STATUS_E_FAILURE;
1109 	}
1110 
1111 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1112 	tx_desc_pool = &soc->tx_desc[pool_id];
1113 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1114 
1115 	tx_desc = tx_desc_pool->freelist;
1116 	page_desc = &cc_ctx->page_desc_base[0];
1117 	while (tx_desc) {
1118 		if (avail_entry_index == 0) {
1119 			if (ppt_idx >= cc_ctx->total_page_num) {
1120 				dp_alert("insufficient secondary page tables");
1121 				qdf_assert_always(0);
1122 			}
1123 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1124 		}
1125 
1126 		/* put each TX Desc VA to SPT pages and
1127 		 * get corresponding ID
1128 		 */
1129 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1130 					 avail_entry_index,
1131 					 tx_desc);
1132 		tx_desc->id =
1133 			dp_cc_desc_id_generate(page_desc->ppt_index,
1134 					       avail_entry_index);
1135 		tx_desc->pool_id = pool_id;
1136 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1137 		tx_desc = tx_desc->next;
1138 		avail_entry_index = (avail_entry_index + 1) &
1139 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1140 	}
1141 
1142 	return QDF_STATUS_SUCCESS;
1143 }
1144 
1145 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
1146 			       struct dp_tx_desc_pool_s *tx_desc_pool,
1147 			       uint8_t pool_id)
1148 {
1149 	struct dp_spt_page_desc *page_desc;
1150 	struct dp_soc_be *be_soc;
1151 	int i = 0;
1152 	struct dp_hw_cookie_conversion_t *cc_ctx;
1153 
1154 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1155 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1156 
1157 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1158 		page_desc = &cc_ctx->page_desc_base[i];
1159 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1160 	}
1161 }
1162 
1163 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1164 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1165 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1166 			       uint32_t quota)
1167 {
1168 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1169 	uint32_t work_done = 0;
1170 
1171 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1172 			DP_SRNG_THRESH_NEAR_FULL)
1173 		return 0;
1174 
1175 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1176 	work_done++;
1177 
1178 	return work_done;
1179 }
1180 #endif
1181 
1182 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1183 	defined(CONFIG_SAWF)
1184 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
1185 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
1186 
1187 #define HW_TX_DELAY_MAX                       0x1000000
1188 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
1189 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
1190 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
1191 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
1192 	 HW_TX_DELAY_MASK)
1193 
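/**
 * dp_mlo_compute_hw_delay_us() - compute HW TX delay for an MLO link
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @ts: TX completion status from HAL
 * @delay_us: filled with the computed delay in microseconds
 *
 * Extract the HW link id from the ppdu id, convert the TQM enqueue
 * buffer_timestamp to microseconds (left shift by
 * TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US), apply the per-link TSF2 and TQM
 * offsets, and return the wrap-around-safe difference between the
 * completion TSF and the enqueue time.
 *
 * Return: QDF_STATUS_SUCCESS on success, a QDF error code otherwise
 */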
1194 static inline
1195 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1196 				      struct dp_vdev *vdev,
1197 				      struct hal_tx_completion_status *ts,
1198 				      uint32_t *delay_us)
1199 {
1200 	uint32_t ppdu_id;
1201 	uint8_t link_id_offset, link_id_bits;
1202 	uint8_t hw_link_id;
1203 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1204 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1205 	uint32_t delay;
1206 	int32_t delta_tsf2, delta_tqm;
1207 
1208 	if (!ts->valid)
1209 		return QDF_STATUS_E_INVAL;
1210 
1211 	link_id_offset = soc->link_id_offset;
1212 	link_id_bits = soc->link_id_bits;
1213 	ppdu_id = ts->ppdu_id;
1214 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1215 					   link_id_bits);
1216 
1217 	msdu_tqm_enqueue_tstamp_us =
1218 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1219 	msdu_compl_tsf_tstamp_us = ts->tsf;
1220 
1221 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1222 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1223 
1224 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1225 			delta_tqm) & HW_TX_DELAY_MASK;
1226 
1227 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1228 			delta_tsf2) & HW_TX_DELAY_MASK;
1229 
1230 	delay = (final_msdu_compl_tsf_tstamp_us -
1231 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1232 
1233 	if (delay > HW_TX_DELAY_MAX)
1234 		return QDF_STATUS_E_FAILURE;
1235 
1236 	if (delay_us)
1237 		*delay_us = delay;
1238 
1239 	return QDF_STATUS_SUCCESS;
1240 }
1241 #else
1242 static inline
1243 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1244 				      struct dp_vdev *vdev,
1245 				      struct hal_tx_completion_status *ts,
1246 				      uint32_t *delay_us)
1247 {
1248 	return QDF_STATUS_SUCCESS;
1249 }
1250 #endif
1251 
1252 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1253 				     struct dp_vdev *vdev,
1254 				     struct hal_tx_completion_status *ts,
1255 				     uint32_t *delay_us)
1256 {
1257 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1258 }
1259