xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 
33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
38 #else
39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
43 #endif
44 
45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
46 #ifdef WLAN_MCAST_MLO
47 /* MLO peer id for reinject */
48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
49 #define MAX_GSN_NUM 0x0FFF
50 
51 #ifdef QCA_MULTIPASS_SUPPORT
52 #define INVALID_VLAN_ID         0xFFFF
53 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
54 /**
55  * struct dp_mlo_mpass_buf - Multipass buffer
56  * @vlan_id: vlan_id of frame
57  * @nbuf: pointer to skb buf
58  */
59 struct dp_mlo_mpass_buf {
60 	uint16_t vlan_id;
61 	qdf_nbuf_t  nbuf;
62 };
63 #endif
64 #endif
65 #endif
66 
67 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
68 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
69 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
70 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
71 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
72 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
73 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
74 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
75 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
76 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
77 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
78 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
79 
80 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
81 
82 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
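/**
 * dp_tx_comp_get_peer_id() - Extract the SW peer id from a tx completion
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL tx completion descriptor
 *
 * With the reduced peer id field width, the ml_peer_valid bit travels in
 * the completion's peer id field and is folded back into the SW peer id
 * at the bit position given by soc->peer_id_shift.
 *
 * Return: SW peer id with the MLO peer bit restored when applicable
 */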
83 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
84 					      void *tx_comp_hal_desc)
85 {
86 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
87 	struct dp_tx_comp_peer_id *tx_peer_id =
88 			(struct dp_tx_comp_peer_id *)&peer_id;
89 
90 	return (tx_peer_id->peer_id |
91 	        (tx_peer_id->ml_peer_valid << soc->peer_id_shift));
92 }
93 #else
94 /* Combine ml_peer_valid and peer_id field */
95 #define DP_BE_TX_COMP_PEER_ID_MASK	0x00003fff
96 #define DP_BE_TX_COMP_PEER_ID_SHIFT	0
97 
98 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
99 					      void *tx_comp_hal_desc)
100 {
101 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
102 
103 	return ((peer_id & DP_BE_TX_COMP_PEER_ID_MASK) >>
104 		DP_BE_TX_COMP_PEER_ID_SHIFT);
105 }
106 #endif
107 
108 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
109 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
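/**
 * dp_tx_comp_get_params_from_hal_desc_be() - Get the SW tx descriptor from a
 *	HAL tx completion descriptor
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL tx completion descriptor
 * @r_tx_desc: double pointer filled with the SW tx descriptor
 *
 * If HW cookie conversion has completed for this entry, the descriptor VA is
 * read directly from the completion; otherwise SW converts the cookie
 * (descriptor id) to a VA through dp_cc_desc_find(). The peer id from the
 * completion is then recorded in the recovered tx descriptor.
 */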
110 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
111 					    void *tx_comp_hal_desc,
112 					    struct dp_tx_desc_s **r_tx_desc)
113 {
114 	uint32_t tx_desc_id;
115 
116 	if (qdf_likely(
117 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
118 		/* HW cookie conversion done */
119 		*r_tx_desc = (struct dp_tx_desc_s *)
120 				hal_tx_comp_get_desc_va(tx_comp_hal_desc);
121 	} else {
122 		/* SW does cookie conversion to VA */
123 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
124 		*r_tx_desc =
125 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
126 	}
127 
128 	if (*r_tx_desc)
129 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
130 							       tx_comp_hal_desc);
131 }
132 #else
133 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
134 					    void *tx_comp_hal_desc,
135 					    struct dp_tx_desc_s **r_tx_desc)
136 {
137 	*r_tx_desc = (struct dp_tx_desc_s *)
138 			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
139 
140 	if (*r_tx_desc)
141 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
142 							       tx_comp_hal_desc);
143 }
144 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
145 #else
146 
147 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
148 					    void *tx_comp_hal_desc,
149 					    struct dp_tx_desc_s **r_tx_desc)
150 {
151 	uint32_t tx_desc_id;
152 
153 	/* SW does cookie conversion to VA */
154 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
155 	*r_tx_desc =
156 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
157 
158 	if (*r_tx_desc)
159 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
160 							       tx_comp_hal_desc);
161 }
162 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
163 
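/**
 * dp_tx_process_mec_notify_be() - Handle a MEC notify event from FW2WBM
 * @soc: DP soc handle
 * @status: HTT status words from the WBM release descriptor
 *
 * Extracts the vdev id from the HTT status words and runs the MEC handler
 * for that vdev. Applicable only when MEC learning is not offloaded to FW
 * (soc->mec_fw_offload is false).
 */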
164 static inline
165 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
166 {
167 	struct dp_vdev *vdev;
168 	uint8_t vdev_id;
169 	uint32_t *htt_desc = (uint32_t *)status;
170 
171 	qdf_assert_always(!soc->mec_fw_offload);
172 
173 	/*
174 	 * Get vdev id from HTT status word in case of MEC
175 	 * notification
176 	 */
177 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
178 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
179 		return;
180 
181 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
182 				     DP_MOD_ID_HTT_COMP);
183 	if (!vdev)
184 		return;
185 	dp_tx_mec_handler(vdev, status);
186 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
187 }
188 
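/**
 * dp_tx_process_htt_completion_be() - Process a FW2WBM (HTT) tx completion
 * @soc: DP soc handle
 * @tx_desc: SW tx descriptor associated with the completion
 * @status: HTT status words from the WBM release descriptor
 * @ring_id: tx completion ring id
 *
 * OK/DROP/TTL statuses are converted into a hal_tx_completion_status and fed
 * to the regular completion path, REINJECT and INSPECT are routed to their
 * handlers, and VDEVID_MISMATCH drops the frame. MEC notify events are
 * handled without touching the tx descriptor.
 */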
189 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
190 				     struct dp_tx_desc_s *tx_desc,
191 				     uint8_t *status,
192 				     uint8_t ring_id)
193 {
194 	uint8_t tx_status;
195 	struct dp_pdev *pdev;
196 	struct dp_vdev *vdev = NULL;
197 	struct hal_tx_completion_status ts = {0};
198 	uint32_t *htt_desc = (uint32_t *)status;
199 	struct dp_txrx_peer *txrx_peer;
200 	dp_txrx_ref_handle txrx_ref_handle = NULL;
201 	struct cdp_tid_tx_stats *tid_stats = NULL;
202 	struct htt_soc *htt_handle;
203 	uint8_t vdev_id;
204 
205 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
206 	htt_handle = (struct htt_soc *)soc->htt_handle;
207 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
208 
209 	/*
210 	 * There can be a scenario where the descriptor enqueued on TQM2WBM
211 	 * is consumed by WBM first, so the TQM completion can arrive before
212 	 * the MEC notification comes on FW2WBM. Do not access any field of
213 	 * the tx descriptor in the MEC notify case.
214 	 */
215 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
216 		return dp_tx_process_mec_notify_be(soc, status);
217 
218 	/*
219 	 * If the descriptor is already freed in vdev_detach,
220 	 * continue to next descriptor
221 	 */
222 	if (qdf_unlikely(!tx_desc->flags)) {
223 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
224 				   tx_desc->id);
225 		return;
226 	}
227 
228 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
229 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
230 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
231 		goto release_tx_desc;
232 	}
233 
234 	pdev = tx_desc->pdev;
235 
236 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
237 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
238 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
239 		goto release_tx_desc;
240 	}
241 
242 	qdf_assert(tx_desc->pdev);
243 
244 	vdev_id = tx_desc->vdev_id;
245 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
246 				     DP_MOD_ID_HTT_COMP);
247 
248 	if (qdf_unlikely(!vdev)) {
249 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
250 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
251 		goto release_tx_desc;
252 	}
253 
254 	switch (tx_status) {
255 	case HTT_TX_FW2WBM_TX_STATUS_OK:
256 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
257 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
258 	{
259 		uint8_t tid;
260 
261 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
262 			ts.peer_id =
263 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
264 						htt_desc[3]);
265 			ts.tid =
266 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
267 						htt_desc[3]);
268 		} else {
269 			ts.peer_id = HTT_INVALID_PEER;
270 			ts.tid = HTT_INVALID_TID;
271 		}
272 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
273 		ts.ppdu_id =
274 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
275 					htt_desc[2]);
276 		ts.ack_frame_rssi =
277 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
278 					htt_desc[2]);
279 
280 		ts.tsf = htt_desc[4];
281 		ts.first_msdu = 1;
282 		ts.last_msdu = 1;
283 		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
284 			     HAL_TX_TQM_RR_FRAME_ACKED :
285 			     HAL_TX_TQM_RR_REM_CMD_REM);
286 		tid = ts.tid;
287 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
288 			tid = CDP_MAX_DATA_TIDS - 1;
289 
290 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
291 
292 		if (qdf_unlikely(pdev->delay_stats_flag) ||
293 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
294 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
295 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
296 			tid_stats->htt_status_cnt[tx_status]++;
297 
298 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
299 						       &txrx_ref_handle,
300 						       DP_MOD_ID_HTT_COMP);
301 		if (qdf_likely(txrx_peer))
302 			dp_tx_update_peer_basic_stats(
303 						txrx_peer,
304 						qdf_nbuf_len(tx_desc->nbuf),
305 						tx_status,
306 						pdev->enhanced_stats_en);
307 
308 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
309 					     ring_id);
310 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
311 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
312 
313 		if (qdf_likely(txrx_peer))
314 			dp_txrx_peer_unref_delete(txrx_ref_handle,
315 						  DP_MOD_ID_HTT_COMP);
316 
317 		break;
318 	}
319 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
320 	{
321 		uint8_t reinject_reason;
322 
323 		reinject_reason =
324 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
325 								htt_desc[1]);
326 		dp_tx_reinject_handler(soc, vdev, tx_desc,
327 				       status, reinject_reason);
328 		break;
329 	}
330 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
331 	{
332 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
333 		break;
334 	}
335 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
336 	{
337 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
338 		goto release_tx_desc;
339 	}
340 	default:
341 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
342 			       tx_status);
343 		goto release_tx_desc;
344 	}
345 
346 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
347 	return;
348 
349 release_tx_desc:
350 	dp_tx_comp_free_buf(soc, tx_desc, false);
351 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
352 	if (vdev)
353 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
354 }
355 
356 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
357 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
358 /**
359  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion
360  * @soc: DP soc structure pointer
361  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
362  *
363  * Return: RBM ID corresponding to TCL ring_id
364  */
365 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
366 					  uint8_t ring_id)
367 {
368 	return 0;
369 }
370 #else
371 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
372 					  uint8_t ring_id)
373 {
374 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
375 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
376 }
377 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
378 #else
379 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
380 					  uint8_t tcl_index)
381 {
382 	uint8_t rbm;
383 
384 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
385 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
386 	return rbm;
387 }
388 #endif
389 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
390 
391 /**
392  * dp_tx_set_min_rates_for_critical_frames() - set min rates for critical pkts
393  * @soc: DP soc structure pointer
394  * @hal_tx_desc: HAL descriptor where fields are set
395  * @nbuf: skb to be considered for min rates
396  *
397  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
398  * and uses it to determine if the frame is critical. For a critical frame,
399  * flow override bits are set to classify the frame into HW's high priority
400  * queue. The HW will pick pre-configured min rates for such packets.
401  *
402  * Return: None
403  */
404 static void
405 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
406 					uint32_t *hal_tx_desc,
407 					qdf_nbuf_t nbuf)
408 {
409 /*
410  * Critical frames should be queued to the high priority queue for the TID
411  * on which they are sent out (for the concerned peer).
412  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
413  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
414  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
415  * HOL queue.
416  */
417 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
418 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
419 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
420 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
421 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
422 						TX_SEMI_HARD_NOTIFY_E);
423 	}
424 }
425 #else
426 static inline void
427 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
428 					uint32_t *hal_tx_desc_cached,
429 					qdf_nbuf_t nbuf)
430 {
431 }
432 #endif
433 
434 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
435 	defined(WLAN_MCAST_MLO)
436 #ifdef QCA_MULTIPASS_SUPPORT
437 /**
438  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
439  * @be_vdev: Handle to DP be_vdev structure
440  * @ptnr_vdev: DP ptnr_vdev handle
441  * @arg: pointer to dp_mlo_mpass_buf
442  *
443  * Return: None
444  */
445 static void
446 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
447 				 struct dp_vdev *ptnr_vdev,
448 				 void *arg)
449 {
450 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
451 	struct dp_txrx_peer *txrx_peer = NULL;
452 	struct vlan_ethhdr *veh = NULL;
453 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
454 	uint16_t vlan_id = 0;
455 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
456 			(htons(eh->ether_type) != ETH_P_8021Q));
457 
458 	if (qdf_unlikely(not_vlan))
459 		return;
460 	veh = (struct vlan_ethhdr *)eh;
461 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
462 
463 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
464 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
465 		      mpass_peer_list_elem) {
466 		if (vlan_id == txrx_peer->vlan_id) {
467 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
468 			ptr->vlan_id = vlan_id;
469 			return;
470 		}
471 	}
472 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
473 }
474 
475 /**
476  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
477  * @be_vdev: Handle to DP be_vdev structure
478  * @ptnr_vdev: DP ptnr_vdev handle
479  * @arg: pointer to dp_mlo_mpass_buf
480  *
481  * Return: None
482  */
483 static void
484 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
485 			       struct dp_vdev *ptnr_vdev,
486 			       void *arg)
487 {
488 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
489 	struct dp_tx_msdu_info_s msdu_info;
490 	struct dp_vdev_be *be_ptnr_vdev = NULL;
491 	qdf_nbuf_t  nbuf_clone;
492 	uint16_t group_key = 0;
493 
494 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
495 	if (be_vdev != be_ptnr_vdev) {
496 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
497 		if (qdf_unlikely(!nbuf_clone)) {
498 			dp_tx_debug("nbuf clone failed");
499 			return;
500 		}
501 	} else {
502 		nbuf_clone = ptr->nbuf;
503 	}
504 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
505 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
506 	msdu_info.gsn = be_vdev->seq_num;
507 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
508 
509 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
510 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
511 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
512 						msdu_info.meta_data[0], 1);
513 	} else {
514 		/* return when vlan map is not initialized */
515 		if (!ptnr_vdev->iv_vlan_map)
516 			return;
517 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
518 
519 		/*
520 		 * If group key is not installed, drop the frame.
521 		 */
522 
523 		if (!group_key)
524 			return;
525 
526 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
527 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
528 		msdu_info.exception_fw = 1;
529 	}
530 
531 	nbuf_clone = dp_tx_send_msdu_single(
532 					ptnr_vdev,
533 					nbuf_clone,
534 					&msdu_info,
535 					DP_MLO_MCAST_REINJECT_PEER_ID,
536 					NULL);
537 	if (qdf_unlikely(nbuf_clone)) {
538 		dp_info("pkt send failed");
539 		qdf_nbuf_free(nbuf_clone);
540 		return;
541 	}
542 }
543 
544 /**
545  * dp_tx_mlo_mcast_multipass_handler() - Check if frame needs multipass processing
546  * @soc: DP soc handle
547  * @vdev: DP vdev handle
548  * @nbuf: nbuf to be enqueued
549  *
550  * Return: true if handling is done else false
551  */
552 static bool
553 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
554 				  struct dp_vdev *vdev,
555 				  qdf_nbuf_t nbuf)
556 {
557 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
558 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
559 	qdf_nbuf_t nbuf_copy = NULL;
560 	struct dp_mlo_mpass_buf mpass_buf;
561 
562 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
563 	mpass_buf.vlan_id = INVALID_VLAN_ID;
564 	mpass_buf.nbuf = nbuf;
565 
566 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
567 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
568 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
569 					    dp_tx_mlo_mcast_multipass_lookup,
570 					    &mpass_buf, DP_MOD_ID_TX);
571 		/*
572 		 * Do not drop the frame when vlan_id doesn't match.
573 		 * Send the frame as it is.
574 		 */
575 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
576 			return false;
577 	}
578 
579 	/* AP can have classic clients, special clients &
580 	 * classic repeaters.
581 	 * 1. Classic clients & special client:
582 	 *	Remove vlan header, find corresponding group key
583 	 *	index, fill in metaheader and enqueue multicast
584 	 *	frame to TCL.
585 	 * 2. Classic repeater:
586 	 *	Pass through to classic repeater with vlan tag
587 	 *	intact without any group key index. Hardware
588 	 *	will know which key to use to send frame to
589 	 *	repeater.
590 	 */
591 	nbuf_copy = qdf_nbuf_copy(nbuf);
592 
593 	/*
594 	 * Send multicast frame to special peers even
595 	 * if pass through to classic repeater fails.
596 	 */
597 	if (nbuf_copy) {
598 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
599 
600 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
601 		mpass_buf_copy.nbuf = nbuf_copy;
602 		/* send frame on partner vdevs */
603 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
604 					    dp_tx_mlo_mcast_multipass_send,
605 					    &mpass_buf_copy, DP_MOD_ID_TX);
606 
607 		/* send frame on mcast primary vdev */
608 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
609 
610 		if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
611 			be_vdev->seq_num = 0;
612 		else
613 			be_vdev->seq_num++;
614 	}
615 
616 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
617 				    dp_tx_mlo_mcast_multipass_send,
618 				    &mpass_buf, DP_MOD_ID_TX);
619 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
620 
621 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
622 		be_vdev->seq_num = 0;
623 	else
624 		be_vdev->seq_num++;
625 
626 	return true;
627 }
628 #else
629 static bool
630 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
631 				  qdf_nbuf_t nbuf)
632 {
633 	return false;
634 }
635 #endif
636 
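/**
 * dp_tx_mcast_mlo_reinject_routing_set() - Set MLO mcast reinject routing
 * @soc: DP soc handle
 * @arg: pointer to a uint8_t flag; non-zero selects TQM notify based
 *	reinject routing, zero selects FW notify based routing
 */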
637 void dp_tx_mcast_mlo_reinject_routing_set(struct dp_soc *soc, void *arg)
638 {
639 	hal_soc_handle_t hal_soc = soc->hal_soc;
640 	uint8_t *cmd = (uint8_t *)arg;
641 
642 	if (*cmd)
643 		hal_tx_mcast_mlo_reinject_routing_set(
644 					hal_soc,
645 					HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY);
646 	else
647 		hal_tx_mcast_mlo_reinject_routing_set(
648 					hal_soc,
649 					HAL_TX_MCAST_MLO_REINJECT_FW_NOTIFY);
650 }
651 
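/**
 * dp_tx_mlo_mcast_pkt_send() - Send an MLO mcast frame on one vdev
 * @be_vdev: be vdev handle of the mcast primary vdev
 * @ptnr_vdev: vdev on which the frame is to be sent
 * @arg: nbuf to be transmitted
 *
 * The nbuf is cloned when sending on a partner vdev (the original nbuf is
 * used as-is on the primary vdev). The shared MLO GSN is stamped into the
 * msdu_info, the partner vdev's sequence number is synced to the primary's,
 * and the frame is enqueued as a single MSDU with the MLO mcast reinject
 * peer id.
 */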
652 void
653 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
654 			 struct dp_vdev *ptnr_vdev,
655 			 void *arg)
656 {
657 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
658 	qdf_nbuf_t  nbuf_clone;
659 	struct dp_vdev_be *be_ptnr_vdev = NULL;
660 	struct dp_tx_msdu_info_s msdu_info;
661 
662 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
663 	if (be_vdev != be_ptnr_vdev) {
664 		nbuf_clone = qdf_nbuf_clone(nbuf);
665 		if (qdf_unlikely(!nbuf_clone)) {
666 			dp_tx_debug("nbuf clone failed");
667 			return;
668 		}
669 	} else {
670 		nbuf_clone = nbuf;
671 	}
672 
673 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
674 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
675 	msdu_info.gsn = be_vdev->seq_num;
676 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
677 
678 	nbuf_clone = dp_tx_send_msdu_single(
679 					ptnr_vdev,
680 					nbuf_clone,
681 					&msdu_info,
682 					DP_MLO_MCAST_REINJECT_PEER_ID,
683 					NULL);
684 	if (qdf_unlikely(nbuf_clone)) {
685 		dp_info("pkt send failed");
686 		qdf_nbuf_free(nbuf_clone);
687 		return;
688 	}
689 }
690 
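/*
 * In the MLO mcast case the vdev id programmed into the TCL descriptor is
 * taken from msdu_info (it may refer to a partner vdev), not from the vdev
 * the frame originated on; the fallback variant below uses vdev->vdev_id.
 */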
691 static inline void
692 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
693 			      struct dp_vdev *vdev,
694 			      struct dp_tx_msdu_info_s *msdu_info)
695 {
696 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
697 }
698 
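/**
 * dp_tx_mlo_mcast_handler_be() - Handle MLO mcast transmission
 * @soc: DP soc handle
 * @vdev: DP vdev handle of the mcast primary vdev
 * @nbuf: mcast frame to be transmitted
 *
 * Multipass frames are diverted to the multipass handler first. Otherwise
 * the frame is sent on all partner vdevs and then on the mcast primary
 * vdev, after which the shared GSN is incremented (wrapping at MAX_GSN_NUM).
 */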
699 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
700 				struct dp_vdev *vdev,
701 				qdf_nbuf_t nbuf)
702 {
703 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
704 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
705 
706 	if (qdf_unlikely(vdev->multipass_en) &&
707 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
708 		return;
709 	/* send frame on partner vdevs */
710 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
711 				    dp_tx_mlo_mcast_pkt_send,
712 				    nbuf, DP_MOD_ID_REINJECT);
713 
714 	/* send frame on mcast primary vdev */
715 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
716 
717 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
718 		be_vdev->seq_num = 0;
719 	else
720 		be_vdev->seq_num++;
721 }
722 #else
723 static inline void
724 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
725 			      struct dp_vdev *vdev,
726 			      struct dp_tx_msdu_info_s *msdu_info)
727 {
728 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
729 }
730 #endif
731 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
732 	!defined(WLAN_MCAST_MLO)
733 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
734 				struct dp_vdev *vdev,
735 				qdf_nbuf_t nbuf)
736 {
737 }
738 #endif
739 
740 #ifdef CONFIG_SAWF
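/**
 * dp_sawf_config_be() - Fill SAWF specific fields in the TCL descriptor
 * @soc: DP soc handle
 * @hal_tx_desc_cached: cached TCL descriptor being built
 * @fw_metadata: pointer to the FW metadata updated for SAWF
 * @nbuf: skb carrying the SAWF queue id tag
 *
 * Does nothing when SAWF is disabled in the wlan cfg. Otherwise the service
 * queue id is derived from the nbuf and the HLOS TID, flow override and
 * who_classify_info_sel fields are programmed so the frame lands on the
 * intended hardware queue.
 */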
741 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
742 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
743 {
744 	uint8_t q_id = 0;
745 
746 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
747 		return;
748 
749 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
750 	q_id = dp_sawf_queue_id_get(nbuf);
751 
752 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
753 		return;
754 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, DP_TX_HLOS_TID_GET(q_id));
755 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
756 					     DP_TX_FLOW_OVERRIDE_ENABLE);
757 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
758 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
759 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
760 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
761 }
762 
763 #else
764 
765 static inline
766 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
767 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
768 {
769 }
770 
771 static inline
772 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
773 					 struct dp_tx_desc_s *tx_desc)
774 {
775 	return QDF_STATUS_SUCCESS;
776 }
777 
778 static inline
779 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
780 					      struct dp_tx_desc_s *tx_desc)
781 {
782 	return QDF_STATUS_SUCCESS;
783 }
784 #endif
785 
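/**
 * dp_tx_hw_enqueue_be() - Enqueue a tx descriptor to the HW TCL ring
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: SW tx descriptor to be enqueued
 * @fw_metadata: metadata to be sent to FW in the TCL descriptor
 * @tx_exc_metadata: exception path metadata, NULL for the regular path
 * @msdu_info: MSDU info (tx queue, tid, meta data)
 *
 * Builds the cached TCL data command (buffer address, bank id, vdev id,
 * search index, checksum enable, TID and so on), reserves a TCL ring entry,
 * syncs the cached descriptor into it and lets the ring access wrapper
 * decide whether the head pointer update can be coalesced.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES otherwise
 */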
786 QDF_STATUS
787 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
788 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
789 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
790 		    struct dp_tx_msdu_info_s *msdu_info)
791 {
792 	void *hal_tx_desc;
793 	uint32_t *hal_tx_desc_cached;
794 	int coalesce = 0;
795 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
796 	uint8_t ring_id = tx_q->ring_id;
797 	uint8_t tid = msdu_info->tid;
798 	struct dp_vdev_be *be_vdev;
799 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
800 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
801 	hal_ring_handle_t hal_ring_hdl = NULL;
802 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
803 	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
804 
805 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
806 
807 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
808 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
809 		return QDF_STATUS_E_RESOURCES;
810 	}
811 
812 	if (qdf_unlikely(tx_exc_metadata)) {
813 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
814 				   CDP_INVALID_TX_ENCAP_TYPE) ||
815 				   (tx_exc_metadata->tx_encap_type ==
816 				    vdev->tx_encap_type));
817 
818 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
819 			qdf_assert_always((tx_exc_metadata->sec_type ==
820 					   CDP_INVALID_SEC_TYPE) ||
821 					   tx_exc_metadata->sec_type ==
822 					   vdev->sec_type);
823 	}
824 
825 	hal_tx_desc_cached = (void *)cached_desc;
826 
827 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
828 		dp_sawf_config_be(soc, hal_tx_desc_cached,
829 				  &fw_metadata, tx_desc->nbuf);
830 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
831 	}
832 
833 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
834 				    tx_desc->dma_addr, bm_id, tx_desc->id,
835 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
836 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
837 				   vdev->lmac_id);
838 
839 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
840 					vdev->bss_ast_idx);
841 	/*
842 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
843 	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
844 	 */
845 
846 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
847 				      (vdev->bss_ast_hash & 0xF));
848 
849 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
850 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
851 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
852 
853 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
854 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
855 
856 	/* verify checksum offload configuration */
857 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
858 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
859 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
860 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
861 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
862 	}
863 
864 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
865 
866 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
867 
868 	if (tid != HTT_TX_EXT_TID_INVALID)
869 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
870 
871 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
872 						tx_desc->nbuf);
873 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
874 
875 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
876 
877 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
878 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
879 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
880 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
881 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
882 		return status;
883 	}
884 
885 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
886 	if (qdf_unlikely(!hal_tx_desc)) {
887 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
888 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
889 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
890 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
891 		goto ring_access_fail;
892 	}
893 
894 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
895 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
896 
897 	/* Sync cached descriptor with HW */
898 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
899 
900 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
901 					    msdu_info, ring_id);
902 
903 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
904 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
905 	dp_tx_update_stats(soc, tx_desc, ring_id);
906 	status = QDF_STATUS_SUCCESS;
907 
908 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
909 				 hal_ring_hdl, soc, ring_id);
910 
911 ring_access_fail:
912 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
913 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
914 			     qdf_get_log_timestamp(), tx_desc->nbuf);
915 	return status;
916 }
917 
918 #ifdef IPA_OFFLOAD
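/*
 * IPA tx uses a dedicated TCL bank profile: no encryption, AddrX/AddrY
 * search enabled and mcast packets routed to FW as exceptions. The last
 * host owned bank is reserved for IPA in dp_tx_init_ipa_bank_profile().
 */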
919 static void
920 dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
921 			  union hal_tx_bank_config *bank_config)
922 {
923 	bank_config->epd = 0;
924 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
925 	bank_config->encrypt_type = 0;
926 
927 	bank_config->src_buffer_swap = 0;
928 	bank_config->link_meta_swap = 0;
929 
930 	bank_config->index_lookup_enable = 0;
931 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
932 	bank_config->addrx_en = 1;
933 	bank_config->addry_en = 1;
934 
935 	bank_config->mesh_enable = 0;
936 	bank_config->dscp_tid_map_id = 0;
937 	bank_config->vdev_id_check_en = 0;
938 	bank_config->pmac_id = 0;
939 }
940 
941 static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
942 {
943 	union hal_tx_bank_config ipa_config = {0};
944 	int bid;
945 
946 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
947 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
948 		return;
949 	}
950 
951 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
952 
953 	/* Let IPA use last HOST owned bank */
954 	bid = be_soc->num_bank_profiles - 1;
955 
956 	be_soc->bank_profiles[bid].is_configured = true;
957 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
958 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
959 				      &be_soc->bank_profiles[bid].bank_config,
960 				      bid);
961 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
962 
963 	dp_info("IPA bank at slot %d config:0x%x", bid,
964 		be_soc->bank_profiles[bid].bank_config.val);
965 
966 	be_soc->ipa_bank_id = bid;
967 }
968 #else /* !IPA_OFFLOAD */
969 static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
970 {
971 }
972 #endif /* IPA_OFFLOAD */
973 
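/**
 * dp_tx_init_bank_profiles() - Allocate and initialize TCL bank profiles
 * @be_soc: beryllium soc handle
 *
 * Allocates one profile entry per TCL bank reported by HAL, creates the
 * bank lock, initializes the per-bank reference counts and reserves a bank
 * for IPA when IPA offload is enabled.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */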
974 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
975 {
976 	int i, num_tcl_banks;
977 
978 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
979 
980 	qdf_assert_always(num_tcl_banks);
981 	be_soc->num_bank_profiles = num_tcl_banks;
982 
983 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
984 					       sizeof(*be_soc->bank_profiles));
985 	if (!be_soc->bank_profiles) {
986 		dp_err("unable to allocate memory for DP TX Profiles!");
987 		return QDF_STATUS_E_NOMEM;
988 	}
989 
990 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
991 
992 	for (i = 0; i < num_tcl_banks; i++) {
993 		be_soc->bank_profiles[i].is_configured = false;
994 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
995 	}
996 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
997 
998 	dp_tx_init_ipa_bank_profile(be_soc);
999 
1000 	return QDF_STATUS_SUCCESS;
1001 }
1002 
1003 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
1004 {
1005 	qdf_mem_free(be_soc->bank_profiles);
1006 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
1007 }
1008 
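/**
 * dp_tx_get_vdev_bank_config() - Derive the TCL bank config for a vdev
 * @be_vdev: beryllium vdev handle
 * @bank_config: bank config filled from the vdev parameters
 *
 * Maps vdev attributes (encap/encryption type, search type and opmode,
 * address search flags, mesh mode, DSCP-TID map id, vdev id check and
 * lmac id) into the hal_tx_bank_config used for bank matching.
 */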
1009 static
1010 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1011 				union hal_tx_bank_config *bank_config)
1012 {
1013 	struct dp_vdev *vdev = &be_vdev->vdev;
1014 
1015 	bank_config->epd = 0;
1016 
1017 	bank_config->encap_type = vdev->tx_encap_type;
1018 
1019 	/* Only valid for raw frames. Needs work for RAW mode */
1020 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1021 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1022 	} else {
1023 		bank_config->encrypt_type = 0;
1024 	}
1025 
1026 	bank_config->src_buffer_swap = 0;
1027 	bank_config->link_meta_swap = 0;
1028 
1029 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1030 	    vdev->opmode == wlan_op_mode_sta) {
1031 		bank_config->index_lookup_enable = 1;
1032 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1033 		bank_config->addrx_en = 0;
1034 		bank_config->addry_en = 0;
1035 	} else {
1036 		bank_config->index_lookup_enable = 0;
1037 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1038 		bank_config->addrx_en =
1039 			(vdev->hal_desc_addr_search_flags &
1040 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1041 		bank_config->addry_en =
1042 			(vdev->hal_desc_addr_search_flags &
1043 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1044 	}
1045 
1046 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1047 
1048 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1049 
1050 	/* Disabling vdev id check for now. Needs revisit. */
1051 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1052 
1053 	bank_config->pmac_id = vdev->lmac_id;
1054 }
1055 
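/**
 * dp_tx_get_bank_profile() - Find or allocate a TCL bank for a vdev
 * @be_soc: beryllium soc handle
 * @be_vdev: beryllium vdev handle
 *
 * Searches for a configured bank whose config matches the vdev, else picks
 * an unconfigured slot, else reuses a configured slot whose ref count has
 * dropped to zero; newly chosen slots are programmed into the bank register.
 * The returned bank id is reference counted and is released with
 * dp_tx_put_bank_profile().
 *
 * Return: bank id, or DP_BE_INVALID_BANK_ID if no bank could be found
 */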
1056 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1057 			   struct dp_vdev_be *be_vdev)
1058 {
1059 	char *temp_str = "";
1060 	bool found_match = false;
1061 	int bank_id = DP_BE_INVALID_BANK_ID;
1062 	int i;
1063 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1064 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1065 	union hal_tx_bank_config vdev_config = {0};
1066 
1067 	/* convert vdev params into hal_tx_bank_config */
1068 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1069 
1070 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1071 	/* go over all banks and find a matching/unconfigured/unused bank */
1072 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1073 		if (be_soc->bank_profiles[i].is_configured &&
1074 		    (be_soc->bank_profiles[i].bank_config.val ^
1075 						vdev_config.val) == 0) {
1076 			found_match = true;
1077 			break;
1078 		}
1079 
1080 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1081 		    !be_soc->bank_profiles[i].is_configured)
1082 			unconfigured_slot = i;
1083 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1084 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1085 			zero_ref_count_slot = i;
1086 	}
1087 
1088 	if (found_match) {
1089 		temp_str = "matching";
1090 		bank_id = i;
1091 		goto inc_ref_and_return;
1092 	}
1093 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1094 		temp_str = "unconfigured";
1095 		bank_id = unconfigured_slot;
1096 		goto configure_and_return;
1097 	}
1098 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1099 		temp_str = "zero_ref_count";
1100 		bank_id = zero_ref_count_slot;
1101 	}
1102 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1103 		dp_alert("unable to find TX bank!");
1104 		QDF_BUG(0);
		/* release the bank lock before bailing out on the error path */
		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1105 		return bank_id;
1106 	}
1107 
1108 configure_and_return:
1109 	be_soc->bank_profiles[bank_id].is_configured = true;
1110 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1111 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1112 				      &be_soc->bank_profiles[bank_id].bank_config,
1113 				      bank_id);
1114 inc_ref_and_return:
1115 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1116 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1117 
1118 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1119 		temp_str, bank_id, vdev_config.val,
1120 		be_soc->bank_profiles[bank_id].bank_config.val,
1121 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1122 
1123 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1124 		be_soc->bank_profiles[bank_id].bank_config.epd,
1125 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1126 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1127 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1128 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1129 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1130 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1131 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1132 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1133 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1134 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1135 
1136 	return bank_id;
1137 }
1138 
1139 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
1140 			    struct dp_vdev_be *be_vdev)
1141 {
1142 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1143 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
1144 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1145 }
1146 
1147 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
1148 			       struct dp_vdev_be *be_vdev)
1149 {
1150 	dp_tx_put_bank_profile(be_soc, be_vdev);
1151 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1152 	be_vdev->vdev.bank_id = be_vdev->bank_id;
1153 }
1154 
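/**
 * dp_tx_desc_pool_init_be() - Populate cookie conversion SPT pages for a pool
 * @soc: DP soc handle
 * @num_elem: number of tx descriptors in the pool
 * @pool_id: tx descriptor pool id
 *
 * Walks the pool freelist, records each descriptor VA in the secondary page
 * table and generates the HW convertible descriptor id from the page index
 * and the entry index within the page.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE
 */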
1155 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1156 				   uint32_t num_elem,
1157 				   uint8_t pool_id)
1158 {
1159 	struct dp_tx_desc_pool_s *tx_desc_pool;
1160 	struct dp_hw_cookie_conversion_t *cc_ctx;
1161 	struct dp_soc_be *be_soc;
1162 	struct dp_spt_page_desc *page_desc;
1163 	struct dp_tx_desc_s *tx_desc;
1164 	uint32_t ppt_idx = 0;
1165 	uint32_t avail_entry_index = 0;
1166 
1167 	if (!num_elem) {
1168 		dp_err("desc_num 0 !!");
1169 		return QDF_STATUS_E_FAILURE;
1170 	}
1171 
1172 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1173 	tx_desc_pool = &soc->tx_desc[pool_id];
1174 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1175 
1176 	tx_desc = tx_desc_pool->freelist;
1177 	page_desc = &cc_ctx->page_desc_base[0];
1178 	while (tx_desc) {
1179 		if (avail_entry_index == 0) {
1180 			if (ppt_idx >= cc_ctx->total_page_num) {
1181 				dp_alert("insufficient secondary page tables");
1182 				qdf_assert_always(0);
1183 			}
1184 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1185 		}
1186 
1187 		/* Store each TX Desc VA in the SPT pages and
1188 		 * get the corresponding ID
1189 		 */
1190 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1191 					 avail_entry_index,
1192 					 tx_desc);
1193 		tx_desc->id =
1194 			dp_cc_desc_id_generate(page_desc->ppt_index,
1195 					       avail_entry_index);
1196 		tx_desc->pool_id = pool_id;
1197 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1198 		tx_desc = tx_desc->next;
1199 		avail_entry_index = (avail_entry_index + 1) &
1200 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1201 	}
1202 
1203 	return QDF_STATUS_SUCCESS;
1204 }
1205 
1206 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
1207 			       struct dp_tx_desc_pool_s *tx_desc_pool,
1208 			       uint8_t pool_id)
1209 {
1210 	struct dp_spt_page_desc *page_desc;
1211 	struct dp_soc_be *be_soc;
1212 	int i = 0;
1213 	struct dp_hw_cookie_conversion_t *cc_ctx;
1214 
1215 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1216 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1217 
1218 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1219 		page_desc = &cc_ctx->page_desc_base[i];
1220 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1221 	}
1222 }
1223 
1224 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1225 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1226 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1227 			       uint32_t quota)
1228 {
1229 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1230 	uint32_t work_done = 0;
1231 
1232 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1233 			DP_SRNG_THRESH_NEAR_FULL)
1234 		return 0;
1235 
1236 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1237 	work_done++;
1238 
1239 	return work_done;
1240 }
1241 #endif
1242 
1243 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1244 	defined(WLAN_CONFIG_TX_DELAY)
1245 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
1246 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
1247 
1248 #define HW_TX_DELAY_MAX                       0x1000000
1249 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
1250 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
1251 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
1252 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
1253 	 HW_TX_DELAY_MASK)
1254 
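/**
 * dp_mlo_compute_hw_delay_us() - Compute HW tx delay from completion status
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @ts: HAL tx completion status
 * @delay_us: filled with the enqueue-to-completion delay in microseconds
 *
 * The hw link id is extracted from the ppdu id, the TQM enqueue timestamp
 * (buffer_timestamp shifted into microseconds) and the TSF completion
 * timestamp are normalized against the MLO offset using delta_tqm and the
 * per-link delta_tsf2, and the delay is their difference modulo
 * HW_TX_DELAY_MASK. Delays above HW_TX_DELAY_MAX are treated as invalid.
 *
 * Return: QDF_STATUS_SUCCESS if a valid delay was computed
 */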
1255 static inline
1256 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1257 				      struct dp_vdev *vdev,
1258 				      struct hal_tx_completion_status *ts,
1259 				      uint32_t *delay_us)
1260 {
1261 	uint32_t ppdu_id;
1262 	uint8_t link_id_offset, link_id_bits;
1263 	uint8_t hw_link_id;
1264 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1265 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1266 	uint32_t delay;
1267 	int32_t delta_tsf2, delta_tqm;
1268 
1269 	if (!ts->valid)
1270 		return QDF_STATUS_E_INVAL;
1271 
1272 	link_id_offset = soc->link_id_offset;
1273 	link_id_bits = soc->link_id_bits;
1274 	ppdu_id = ts->ppdu_id;
1275 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1276 					   link_id_bits);
1277 
1278 	msdu_tqm_enqueue_tstamp_us =
1279 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1280 	msdu_compl_tsf_tstamp_us = ts->tsf;
1281 
1282 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1283 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1284 
1285 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1286 			delta_tqm) & HW_TX_DELAY_MASK;
1287 
1288 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1289 			delta_tsf2) & HW_TX_DELAY_MASK;
1290 
1291 	delay = (final_msdu_compl_tsf_tstamp_us -
1292 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1293 
1294 	if (delay > HW_TX_DELAY_MAX)
1295 		return QDF_STATUS_E_FAILURE;
1296 
1297 	if (delay_us)
1298 		*delay_us = delay;
1299 
1300 	return QDF_STATUS_SUCCESS;
1301 }
1302 #else
1303 static inline
1304 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1305 				      struct dp_vdev *vdev,
1306 				      struct hal_tx_completion_status *ts,
1307 				      uint32_t *delay_us)
1308 {
1309 	return QDF_STATUS_SUCCESS;
1310 }
1311 #endif
1312 
1313 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1314 				     struct dp_vdev *vdev,
1315 				     struct hal_tx_completion_status *ts,
1316 				     uint32_t *delay_us)
1317 {
1318 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1319 }
1320 
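/*
 * Fast path nbuf map: only the first 256 bytes of the linear buffer are
 * cleaned from the CPU cache (without a DSB; the barrier is issued after
 * the TCL descriptor is written in dp_tx_fast_send_be()) and the physical
 * address is derived directly from the VA, bypassing the generic DMA map.
 */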
1321 static inline
1322 qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
1323 				 struct dp_tx_desc_s *tx_desc,
1324 				 qdf_nbuf_t nbuf)
1325 {
1326 	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
1327 					(void *)(nbuf->data + 256));
1328 
1329 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1330 }
1331 
1332 static inline
1333 void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
1334 			 struct dp_tx_desc_s *desc)
1335 {
1336 }
1337 
1338 /**
1339  * dp_tx_fast_send_be() - Transmit a frame on a given VAP
1340  * @soc_hdl: CDP soc handle
1341  * @vdev_id: id of DP vdev handle
1342  * @nbuf: skb
1343  *
1344  * Entry point for Core Tx layer (DP_TX) invoked from
1345  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
1346  * forwarding cases
1347  *
1348  * Return: NULL on success,
1349  *         nbuf when it fails to send
1350  */
1351 qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1352 			      qdf_nbuf_t nbuf)
1353 {
1354 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1355 	struct dp_vdev *vdev = NULL;
1356 	struct dp_pdev *pdev = NULL;
1357 	struct dp_tx_desc_s *tx_desc;
1358 	uint16_t desc_pool_id;
1359 	uint16_t pkt_len;
1360 	qdf_dma_addr_t paddr;
1361 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1362 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1363 	hal_ring_handle_t hal_ring_hdl = NULL;
1364 	uint32_t *hal_tx_desc_cached;
1365 	void *hal_tx_desc;
1366 	uint8_t desc_size = DP_TX_FAST_DESC_SIZE;
1367 
1368 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
1369 		return nbuf;
1370 
1371 	vdev = soc->vdev_id_map[vdev_id];
1372 	if (qdf_unlikely(!vdev))
1373 		return nbuf;
1374 
1375 	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
1376 
1377 	pkt_len = qdf_nbuf_headlen(nbuf);
1378 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
1379 	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
1380 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);
1381 
1382 	pdev = vdev->pdev;
1383 	if (dp_tx_limit_check(vdev))
1384 		return nbuf;
1385 
1386 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1387 
1388 	if (qdf_unlikely(!tx_desc)) {
1389 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1390 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1391 		return nbuf;
1392 	}
1393 
1394 	dp_tx_outstanding_inc(pdev);
1395 
1396 	/* Initialize the SW tx descriptor */
1397 	tx_desc->nbuf = nbuf;
1398 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1399 	tx_desc->frm_type = dp_tx_frm_std;
1400 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1401 	tx_desc->vdev_id = vdev_id;
1402 	tx_desc->pdev = pdev;
1403 	tx_desc->pkt_offset = 0;
1404 	tx_desc->length = pkt_len;
1405 	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
1406 
1407 	paddr =  dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
1408 	if (!paddr) {
1409 		/* Handle failure */
1410 		dp_err("qdf_nbuf_map failed");
1411 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1412 		goto release_desc;
1413 	}
1414 
1415 	tx_desc->dma_addr = paddr;
1416 
1417 	hal_tx_desc_cached = (void *)cached_desc;
1418 	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
1419 	hal_tx_desc_cached[1] = tx_desc->id <<
1420 		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;
1421 
1422 	/* bank_id */
1423 	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
1424 	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
1425 		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;
1426 
1427 	hal_tx_desc_cached[4] = tx_desc->length;
1428 	/* l3 and l4 checksum enable */
1429 	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
1430 		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;
1431 
1432 	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
1433 	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;
1434 
1435 	if (vdev->opmode == wlan_op_mode_sta) {
1436 		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
1437 			((vdev->bss_ast_hash & 0xF) <<
1438 			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);
1439 		desc_size = DP_TX_FAST_DESC_SIZE + 4;
1440 	}
1441 
1442 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);
1443 
1444 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1445 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1446 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1447 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1448 		goto ring_access_fail2;
1449 	}
1450 
1451 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1452 	if (qdf_unlikely(!hal_tx_desc)) {
1453 		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
1454 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1455 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1456 		goto ring_access_fail;
1457 	}
1458 
1459 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1460 
1461 	/* Sync cached descriptor with HW */
1462 	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, desc_size);
1463 	qdf_dsb();
1464 
1465 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1466 	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
1467 	status = QDF_STATUS_SUCCESS;
1468 
1469 ring_access_fail:
1470 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1471 
1472 ring_access_fail2:
1473 	if (status != QDF_STATUS_SUCCESS) {
1474 		dp_tx_nbuf_unmap_be(soc, tx_desc);
1475 		goto release_desc;
1476 	}
1477 
1478 	return NULL;
1479 
1480 release_desc:
1481 	dp_tx_desc_release(tx_desc, desc_pool_id);
1482 
1483 	return nbuf;
1484 }
1485