xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 
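/*
 * Lock guarding the TX bank profile table: single-pdev builds use a
 * mutex, multi-pdev builds use a BH-disabling spinlock.
 */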
33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
38 #else
39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
43 #endif
44 
45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
46 #ifdef WLAN_MCAST_MLO
47 /* MLO peer id for reinject */
48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
49 #define MAX_GSN_NUM 0x0FFF
50 
51 #ifdef QCA_MULTIPASS_SUPPORT
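/*
 * INVALID_VLAN_ID marks a frame whose VLAN did not match any multipass
 * peer; MULTIPASS_WITH_VLAN_ID marks a copy that is sent with its VLAN
 * tag left intact.
 */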
52 #define INVALID_VLAN_ID         0xFFFF
53 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
54 /**
55  * struct dp_mlo_mpass_buf - Multipass buffer
56  * @vlan_id: vlan_id of frame
57  * @nbuf: pointer to skb buf
58  */
59 struct dp_mlo_mpass_buf {
60 	uint16_t vlan_id;
61 	qdf_nbuf_t  nbuf;
62 };
63 #endif
64 #endif
65 #endif
66 
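/*
 * The v3 WBM completion field getters below alias the v2 HTT macros;
 * for these fields the layout is unchanged between the two versions.
 */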
67 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
68 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
69 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
70 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
71 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
72 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
73 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
74 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
75 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
76 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
77 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
78 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
79 
80 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
81 
82 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
83 /*
84  * Value written to the buffer_virt_addr_63_32 field of the WBM2SW ring
85  * Desc to mark the ring desc as invalidated.
86  */
87 #define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212
88 
89 /**
90  * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
91  *					    invalidate it after each reaping
92  * @tx_comp_hal_desc: ring desc virtual address
93  * @r_tx_desc: pointer to current dp TX Desc pointer
94  * @tx_desc_va: the original 64-bit Desc VA obtained from the ring Desc
95  * @hw_cc_done: HW cookie conversion done or not
96  *
97  * If HW CC is done, check the buffer_virt_addr_63_32 value to know whether
98  * the ring Desc is stale. If HW CC is not done, compare the PA between the
99  * ring Desc and the current TX desc.
100  *
101  * Return: None.
102  */
103 static inline
104 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
105 					  struct dp_tx_desc_s **r_tx_desc,
106 					  uint64_t tx_desc_va,
107 					  bool hw_cc_done)
108 {
109 	qdf_dma_addr_t desc_dma_addr;
110 
111 	if (qdf_likely(hw_cc_done)) {
112 		/* Check upper 32 bits */
113 		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
114 		    (tx_desc_va >> 32))
115 			*r_tx_desc = NULL;
116 
117 		/* Invalidate bits 32..63 of the VA in the ring desc */
118 		hal_tx_comp_set_desc_va_63_32(
119 				tx_comp_hal_desc,
120 				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
121 	} else {
122 		/* Compare PA between ring desc and current TX desc stored */
123 		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);
124 
125 		if (desc_dma_addr != (*r_tx_desc)->dma_addr)
126 			*r_tx_desc = NULL;
127 	}
128 }
129 #else
130 static inline
131 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
132 					  struct dp_tx_desc_s **r_tx_desc,
133 					  uint64_t tx_desc_va,
134 					  bool hw_cc_done)
135 {
136 }
137 #endif
138 
139 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
140 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
141 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
142 					    void *tx_comp_hal_desc,
143 					    struct dp_tx_desc_s **r_tx_desc)
144 {
145 	uint32_t tx_desc_id;
146 	uint64_t tx_desc_va = 0;
147 	bool hw_cc_done =
148 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);
149 
150 	if (qdf_likely(hw_cc_done)) {
151 		/* HW cookie conversion done */
152 		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
153 		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
154 
155 	} else {
156 		/* SW does cookie conversion to VA */
157 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
158 		*r_tx_desc =
159 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
160 	}
161 
162 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
163 					     r_tx_desc, tx_desc_va,
164 					     hw_cc_done);
165 
166 	if (*r_tx_desc)
167 		(*r_tx_desc)->peer_id =
168 				dp_tx_comp_get_peer_id_be(soc,
169 							  tx_comp_hal_desc);
170 }
171 #else
172 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
173 					    void *tx_comp_hal_desc,
174 					    struct dp_tx_desc_s **r_tx_desc)
175 {
176 	uint64_t tx_desc_va;
177 
178 	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
179 	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
180 
181 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
182 					     r_tx_desc,
183 					     tx_desc_va,
184 					     true);
185 	if (*r_tx_desc)
186 		(*r_tx_desc)->peer_id =
187 				dp_tx_comp_get_peer_id_be(soc,
188 							  tx_comp_hal_desc);
189 }
190 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
191 #else
192 
193 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
194 					    void *tx_comp_hal_desc,
195 					    struct dp_tx_desc_s **r_tx_desc)
196 {
197 	uint32_t tx_desc_id;
198 
199 	/* SW does cookie conversion to VA */
200 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
201 	*r_tx_desc =
202 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
203 
204 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
205 					     r_tx_desc, 0,
206 					     false);
207 
208 	if (*r_tx_desc)
209 		(*r_tx_desc)->peer_id =
210 				dp_tx_comp_get_peer_id_be(soc,
211 							  tx_comp_hal_desc);
212 }
213 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
214 
215 static inline
216 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
217 {
218 	struct dp_vdev *vdev;
219 	uint8_t vdev_id;
220 	uint32_t *htt_desc = (uint32_t *)status;
221 
222 	qdf_assert_always(!soc->mec_fw_offload);
223 
224 	/*
225 	 * Get vdev id from HTT status word in case of MEC
226 	 * notification
227 	 */
228 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
229 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
230 		return;
231 
232 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
233 				     DP_MOD_ID_HTT_COMP);
234 	if (!vdev)
235 		return;
236 	dp_tx_mec_handler(vdev, status);
237 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
238 }
239 
240 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
241 				     struct dp_tx_desc_s *tx_desc,
242 				     uint8_t *status,
243 				     uint8_t ring_id)
244 {
245 	uint8_t tx_status;
246 	struct dp_pdev *pdev;
247 	struct dp_vdev *vdev = NULL;
248 	struct hal_tx_completion_status ts = {0};
249 	uint32_t *htt_desc = (uint32_t *)status;
250 	struct dp_txrx_peer *txrx_peer;
251 	dp_txrx_ref_handle txrx_ref_handle = NULL;
252 	struct cdp_tid_tx_stats *tid_stats = NULL;
253 	struct htt_soc *htt_handle;
254 	uint8_t vdev_id;
255 
256 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
257 	htt_handle = (struct htt_soc *)soc->htt_handle;
258 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
259 
260 	/*
261 	 * There can be a scenario where WBM consumes the descriptor enqueued
262 	 * from TQM2WBM first and the TQM completion happens before the MEC
263 	 * notification comes from FW2WBM. Avoid accessing any field of the tx
264 	 * descriptor in case of a MEC notify.
265 	 */
266 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
267 		return dp_tx_process_mec_notify_be(soc, status);
268 
269 	/*
270 	 * If the descriptor is already freed in vdev_detach,
271 	 * continue to the next descriptor
272 	 */
273 	if (qdf_unlikely(!tx_desc->flags)) {
274 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
275 				   tx_desc->id);
276 		return;
277 	}
278 
279 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
280 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
281 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
282 		goto release_tx_desc;
283 	}
284 
285 	pdev = tx_desc->pdev;
286 
287 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
288 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
289 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
290 		goto release_tx_desc;
291 	}
292 
293 	qdf_assert(tx_desc->pdev);
294 
295 	vdev_id = tx_desc->vdev_id;
296 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
297 				     DP_MOD_ID_HTT_COMP);
298 
299 	if (qdf_unlikely(!vdev)) {
300 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
301 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
302 		goto release_tx_desc;
303 	}
304 
305 	switch (tx_status) {
306 	case HTT_TX_FW2WBM_TX_STATUS_OK:
307 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
308 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
309 	{
310 		uint8_t tid;
311 
312 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
313 			ts.peer_id =
314 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
315 						htt_desc[3]);
316 			ts.tid =
317 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
318 						htt_desc[3]);
319 		} else {
320 			ts.peer_id = HTT_INVALID_PEER;
321 			ts.tid = HTT_INVALID_TID;
322 		}
323 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
324 		ts.ppdu_id =
325 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
326 					htt_desc[2]);
327 		ts.ack_frame_rssi =
328 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
329 					htt_desc[2]);
330 
331 		ts.tsf = htt_desc[4];
332 		ts.first_msdu = 1;
333 		ts.last_msdu = 1;
334 		switch (tx_status) {
335 		case HTT_TX_FW2WBM_TX_STATUS_OK:
336 			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
337 			break;
338 		case HTT_TX_FW2WBM_TX_STATUS_DROP:
339 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
340 			break;
341 		case HTT_TX_FW2WBM_TX_STATUS_TTL:
342 			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
343 			break;
344 		}
345 		tid = ts.tid;
346 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
347 			tid = CDP_MAX_DATA_TIDS - 1;
348 
349 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
350 
351 		if (qdf_unlikely(pdev->delay_stats_flag) ||
352 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
353 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
354 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
355 			tid_stats->htt_status_cnt[tx_status]++;
356 
357 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
358 						       &txrx_ref_handle,
359 						       DP_MOD_ID_HTT_COMP);
360 		if (qdf_likely(txrx_peer))
361 			dp_tx_update_peer_basic_stats(
362 						txrx_peer,
363 						qdf_nbuf_len(tx_desc->nbuf),
364 						tx_status,
365 						pdev->enhanced_stats_en);
366 
367 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
368 					     ring_id);
369 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
370 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
371 
372 		if (qdf_likely(txrx_peer))
373 			dp_txrx_peer_unref_delete(txrx_ref_handle,
374 						  DP_MOD_ID_HTT_COMP);
375 
376 		break;
377 	}
378 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
379 	{
380 		uint8_t reinject_reason;
381 
382 		reinject_reason =
383 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
384 								htt_desc[1]);
385 		dp_tx_reinject_handler(soc, vdev, tx_desc,
386 				       status, reinject_reason);
387 		break;
388 	}
389 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
390 	{
391 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
392 		break;
393 	}
394 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
395 	{
396 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
397 		goto release_tx_desc;
398 	}
399 	default:
400 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
401 			       tx_status);
402 		goto release_tx_desc;
403 	}
404 
405 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
406 	return;
407 
408 release_tx_desc:
409 	dp_tx_comp_free_buf(soc, tx_desc, false);
410 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
411 	if (vdev)
412 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
413 }
414 
415 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
416 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
417 /**
418  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion
419  * @soc: DP soc structure pointer
420  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
421  *
422  * Return: RBM ID corresponding to TCL ring_id
423  */
424 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
425 					  uint8_t ring_id)
426 {
427 	return 0;
428 }
429 #else
430 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
431 					  uint8_t ring_id)
432 {
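	/*
	 * ring_id 0 maps to the SW2 BM ID derived from wbm_sw0_bm_id; the
	 * remaining rings map linearly onto wbm_sw0_bm_id + (ring_id - 1).
	 */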
433 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
434 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
435 }
436 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
437 #else
438 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
439 					  uint8_t tcl_index)
440 {
441 	uint8_t rbm;
442 
443 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
444 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
445 	return rbm;
446 }
447 #endif
448 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
449 
450 /**
451  * dp_tx_set_min_rates_for_critical_frames() - sets min rates for critical pkts
452  * @soc: DP soc structure pointer
453  * @hal_tx_desc: HAL descriptor where fields are set
454  * @nbuf: skb to be considered for min rates
455  *
456  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
457  * and uses it to determine if the frame is critical. For a critical frame,
458  * flow override bits are set to classify the frame into HW's high priority
459  * queue. The HW will pick pre-configured min rates for such packets.
460  *
461  * Return: None
462  */
463 static void
464 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
465 					uint32_t *hal_tx_desc,
466 					qdf_nbuf_t nbuf)
467 {
468 /*
469  * Critical frames should be queued to the high priority queue for the TID
470  * on which they are sent out (for the concerned peer).
471  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
472  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
473  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
474  * HOL queue.
475  */
476 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
477 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
478 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
479 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
480 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
481 						TX_SEMI_HARD_NOTIFY_E);
482 	}
483 }
484 #else
485 static inline void
486 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
487 					uint32_t *hal_tx_desc_cached,
488 					qdf_nbuf_t nbuf)
489 {
490 }
491 #endif
492 
493 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
494 	defined(WLAN_MCAST_MLO)
495 #ifdef QCA_MULTIPASS_SUPPORT
496 /**
497  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
498  * @be_vdev: Handle to DP be_vdev structure
499  * @ptnr_vdev: DP ptnr_vdev handle
500  * @arg: pointer to dp_mlo_mpass_buf
501  *
502  * Return: None
503  */
504 static void
505 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
506 				 struct dp_vdev *ptnr_vdev,
507 				 void *arg)
508 {
509 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
510 	struct dp_txrx_peer *txrx_peer = NULL;
511 	struct vlan_ethhdr *veh = NULL;
512 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
513 	uint16_t vlan_id = 0;
514 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
515 			(htons(eh->ether_type) != ETH_P_8021Q));
516 
517 	if (qdf_unlikely(not_vlan))
518 		return;
519 	veh = (struct vlan_ethhdr *)eh;
520 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
521 
522 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
523 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
524 		      mpass_peer_list_elem) {
525 		if (vlan_id == txrx_peer->vlan_id) {
526 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
527 			ptr->vlan_id = vlan_id;
528 			return;
529 		}
530 	}
531 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
532 }
533 
534 /**
535  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
536  * @be_vdev: Handle to DP be_vdev structure
537  * @ptnr_vdev: DP ptnr_vdev handle
538  * @arg: pointer to dp_mlo_mpass_buf
539  *
540  * Return: None
541  */
542 static void
543 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
544 			       struct dp_vdev *ptnr_vdev,
545 			       void *arg)
546 {
547 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
548 	struct dp_tx_msdu_info_s msdu_info;
549 	struct dp_vdev_be *be_ptnr_vdev = NULL;
550 	qdf_nbuf_t  nbuf_clone;
551 	uint16_t group_key = 0;
552 
553 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
554 	if (be_vdev != be_ptnr_vdev) {
555 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
556 		if (qdf_unlikely(!nbuf_clone)) {
557 			dp_tx_debug("nbuf clone failed");
558 			return;
559 		}
560 	} else {
561 		nbuf_clone = ptr->nbuf;
562 	}
563 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
564 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
565 	msdu_info.gsn = be_vdev->seq_num;
566 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
567 
568 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
569 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
570 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
571 						msdu_info.meta_data[0], 1);
572 	} else {
573 		/* return when vlan map is not initialized */
574 		if (!ptnr_vdev->iv_vlan_map)
575 			return;
576 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
577 
578 		/*
579 		 * If group key is not installed, drop the frame.
580 		 */
581 
582 		if (!group_key)
583 			return;
584 
585 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
586 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
587 		msdu_info.exception_fw = 1;
588 	}
589 
590 	nbuf_clone = dp_tx_send_msdu_single(
591 					ptnr_vdev,
592 					nbuf_clone,
593 					&msdu_info,
594 					DP_MLO_MCAST_REINJECT_PEER_ID,
595 					NULL);
596 	if (qdf_unlikely(nbuf_clone)) {
597 		dp_info("pkt send failed");
598 		qdf_nbuf_free(nbuf_clone);
599 		return;
600 	}
601 }
602 
603 /**
604  * dp_tx_mlo_mcast_multipass_handler() - handle frames that need multipass processing
605  * @soc: DP soc handle
606  * @vdev: DP vdev handle
607  * @nbuf: nbuf to be enqueued
608  *
609  * Return: true if handling is done else false
610  */
611 static bool
612 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
613 				  struct dp_vdev *vdev,
614 				  qdf_nbuf_t nbuf)
615 {
616 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
617 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
618 	qdf_nbuf_t nbuf_copy = NULL;
619 	struct dp_mlo_mpass_buf mpass_buf;
620 
621 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
622 	mpass_buf.vlan_id = INVALID_VLAN_ID;
623 	mpass_buf.nbuf = nbuf;
624 
625 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
626 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
627 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
628 					    dp_tx_mlo_mcast_multipass_lookup,
629 					    &mpass_buf, DP_MOD_ID_TX);
630 		/*
631 		 * Do not drop the frame when vlan_id doesn't match.
632 		 * Send the frame as it is.
633 		 */
634 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
635 			return false;
636 	}
637 
638 	/* AP can have classic clients, special clients &
639 	 * classic repeaters.
640 	 * 1. Classic clients & special client:
641 	 *	Remove vlan header, find corresponding group key
642 	 *	index, fill in metaheader and enqueue multicast
643 	 *	frame to TCL.
644 	 * 2. Classic repeater:
645 	 *	Pass through to classic repeater with vlan tag
646 	 *	intact without any group key index. Hardware
647 	 *	will know which key to use to send frame to
648 	 *	repeater.
649 	 */
650 	nbuf_copy = qdf_nbuf_copy(nbuf);
651 
652 	/*
653 	 * Send multicast frame to special peers even
654 	 * if pass through to classic repeater fails.
655 	 */
656 	if (nbuf_copy) {
657 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
658 
659 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
660 		mpass_buf_copy.nbuf = nbuf_copy;
661 		/* send frame on partner vdevs */
662 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
663 					    dp_tx_mlo_mcast_multipass_send,
664 					    &mpass_buf_copy, DP_MOD_ID_TX);
665 
666 		/* send frame on mcast primary vdev */
667 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
668 
669 		if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
670 			be_vdev->seq_num = 0;
671 		else
672 			be_vdev->seq_num++;
673 	}
674 
675 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
676 				    dp_tx_mlo_mcast_multipass_send,
677 				    &mpass_buf, DP_MOD_ID_TX);
678 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
679 
680 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
681 		be_vdev->seq_num = 0;
682 	else
683 		be_vdev->seq_num++;
684 
685 	return true;
686 }
687 #else
688 static bool
689 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
690 				  qdf_nbuf_t nbuf)
691 {
692 	return false;
693 }
694 #endif
695 
696 void
697 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
698 			 struct dp_vdev *ptnr_vdev,
699 			 void *arg)
700 {
701 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
702 	qdf_nbuf_t  nbuf_clone;
703 	struct dp_vdev_be *be_ptnr_vdev = NULL;
704 	struct dp_tx_msdu_info_s msdu_info;
705 
706 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
707 	if (be_vdev != be_ptnr_vdev) {
708 		nbuf_clone = qdf_nbuf_clone(nbuf);
709 		if (qdf_unlikely(!nbuf_clone)) {
710 			dp_tx_debug("nbuf clone failed");
711 			return;
712 		}
713 	} else {
714 		nbuf_clone = nbuf;
715 	}
716 
717 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
718 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
719 	msdu_info.gsn = be_vdev->seq_num;
720 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
721 
722 	nbuf_clone = dp_tx_send_msdu_single(
723 					ptnr_vdev,
724 					nbuf_clone,
725 					&msdu_info,
726 					DP_MLO_MCAST_REINJECT_PEER_ID,
727 					NULL);
728 	if (qdf_unlikely(nbuf_clone)) {
729 		dp_info("pkt send failed");
730 		qdf_nbuf_free(nbuf_clone);
731 		return;
732 	}
733 }
734 
735 static inline void
736 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
737 			      struct dp_vdev *vdev,
738 			      struct dp_tx_msdu_info_s *msdu_info)
739 {
740 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
741 }
742 
743 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
744 				struct dp_vdev *vdev,
745 				qdf_nbuf_t nbuf)
746 {
747 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
748 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
749 
750 	if (qdf_unlikely(vdev->multipass_en) &&
751 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
752 		return;
753 	/* send frame on partner vdevs */
754 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
755 				    dp_tx_mlo_mcast_pkt_send,
756 				    nbuf, DP_MOD_ID_REINJECT);
757 
758 	/* send frame on mcast primary vdev */
759 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
760 
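	/* Advance the mcast sequence number (gsn) for the next frame; it
	 * wraps once it exceeds the 12-bit MAX_GSN_NUM.
	 */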
761 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
762 		be_vdev->seq_num = 0;
763 	else
764 		be_vdev->seq_num++;
765 }
766 
767 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
768 				   struct dp_vdev *vdev)
769 {
770 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
771 
772 	if (be_vdev->mcast_primary)
773 		return true;
774 
775 	return false;
776 }
777 #else
778 static inline void
779 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
780 			      struct dp_vdev *vdev,
781 			      struct dp_tx_msdu_info_s *msdu_info)
782 {
783 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
784 }
785 #endif
786 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
787 	!defined(WLAN_MCAST_MLO)
788 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
789 				struct dp_vdev *vdev,
790 				qdf_nbuf_t nbuf)
791 {
792 }
793 
794 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
795 				   struct dp_vdev *vdev)
796 {
797 	return false;
798 }
799 #endif
800 
801 #ifdef CONFIG_SAWF
802 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
803 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
804 {
805 	uint8_t q_id = 0;
806 
807 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
808 		return;
809 
810 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
811 	q_id = dp_sawf_queue_id_get(nbuf);
812 
813 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
814 		return;
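	/*
	 * The SAWF queue id carries the HLOS TID in its low bits; the flow
	 * override and who_classify_info_sel values derived from the same
	 * queue id steer the MSDU to the intended service class queue.
	 */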
815 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
816 				 (q_id & (CDP_DATA_TID_MAX - 1)));
817 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
818 					     DP_TX_FLOW_OVERRIDE_ENABLE);
819 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
820 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
821 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
822 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
823 }
824 
825 #else
826 
827 static inline
828 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
829 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
830 {
831 }
832 
833 static inline
834 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
835 					 struct dp_tx_desc_s *tx_desc)
836 {
837 	return QDF_STATUS_SUCCESS;
838 }
839 
840 static inline
841 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
842 					      struct dp_tx_desc_s *tx_desc)
843 {
844 	return QDF_STATUS_SUCCESS;
845 }
846 #endif
847 
848 #ifdef WLAN_SUPPORT_PPEDS
849 /**
850  * dp_ppeds_tx_comp_handler() - Handle tx completions for ppe2tcl ring
851  * @be_soc: Handle to DP BE Soc structure
852  * @quota: Max number of tx completions to process
853  *
854  * Return: Number of tx completions processed
855  */
856 int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
857 {
858 	uint32_t num_avail_for_reap = 0;
859 	void *tx_comp_hal_desc;
860 	uint8_t buf_src;
861 	uint32_t count = 0;
862 	struct dp_tx_desc_s *tx_desc = NULL;
863 	struct dp_tx_desc_s *head_desc = NULL;
864 	struct dp_tx_desc_s *tail_desc = NULL;
865 	struct dp_soc *soc = &be_soc->soc;
866 	void *last_prefetch_hw_desc = NULL;
867 	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
868 	hal_soc_handle_t hal_soc = soc->hal_soc;
869 	hal_ring_handle_t hal_ring_hdl =
870 				be_soc->ppeds_wbm_release_ring.hal_srng;
871 
872 	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
873 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
874 		return 0;
875 	}
876 
877 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
878 
879 	if (num_avail_for_reap >= quota)
880 		num_avail_for_reap = quota;
881 
882 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
883 
884 	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
885 						     num_avail_for_reap);
886 
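	/*
	 * Reap completions: FW-released buffers are freed immediately, while
	 * TQM-released descriptors are chained into a list and processed via
	 * dp_tx_comp_process_desc_list() after the ring access ends.
	 */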
887 	while (qdf_likely(num_avail_for_reap--)) {
888 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
889 		if (qdf_unlikely(!tx_comp_hal_desc))
890 			break;
891 
892 		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
893 							tx_comp_hal_desc);
894 
895 		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
896 				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
897 			dp_err("Tx comp release_src != TQM | FW but from %d",
898 			       buf_src);
899 			qdf_assert_always(0);
900 		}
901 
902 		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
903 						       &tx_desc);
904 
905 		if (!tx_desc) {
906 			dp_err("unable to retrieve tx_desc!");
907 			qdf_assert_always(0);
908 			continue;
909 		}
910 
911 		if (qdf_unlikely(!(tx_desc->flags &
912 				   DP_TX_DESC_FLAG_ALLOCATED) ||
913 				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
914 			qdf_assert_always(0);
915 			continue;
916 		}
917 
918 		tx_desc->buffer_src = buf_src;
919 
920 		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
921 			qdf_nbuf_free(tx_desc->nbuf);
922 			dp_ppeds_tx_desc_free(soc, tx_desc);
923 		} else {
924 			tx_desc->tx_status =
925 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
926 
927 			if (!head_desc) {
928 				head_desc = tx_desc;
929 				tail_desc = tx_desc;
930 			}
931 
932 			tail_desc->next = tx_desc;
933 			tx_desc->next = NULL;
934 			tail_desc = tx_desc;
935 
936 			count++;
937 
938 			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
939 						       num_avail_for_reap,
940 						       hal_ring_hdl,
941 						       &last_prefetch_hw_desc,
942 						       &last_prefetch_sw_desc);
943 		}
944 	}
945 
946 	dp_srng_access_end(NULL, soc, hal_ring_hdl);
947 
948 	if (head_desc)
949 		dp_tx_comp_process_desc_list(soc, head_desc,
950 					     CDP_MAX_TX_COMP_PPE_RING);
951 
952 	return count;
953 }
954 #endif
955 
956 #if defined(QCA_SUPPORT_WDS_EXTENDED)
957 static inline void
958 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
959 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
960 			     uint16_t *ast_idx, uint16_t *ast_hash)
961 {
962 	struct dp_peer *peer = NULL;
963 
964 	if (tx_exc_metadata->is_wds_extended) {
965 		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
966 					     DP_MOD_ID_TX);
967 		if (peer) {
968 			*ast_idx = peer->ast_idx;
969 			*ast_hash = peer->ast_hash;
970 			hal_tx_desc_set_index_lookup_override
971 							(soc->hal_soc,
972 							 hal_tx_desc_cached,
973 							 0x1);
974 			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
975 		}
976 	} else {
977 		return;
978 	}
979 }
980 
981 #else
982 static inline void
983 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
984 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
985 			     uint16_t *ast_idx, uint16_t *ast_hash)
986 {
987 }
988 #endif
989 
990 QDF_STATUS
991 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
992 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
993 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
994 		    struct dp_tx_msdu_info_s *msdu_info)
995 {
996 	void *hal_tx_desc;
997 	uint32_t *hal_tx_desc_cached;
998 	int coalesce = 0;
999 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1000 	uint8_t ring_id = tx_q->ring_id;
1001 	uint8_t tid = msdu_info->tid;
1002 	struct dp_vdev_be *be_vdev;
1003 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1004 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
1005 	hal_ring_handle_t hal_ring_hdl = NULL;
1006 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1007 	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
1008 	uint16_t ast_idx = vdev->bss_ast_idx;
1009 	uint16_t ast_hash = vdev->bss_ast_hash;
1010 
1011 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1012 
1013 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1014 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1015 		return QDF_STATUS_E_RESOURCES;
1016 	}
1017 
1018 	if (qdf_unlikely(tx_exc_metadata)) {
1019 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
1020 				   CDP_INVALID_TX_ENCAP_TYPE) ||
1021 				   (tx_exc_metadata->tx_encap_type ==
1022 				    vdev->tx_encap_type));
1023 
1024 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
1025 			qdf_assert_always((tx_exc_metadata->sec_type ==
1026 					   CDP_INVALID_SEC_TYPE) ||
1027 					   tx_exc_metadata->sec_type ==
1028 					   vdev->sec_type);
1029 		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
1030 					     tx_exc_metadata,
1031 					     &ast_idx, &ast_hash);
1032 	}
1033 
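	/*
	 * The TCL descriptor is assembled in the cached_desc buffer on the
	 * stack and synced into the HW SRNG entry only after a slot has been
	 * reserved below.
	 */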
1034 	hal_tx_desc_cached = (void *)cached_desc;
1035 
1036 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
1037 		dp_sawf_config_be(soc, hal_tx_desc_cached,
1038 				  &fw_metadata, tx_desc->nbuf);
1039 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
1040 	}
1041 
1042 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
1043 				    tx_desc->dma_addr, bm_id, tx_desc->id,
1044 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1045 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
1046 				   vdev->lmac_id);
1047 
1048 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
1049 					ast_idx);
1050 	/*
1051 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium, so there is
1052 	 * no explicit field for DSCP_TID_TABLE_NUM.
1053 	 */
1054 
1055 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1056 				      (ast_hash & 0xF));
1057 
1058 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1059 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1060 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1061 
1062 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1063 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1064 
1065 	/* verify checksum offload configuration */
1066 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
1067 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
1068 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
1069 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1070 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1071 	}
1072 
1073 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
1074 
1075 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
1076 
1077 	if (tid != HTT_TX_EXT_TID_INVALID)
1078 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1079 
1080 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
1081 						tx_desc->nbuf);
1082 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
1083 
1084 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1085 
1086 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1087 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1088 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1089 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1090 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1091 		return status;
1092 	}
1093 
1094 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1095 	if (qdf_unlikely(!hal_tx_desc)) {
1096 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1097 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1098 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1099 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1100 		goto ring_access_fail;
1101 	}
1102 
1103 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1104 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1105 
1106 	/* Sync cached descriptor with HW */
1107 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
1108 
1109 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
1110 					    msdu_info, ring_id);
1111 
1112 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, dp_tx_get_pkt_len(tx_desc));
1113 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
1114 	dp_tx_update_stats(soc, tx_desc, ring_id);
1115 	status = QDF_STATUS_SUCCESS;
1116 
1117 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
1118 				 hal_ring_hdl, soc, ring_id);
1119 
1120 ring_access_fail:
1121 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
1122 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
1123 			     qdf_get_log_timestamp(), tx_desc->nbuf);
1124 	return status;
1125 }
1126 
1127 #ifdef IPA_OFFLOAD
1128 static void
1129 dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
1130 			  union hal_tx_bank_config *bank_config)
1131 {
1132 	bank_config->epd = 0;
1133 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
1134 	bank_config->encrypt_type = 0;
1135 
1136 	bank_config->src_buffer_swap = 0;
1137 	bank_config->link_meta_swap = 0;
1138 
1139 	bank_config->index_lookup_enable = 0;
1140 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1141 	bank_config->addrx_en = 1;
1142 	bank_config->addry_en = 1;
1143 
1144 	bank_config->mesh_enable = 0;
1145 	bank_config->dscp_tid_map_id = 0;
1146 	bank_config->vdev_id_check_en = 0;
1147 	bank_config->pmac_id = 0;
1148 }
1149 
1150 static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1151 {
1152 	union hal_tx_bank_config ipa_config = {0};
1153 	int bid;
1154 
1155 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
1156 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
1157 		return;
1158 	}
1159 
1160 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
1161 
1162 	/* Let IPA use last HOST owned bank */
1163 	bid = be_soc->num_bank_profiles - 1;
1164 
1165 	be_soc->bank_profiles[bid].is_configured = true;
1166 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
1167 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1168 				      &be_soc->bank_profiles[bid].bank_config,
1169 				      bid);
1170 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
1171 
1172 	dp_info("IPA bank at slot %d config:0x%x", bid,
1173 		be_soc->bank_profiles[bid].bank_config.val);
1174 
1175 	be_soc->ipa_bank_id = bid;
1176 }
1177 #else /* !IPA_OFFLOAD */
1178 static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1179 {
1180 }
1181 #endif /* IPA_OFFLOAD */
1182 
1183 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
1184 {
1185 	int i, num_tcl_banks;
1186 
1187 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
1188 
1189 	qdf_assert_always(num_tcl_banks);
1190 	be_soc->num_bank_profiles = num_tcl_banks;
1191 
1192 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
1193 					       sizeof(*be_soc->bank_profiles));
1194 	if (!be_soc->bank_profiles) {
1195 		dp_err("unable to allocate memory for DP TX Profiles!");
1196 		return QDF_STATUS_E_NOMEM;
1197 	}
1198 
1199 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
1200 
1201 	for (i = 0; i < num_tcl_banks; i++) {
1202 		be_soc->bank_profiles[i].is_configured = false;
1203 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
1204 	}
1205 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
1206 
1207 	dp_tx_init_ipa_bank_profile(be_soc);
1208 
1209 	return QDF_STATUS_SUCCESS;
1210 }
1211 
1212 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
1213 {
1214 	qdf_mem_free(be_soc->bank_profiles);
1215 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
1216 }
1217 
1218 static
1219 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1220 				union hal_tx_bank_config *bank_config)
1221 {
1222 	struct dp_vdev *vdev = &be_vdev->vdev;
1223 
1224 	bank_config->epd = 0;
1225 
1226 	bank_config->encap_type = vdev->tx_encap_type;
1227 
1228 	/* Only valid for raw frames. Needs work for RAW mode */
1229 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1230 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1231 	} else {
1232 		bank_config->encrypt_type = 0;
1233 	}
1234 
1235 	bank_config->src_buffer_swap = 0;
1236 	bank_config->link_meta_swap = 0;
1237 
1238 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1239 	    vdev->opmode == wlan_op_mode_sta) {
1240 		bank_config->index_lookup_enable = 1;
1241 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1242 		bank_config->addrx_en = 0;
1243 		bank_config->addry_en = 0;
1244 	} else {
1245 		bank_config->index_lookup_enable = 0;
1246 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1247 		bank_config->addrx_en =
1248 			(vdev->hal_desc_addr_search_flags &
1249 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1250 		bank_config->addry_en =
1251 			(vdev->hal_desc_addr_search_flags &
1252 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1253 	}
1254 
1255 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1256 
1257 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1258 
1259 	/* Disabling vdev id check for now. Needs revisit. */
1260 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1261 
1262 	bank_config->pmac_id = vdev->lmac_id;
1263 }
1264 
1265 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1266 			   struct dp_vdev_be *be_vdev)
1267 {
1268 	char *temp_str = "";
1269 	bool found_match = false;
1270 	int bank_id = DP_BE_INVALID_BANK_ID;
1271 	int i;
1272 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1273 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1274 	union hal_tx_bank_config vdev_config = {0};
1275 
1276 	/* convert vdev params into hal_tx_bank_config */
1277 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1278 
1279 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1280 	/* go over all banks and find a matching/unconfigured/unused bank */
1281 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1282 		if (be_soc->bank_profiles[i].is_configured &&
1283 		    (be_soc->bank_profiles[i].bank_config.val ^
1284 						vdev_config.val) == 0) {
1285 			found_match = true;
1286 			break;
1287 		}
1288 
1289 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1290 		    !be_soc->bank_profiles[i].is_configured)
1291 			unconfigured_slot = i;
1292 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1293 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1294 			zero_ref_count_slot = i;
1295 	}
1296 
1297 	if (found_match) {
1298 		temp_str = "matching";
1299 		bank_id = i;
1300 		goto inc_ref_and_return;
1301 	}
1302 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1303 		temp_str = "unconfigured";
1304 		bank_id = unconfigured_slot;
1305 		goto configure_and_return;
1306 	}
1307 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1308 		temp_str = "zero_ref_count";
1309 		bank_id = zero_ref_count_slot;
1310 	}
1311 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1312 		dp_alert("unable to find TX bank!");
1313 		QDF_BUG(0);
		/* do not return with tx_bank_lock held */
		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1314 		return bank_id;
1315 	}
1316 
1317 configure_and_return:
1318 	be_soc->bank_profiles[bank_id].is_configured = true;
1319 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1320 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1321 				      &be_soc->bank_profiles[bank_id].bank_config,
1322 				      bank_id);
1323 inc_ref_and_return:
1324 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1325 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1326 
1327 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1328 		temp_str, bank_id, vdev_config.val,
1329 		be_soc->bank_profiles[bank_id].bank_config.val,
1330 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1331 
1332 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1333 		be_soc->bank_profiles[bank_id].bank_config.epd,
1334 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1335 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1336 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1337 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1338 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1339 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1340 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1341 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1342 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1343 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1344 
1345 	return bank_id;
1346 }
1347 
1348 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
1349 			    struct dp_vdev_be *be_vdev)
1350 {
1351 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1352 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
1353 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1354 }
1355 
1356 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
1357 			       struct dp_vdev_be *be_vdev)
1358 {
1359 	dp_tx_put_bank_profile(be_soc, be_vdev);
1360 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1361 	be_vdev->vdev.bank_id = be_vdev->bank_id;
1362 }
1363 
1364 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1365 				   uint32_t num_elem,
1366 				   uint8_t pool_id)
1367 {
1368 	struct dp_tx_desc_pool_s *tx_desc_pool;
1369 	struct dp_hw_cookie_conversion_t *cc_ctx;
1370 	struct dp_soc_be *be_soc;
1371 	struct dp_spt_page_desc *page_desc;
1372 	struct dp_tx_desc_s *tx_desc;
1373 	uint32_t ppt_idx = 0;
1374 	uint32_t avail_entry_index = 0;
1375 
1376 	if (!num_elem) {
1377 		dp_err("desc_num 0 !!");
1378 		return QDF_STATUS_E_FAILURE;
1379 	}
1380 
1381 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1382 	tx_desc_pool = &soc->tx_desc[pool_id];
1383 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1384 
1385 	tx_desc = tx_desc_pool->freelist;
1386 	page_desc = &cc_ctx->page_desc_base[0];
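	/*
	 * The cookie generated for each descriptor (PPT index + entry index)
	 * is what the completion path converts back to the descriptor VA.
	 */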
1387 	while (tx_desc) {
1388 		if (avail_entry_index == 0) {
1389 			if (ppt_idx >= cc_ctx->total_page_num) {
1390 				dp_alert("insufficient secondary page tables");
1391 				qdf_assert_always(0);
1392 			}
1393 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1394 		}
1395 
1396 		/* put each TX Desc VA into the SPT pages and
1397 		 * get the corresponding ID
1398 		 */
1399 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1400 					 avail_entry_index,
1401 					 tx_desc);
1402 		tx_desc->id =
1403 			dp_cc_desc_id_generate(page_desc->ppt_index,
1404 					       avail_entry_index);
1405 		tx_desc->pool_id = pool_id;
1406 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1407 		tx_desc = tx_desc->next;
1408 		avail_entry_index = (avail_entry_index + 1) &
1409 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1410 	}
1411 
1412 	return QDF_STATUS_SUCCESS;
1413 }
1414 
1415 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
1416 			       struct dp_tx_desc_pool_s *tx_desc_pool,
1417 			       uint8_t pool_id)
1418 {
1419 	struct dp_spt_page_desc *page_desc;
1420 	struct dp_soc_be *be_soc;
1421 	int i = 0;
1422 	struct dp_hw_cookie_conversion_t *cc_ctx;
1423 
1424 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1425 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1426 
1427 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1428 		page_desc = &cc_ctx->page_desc_base[i];
1429 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1430 	}
1431 }
1432 
1433 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1434 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1435 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1436 			       uint32_t quota)
1437 {
1438 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1439 	uint32_t work_done = 0;
1440 
1441 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1442 			DP_SRNG_THRESH_NEAR_FULL)
1443 		return 0;
1444 
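	/* Only mark the ring as near full here; no descriptors are reaped in
	 * this handler.
	 */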
1445 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1446 	work_done++;
1447 
1448 	return work_done;
1449 }
1450 #endif
1451 
1452 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1453 	defined(WLAN_CONFIG_TX_DELAY)
1454 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
1455 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
1456 
1457 #define HW_TX_DELAY_MAX                       0x1000000
1458 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
1459 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
1460 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
1461 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
1462 	 HW_TX_DELAY_MASK)
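/*
 * The buffer_timestamp from the TX completion is left-shifted by 10 (i.e.
 * treated as 1024 us units) to convert it to microseconds; delay arithmetic
 * is done modulo the 29-bit HW_TX_DELAY_MASK to absorb timestamp wraparound.
 */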
1463 
1464 static inline
1465 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1466 				      struct dp_vdev *vdev,
1467 				      struct hal_tx_completion_status *ts,
1468 				      uint32_t *delay_us)
1469 {
1470 	uint32_t ppdu_id;
1471 	uint8_t link_id_offset, link_id_bits;
1472 	uint8_t hw_link_id;
1473 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1474 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1475 	uint32_t delay;
1476 	int32_t delta_tsf2, delta_tqm;
1477 
1478 	if (!ts->valid)
1479 		return QDF_STATUS_E_INVAL;
1480 
1481 	link_id_offset = soc->link_id_offset;
1482 	link_id_bits = soc->link_id_bits;
1483 	ppdu_id = ts->ppdu_id;
1484 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1485 					   link_id_bits);
1486 
1487 	msdu_tqm_enqueue_tstamp_us =
1488 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1489 	msdu_compl_tsf_tstamp_us = ts->tsf;
1490 
1491 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1492 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1493 
1494 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1495 			delta_tqm) & HW_TX_DELAY_MASK;
1496 
1497 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1498 			delta_tsf2) & HW_TX_DELAY_MASK;
1499 
1500 	delay = (final_msdu_compl_tsf_tstamp_us -
1501 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1502 
1503 	if (delay > HW_TX_DELAY_MAX)
1504 		return QDF_STATUS_E_FAILURE;
1505 
1506 	if (delay_us)
1507 		*delay_us = delay;
1508 
1509 	return QDF_STATUS_SUCCESS;
1510 }
1511 #else
1512 static inline
1513 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1514 				      struct dp_vdev *vdev,
1515 				      struct hal_tx_completion_status *ts,
1516 				      uint32_t *delay_us)
1517 {
1518 	return QDF_STATUS_SUCCESS;
1519 }
1520 #endif
1521 
1522 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1523 				     struct dp_vdev *vdev,
1524 				     struct hal_tx_completion_status *ts,
1525 				     uint32_t *delay_us)
1526 {
1527 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1528 }
1529 
1530 static inline
1531 qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
1532 				 struct dp_tx_desc_s *tx_desc,
1533 				 qdf_nbuf_t nbuf)
1534 {
1535 	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
1536 					(void *)(nbuf->data + 256));
1537 
1538 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1539 }
1540 
1541 static inline
1542 void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
1543 			 struct dp_tx_desc_s *desc)
1544 {
1545 }
1546 
1547 /**
1548  * dp_tx_fast_send_be() - Transmit a frame on a given VAP
1549  * @soc_hdl: CDP soc handle
1550  * @vdev_id: id of DP vdev handle
1551  * @nbuf: skb
1552  *
1553  * Entry point for Core Tx layer (DP_TX) invoked from
1554  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
1555  * cases
1556  *
1557  * Return: NULL on success,
1558  *         nbuf when it fails to send
1559  */
1560 #ifdef QCA_DP_TX_NBUF_LIST_FREE
1561 qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1562 			      qdf_nbuf_t nbuf)
1563 {
1564 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1565 	struct dp_vdev *vdev = NULL;
1566 	struct dp_pdev *pdev = NULL;
1567 	struct dp_tx_desc_s *tx_desc;
1568 	uint16_t desc_pool_id;
1569 	uint16_t pkt_len;
1570 	qdf_dma_addr_t paddr;
1571 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1572 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1573 	hal_ring_handle_t hal_ring_hdl = NULL;
1574 	uint32_t *hal_tx_desc_cached;
1575 	void *hal_tx_desc;
1576 	uint8_t desc_size = DP_TX_FAST_DESC_SIZE;
1577 
1578 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
1579 		return nbuf;
1580 
1581 	vdev = soc->vdev_id_map[vdev_id];
1582 	if (qdf_unlikely(!vdev))
1583 		return nbuf;
1584 
1585 	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
1586 
1587 	pkt_len = qdf_nbuf_headlen(nbuf);
1588 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
1589 	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
1590 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);
1591 
1592 	pdev = vdev->pdev;
1593 	if (dp_tx_limit_check(vdev, nbuf))
1594 		return nbuf;
1595 
1596 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1597 
1598 	if (qdf_unlikely(!tx_desc)) {
1599 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1600 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1601 		return nbuf;
1602 	}
1603 
1604 	dp_tx_outstanding_inc(pdev);
1605 
1606 	/* Initialize the SW tx descriptor */
1607 	tx_desc->nbuf = nbuf;
1608 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1609 	tx_desc->frm_type = dp_tx_frm_std;
1610 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1611 	tx_desc->vdev_id = vdev_id;
1612 	tx_desc->pdev = pdev;
1613 	tx_desc->pkt_offset = 0;
1614 	tx_desc->length = pkt_len;
1615 	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
1616 	tx_desc->nbuf->fast_recycled = 1;
1617 
1618 	if (nbuf->is_from_recycler && nbuf->fast_xmit)
1619 		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;
1620 
1621 	paddr =  dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
1622 	if (!paddr) {
1623 		/* Handle failure */
1624 		dp_err("qdf_nbuf_map failed");
1625 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1626 		goto release_desc;
1627 	}
1628 
1629 	tx_desc->dma_addr = paddr;
1630 
1631 	hal_tx_desc_cached = (void *)cached_desc;
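	/*
	 * Fast path: hand-pack the minimal set of TCL data command words
	 * instead of going through the generic hal_tx_desc_set_* helpers.
	 */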
1632 	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
1633 	hal_tx_desc_cached[1] = tx_desc->id <<
1634 		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;
1635 
1636 	/* bank_id */
1637 	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
1638 	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
1639 		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;
1640 
1641 	hal_tx_desc_cached[4] = tx_desc->length;
1642 	/* l3 and l4 checksum enable */
1643 	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
1644 		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;
1645 
1646 	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
1647 	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;
1648 
1649 	if (vdev->opmode == wlan_op_mode_sta) {
1650 		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
1651 			((vdev->bss_ast_hash & 0xF) <<
1652 			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);
1653 		desc_size = DP_TX_FAST_DESC_SIZE + 4;
1654 	}
1655 
1656 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);
1657 
1658 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1659 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1660 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1661 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1662 		goto ring_access_fail2;
1663 	}
1664 
1665 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1666 	if (qdf_unlikely(!hal_tx_desc)) {
1667 		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
1668 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1669 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1670 		goto ring_access_fail;
1671 	}
1672 
1673 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1674 
1675 	/* Sync cached descriptor with HW */
1676 	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, desc_size);
1677 	qdf_dsb();
1678 
1679 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1680 	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
1681 	status = QDF_STATUS_SUCCESS;
1682 
1683 ring_access_fail:
1684 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1685 
1686 ring_access_fail2:
1687 	if (status != QDF_STATUS_SUCCESS) {
1688 		dp_tx_nbuf_unmap_be(soc, tx_desc);
1689 		goto release_desc;
1690 	}
1691 
1692 	return NULL;
1693 
1694 release_desc:
1695 	dp_tx_desc_release(tx_desc, desc_pool_id);
1696 
1697 	return nbuf;
1698 }
1699 #endif
1700