xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 
33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
38 #else
39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
43 #endif
44 
45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
46 #ifdef WLAN_MCAST_MLO
47 /* MLO peer id for reinject */
48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
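/*
 * 12-bit group sequence number (GSN) space for MLO mcast; senders wrap the
 * per-vdev seq_num back to 0 once it exceeds MAX_GSN_NUM.
 */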
49 #define MAX_GSN_NUM 0x0FFF
50 
51 #ifdef QCA_MULTIPASS_SUPPORT
52 #define INVALID_VLAN_ID         0xFFFF
53 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
54 /**
55  * struct dp_mlo_mpass_buf - Multipass buffer
56  * @vlan_id: vlan_id of frame
57  * @nbuf: pointer to skb buf
58  */
59 struct dp_mlo_mpass_buf {
60 	uint16_t vlan_id;
61 	qdf_nbuf_t  nbuf;
62 };
63 #endif
64 #endif
65 #endif
66 
67 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
68 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
69 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
70 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
71 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
72 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
73 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
74 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
75 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
76 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
77 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
78 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
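/*
 * The TX WBM completion v3 accessors above reuse the HTT v2 field getters,
 * as these fields keep the same bit positions in the v3 layout.
 */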
79 
80 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
81 
82 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
83 /*
84  * Value written into the buffer_virt_addr_63_32 field of the WBM2SW ring
85  * descriptor to mark it as invalidated after each reaping.
86  */
87 #define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212
88 
89 /**
90  * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
91  *					    invalidate it after each reaping
92  * @tx_comp_hal_desc: ring desc virtual address
93  * @r_tx_desc: pointer to current dp TX Desc pointer
94  * @tx_desc_va: original 64-bit descriptor VA read from the ring desc
95  * @hw_cc_done: HW cookie conversion done or not
96  *
97  * If HW CC is done, check the buffer_virt_addr_63_32 value to know whether
98  * the ring desc is stale. If HW CC is not done, compare the PA in the ring
99  * desc against the PA stored in the current TX desc.
100  *
101  * Return: None.
102  */
103 static inline
104 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
105 					  struct dp_tx_desc_s **r_tx_desc,
106 					  uint64_t tx_desc_va,
107 					  bool hw_cc_done)
108 {
109 	qdf_dma_addr_t desc_dma_addr;
110 
111 	if (qdf_likely(hw_cc_done)) {
112 		/* Check upper 32 bits */
113 		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
114 		    (tx_desc_va >> 32))
115 			*r_tx_desc = NULL;
116 
117 		/* Invalidate the ring desc for 32 ~ 63 bits of VA */
118 		hal_tx_comp_set_desc_va_63_32(
119 				tx_comp_hal_desc,
120 				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
121 	} else {
122 		/* Compare PA between ring desc and current TX desc stored */
123 		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);
124 
125 		if (desc_dma_addr != (*r_tx_desc)->dma_addr)
126 			*r_tx_desc = NULL;
127 	}
128 }
129 #else
130 static inline
131 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
132 					  struct dp_tx_desc_s **r_tx_desc,
133 					  uint64_t tx_desc_va,
134 					  bool hw_cc_done)
135 {
136 }
137 #endif
138 
139 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
140 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
141 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
142 					    void *tx_comp_hal_desc,
143 					    struct dp_tx_desc_s **r_tx_desc)
144 {
145 	uint32_t tx_desc_id;
146 	uint64_t tx_desc_va = 0;
147 	bool hw_cc_done =
148 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);
149 
150 	if (qdf_likely(hw_cc_done)) {
151 		/* HW cookie conversion done */
152 		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
153 		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
154 
155 	} else {
156 		/* SW does cookie conversion to VA */
157 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
158 		*r_tx_desc =
159 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
160 	}
161 
162 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
163 					     r_tx_desc, tx_desc_va,
164 					     hw_cc_done);
165 
166 	if (*r_tx_desc)
167 		(*r_tx_desc)->peer_id =
168 				dp_tx_comp_get_peer_id_be(soc,
169 							  tx_comp_hal_desc);
170 }
171 #else
172 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
173 					    void *tx_comp_hal_desc,
174 					    struct dp_tx_desc_s **r_tx_desc)
175 {
176 	uint64_t tx_desc_va;
177 
178 	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
179 	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
180 
181 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
182 					     r_tx_desc,
183 					     tx_desc_va,
184 					     true);
185 	if (*r_tx_desc)
186 		(*r_tx_desc)->peer_id =
187 				dp_tx_comp_get_peer_id_be(soc,
188 							  tx_comp_hal_desc);
189 }
190 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
191 #else
192 
193 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
194 					    void *tx_comp_hal_desc,
195 					    struct dp_tx_desc_s **r_tx_desc)
196 {
197 	uint32_t tx_desc_id;
198 
199 	/* SW does cookie conversion to VA */
200 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
201 	*r_tx_desc =
202 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
203 
204 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
205 					     r_tx_desc, 0,
206 					     false);
207 
208 	if (*r_tx_desc)
209 		(*r_tx_desc)->peer_id =
210 				dp_tx_comp_get_peer_id_be(soc,
211 							  tx_comp_hal_desc);
212 }
213 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
214 
215 static inline
216 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
217 {
218 	struct dp_vdev *vdev;
219 	uint8_t vdev_id;
220 	uint32_t *htt_desc = (uint32_t *)status;
221 
222 	qdf_assert_always(!soc->mec_fw_offload);
223 
224 	/*
225 	 * Get vdev id from HTT status word in case of MEC
226 	 * notification
227 	 */
228 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
229 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
230 		return;
231 
232 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
233 				     DP_MOD_ID_HTT_COMP);
234 	if (!vdev)
235 		return;
236 	dp_tx_mec_handler(vdev, status);
237 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
238 }
239 
240 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
241 				     struct dp_tx_desc_s *tx_desc,
242 				     uint8_t *status,
243 				     uint8_t ring_id)
244 {
245 	uint8_t tx_status;
246 	struct dp_pdev *pdev;
247 	struct dp_vdev *vdev = NULL;
248 	struct hal_tx_completion_status ts = {0};
249 	uint32_t *htt_desc = (uint32_t *)status;
250 	struct dp_txrx_peer *txrx_peer;
251 	dp_txrx_ref_handle txrx_ref_handle = NULL;
252 	struct cdp_tid_tx_stats *tid_stats = NULL;
253 	struct htt_soc *htt_handle;
254 	uint8_t vdev_id;
255 
256 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
257 	htt_handle = (struct htt_soc *)soc->htt_handle;
258 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
259 
260 	/*
261 	 * There can be a scenario where WBM consumes the descriptor enqueued
262 	 * via TQM2WBM first, so the TQM completion is processed before the MEC
263 	 * notification arrives from FW2WBM. Do not access any field of the tx
264 	 * descriptor in case of a MEC notify.
265 	 */
266 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
267 		return dp_tx_process_mec_notify_be(soc, status);
268 
269 	/*
270 	 * If the descriptor is already freed in vdev_detach,
271 	 * continue to next descriptor
272 	 */
273 	if (qdf_unlikely(!tx_desc->flags)) {
274 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
275 				   tx_desc->id);
276 		return;
277 	}
278 
279 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
280 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
281 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
282 		goto release_tx_desc;
283 	}
284 
285 	pdev = tx_desc->pdev;
286 
287 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
288 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
289 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
290 		goto release_tx_desc;
291 	}
292 
293 	qdf_assert(tx_desc->pdev);
294 
295 	vdev_id = tx_desc->vdev_id;
296 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
297 				     DP_MOD_ID_HTT_COMP);
298 
299 	if (qdf_unlikely(!vdev)) {
300 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
301 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
302 		goto release_tx_desc;
303 	}
304 
305 	switch (tx_status) {
306 	case HTT_TX_FW2WBM_TX_STATUS_OK:
307 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
308 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
309 	{
310 		uint8_t tid;
311 
312 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
313 			ts.peer_id =
314 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
315 						htt_desc[3]);
316 			ts.tid =
317 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
318 						htt_desc[3]);
319 		} else {
320 			ts.peer_id = HTT_INVALID_PEER;
321 			ts.tid = HTT_INVALID_TID;
322 		}
323 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
324 		ts.ppdu_id =
325 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
326 					htt_desc[2]);
327 		ts.ack_frame_rssi =
328 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
329 					htt_desc[2]);
330 
331 		ts.tsf = htt_desc[4];
332 		ts.first_msdu = 1;
333 		ts.last_msdu = 1;
334 		switch (tx_status) {
335 		case HTT_TX_FW2WBM_TX_STATUS_OK:
336 			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
337 			break;
338 		case HTT_TX_FW2WBM_TX_STATUS_DROP:
339 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
340 			break;
341 		case HTT_TX_FW2WBM_TX_STATUS_TTL:
342 			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
343 			break;
344 		}
345 		tid = ts.tid;
346 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
347 			tid = CDP_MAX_DATA_TIDS - 1;
348 
349 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
350 
351 		if (qdf_unlikely(pdev->delay_stats_flag) ||
352 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
353 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
354 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
355 			tid_stats->htt_status_cnt[tx_status]++;
356 
357 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
358 						       &txrx_ref_handle,
359 						       DP_MOD_ID_HTT_COMP);
360 		if (qdf_likely(txrx_peer))
361 			dp_tx_update_peer_basic_stats(
362 						txrx_peer,
363 						qdf_nbuf_len(tx_desc->nbuf),
364 						tx_status,
365 						pdev->enhanced_stats_en);
366 
367 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
368 					     ring_id);
369 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
370 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
371 
372 		if (qdf_likely(txrx_peer))
373 			dp_txrx_peer_unref_delete(txrx_ref_handle,
374 						  DP_MOD_ID_HTT_COMP);
375 
376 		break;
377 	}
378 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
379 	{
380 		uint8_t reinject_reason;
381 
382 		reinject_reason =
383 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
384 								htt_desc[1]);
385 		dp_tx_reinject_handler(soc, vdev, tx_desc,
386 				       status, reinject_reason);
387 		break;
388 	}
389 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
390 	{
391 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
392 		break;
393 	}
394 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
395 	{
396 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
397 		goto release_tx_desc;
398 	}
399 	default:
400 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
401 			       tx_status);
402 		goto release_tx_desc;
403 	}
404 
405 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
406 	return;
407 
408 release_tx_desc:
409 	dp_tx_comp_free_buf(soc, tx_desc, false);
410 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
411 	if (vdev)
412 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
413 }
414 
415 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
416 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
417 /**
418  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion
419  * @soc: DP soc structure pointer
420  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
421  *
422  * Return: RBM ID corresponding to TCL ring_id
423  */
424 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
425 					  uint8_t ring_id)
426 {
427 	return 0;
428 }
429 #else
430 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
431 					  uint8_t ring_id)
432 {
433 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
434 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
435 }
436 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
437 #else
438 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
439 					  uint8_t tcl_index)
440 {
441 	uint8_t rbm;
442 
443 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
444 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
445 	return rbm;
446 }
447 #endif
448 
449 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
450 
451 /**
452  * dp_tx_set_min_rates_for_critical_frames() - set min-rates for critical pkts
453  * @soc: DP soc structure pointer
454  * @hal_tx_desc: HAL descriptor where fields are set
455  * @nbuf: skb to be considered for min rates
456  *
457  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
458  * and uses it to determine if the frame is critical. For a critical frame,
459  * flow override bits are set to classify the frame into HW's high priority
460  * queue. The HW will pick pre-configured min rates for such packets.
461  *
462  * Return: None
463  */
464 static void
465 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
466 					uint32_t *hal_tx_desc,
467 					qdf_nbuf_t nbuf)
468 {
469 /*
470  * Critical frames should be queued to the high priority queue for the TID
471  * on which they are sent out (for the concerned peer).
472  * FW uses HTT_MSDU_Q_IDX 2 for the HOL (high priority) queue.
473  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
474  * Hence use who_classify_info_sel = 1, flow_override = 0, which yields
475  * htt_msdu_idx = 2 and selects the HOL queue.
476  */
477 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
478 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
479 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
480 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
481 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
482 						TX_SEMI_HARD_NOTIFY_E);
483 	}
484 }
485 #else
486 static inline void
487 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
488 					uint32_t *hal_tx_desc_cached,
489 					qdf_nbuf_t nbuf)
490 {
491 }
492 #endif
493 
494 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
495 /**
496  * dp_tx_set_particular_tx_queue() - route particular TX packets (currently
497  *				     TCP ACKs only) to TQM flow queue 3
498  * @soc: DP soc structure pointer
499  * @hal_tx_desc: HAL descriptor where fields are set
500  * @nbuf: skb to be considered for particular TX queue
501  *
502  * Return: None
503  */
504 static inline
505 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
506 				   uint32_t *hal_tx_desc,
507 				   qdf_nbuf_t nbuf)
508 {
509 	if (!soc->wlan_cfg_ctx->tx_pkt_inspect_for_ilp)
510 		return;
511 
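	/*
	 * With flow_override = 1 and who_classify_info_sel = 1 set below,
	 * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override = 3,
	 * i.e. TCP ACKs are steered to TQM flow queue 3.
	 */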
512 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
513 			 QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
514 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
515 		hal_tx_desc_set_flow_override(hal_tx_desc, 1);
516 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
517 	}
518 }
519 #else
520 static inline
521 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
522 				   uint32_t *hal_tx_desc,
523 				   qdf_nbuf_t nbuf)
524 {
525 }
526 #endif
527 
528 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
529 	defined(WLAN_MCAST_MLO)
530 #ifdef QCA_MULTIPASS_SUPPORT
531 /**
532  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
533  * @be_vdev: Handle to DP be_vdev structure
534  * @ptnr_vdev: DP ptnr_vdev handle
535  * @arg: pointer to dp_mlo_mpass_buf
536  *
537  * Return: None
538  */
539 static void
540 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
541 				 struct dp_vdev *ptnr_vdev,
542 				 void *arg)
543 {
544 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
545 	struct dp_txrx_peer *txrx_peer = NULL;
546 	struct vlan_ethhdr *veh = NULL;
547 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
548 	uint16_t vlan_id = 0;
549 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
550 			(htons(eh->ether_type) != ETH_P_8021Q));
551 
552 	if (qdf_unlikely(not_vlan))
553 		return;
554 	veh = (struct vlan_ethhdr *)eh;
555 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
556 
557 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
558 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
559 		      mpass_peer_list_elem) {
560 		if (vlan_id == txrx_peer->vlan_id) {
561 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
562 			ptr->vlan_id = vlan_id;
563 			return;
564 		}
565 	}
566 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
567 }
568 
569 /**
570  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
571  * @be_vdev: Handle to DP be_vdev structure
572  * @ptnr_vdev: DP ptnr_vdev handle
573  * @arg: pointer to dp_mlo_mpass_buf
574  *
575  * Return: None
576  */
577 static void
578 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
579 			       struct dp_vdev *ptnr_vdev,
580 			       void *arg)
581 {
582 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
583 	struct dp_tx_msdu_info_s msdu_info;
584 	struct dp_vdev_be *be_ptnr_vdev = NULL;
585 	qdf_nbuf_t  nbuf_clone;
586 	uint16_t group_key = 0;
587 
588 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
589 	if (be_vdev != be_ptnr_vdev) {
590 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
591 		if (qdf_unlikely(!nbuf_clone)) {
592 			dp_tx_debug("nbuf clone failed");
593 			return;
594 		}
595 	} else {
596 		nbuf_clone = ptr->nbuf;
597 	}
598 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
599 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
600 	msdu_info.gsn = be_vdev->seq_num;
601 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
602 
603 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
604 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
605 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
606 						msdu_info.meta_data[0], 1);
607 	} else {
608 		/* return when vlan map is not initialized */
609 		if (!ptnr_vdev->iv_vlan_map)
610 			return;
611 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
612 
613 		/*
614 		 * If group key is not installed, drop the frame.
615 		 */
616 
617 		if (!group_key)
618 			return;
619 
620 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
621 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
622 		msdu_info.exception_fw = 1;
623 	}
624 
625 	nbuf_clone = dp_tx_send_msdu_single(
626 					ptnr_vdev,
627 					nbuf_clone,
628 					&msdu_info,
629 					DP_MLO_MCAST_REINJECT_PEER_ID,
630 					NULL);
631 	if (qdf_unlikely(nbuf_clone)) {
632 		dp_info("pkt send failed");
633 		qdf_nbuf_free(nbuf_clone);
634 		return;
635 	}
636 }
637 
638 /**
639  * dp_tx_mlo_mcast_multipass_handler() - check if frame needs multipass processing
640  * @soc: DP soc handle
641  * @vdev: DP vdev handle
642  * @nbuf: nbuf to be enqueued
643  *
644  * Return: true if handling is done else false
645  */
646 static bool
647 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
648 				  struct dp_vdev *vdev,
649 				  qdf_nbuf_t nbuf)
650 {
651 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
652 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
653 	qdf_nbuf_t nbuf_copy = NULL;
654 	struct dp_mlo_mpass_buf mpass_buf;
655 
656 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
657 	mpass_buf.vlan_id = INVALID_VLAN_ID;
658 	mpass_buf.nbuf = nbuf;
659 
660 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
661 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
662 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
663 					    dp_tx_mlo_mcast_multipass_lookup,
664 					    &mpass_buf, DP_MOD_ID_TX);
665 		/*
666 		 * Do not drop the frame when vlan_id doesn't match.
667 		 * Send the frame as it is.
668 		 */
669 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
670 			return false;
671 	}
672 
673 	/* AP can have classic clients, special clients &
674 	 * classic repeaters.
675 	 * 1. Classic clients & special client:
676 	 *	Remove vlan header, find corresponding group key
677 	 *	index, fill in metaheader and enqueue multicast
678 	 *	frame to TCL.
679 	 * 2. Classic repeater:
680 	 *	Pass through to classic repeater with vlan tag
681 	 *	intact without any group key index. Hardware
682 	 *	will know which key to use to send frame to
683 	 *	repeater.
684 	 */
685 	nbuf_copy = qdf_nbuf_copy(nbuf);
686 
687 	/*
688 	 * Send multicast frame to special peers even
689 	 * if pass through to classic repeater fails.
690 	 */
691 	if (nbuf_copy) {
692 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
693 
694 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
695 		mpass_buf_copy.nbuf = nbuf_copy;
696 		/* send frame on partner vdevs */
697 		dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
698 					    dp_tx_mlo_mcast_multipass_send,
699 					    &mpass_buf_copy, DP_MOD_ID_TX);
700 
701 		/* send frame on mcast primary vdev */
702 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
703 
704 		if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
705 			be_vdev->seq_num = 0;
706 		else
707 			be_vdev->seq_num++;
708 	}
709 
710 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
711 				    dp_tx_mlo_mcast_multipass_send,
712 				    &mpass_buf, DP_MOD_ID_TX);
713 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
714 
715 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
716 		be_vdev->seq_num = 0;
717 	else
718 		be_vdev->seq_num++;
719 
720 	return true;
721 }
722 #else
723 static bool
724 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
725 				  qdf_nbuf_t nbuf)
726 {
727 	return false;
728 }
729 #endif
730 
731 void
732 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
733 			 struct dp_vdev *ptnr_vdev,
734 			 void *arg)
735 {
736 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
737 	qdf_nbuf_t  nbuf_clone;
738 	struct dp_vdev_be *be_ptnr_vdev = NULL;
739 	struct dp_tx_msdu_info_s msdu_info;
740 
741 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
742 	if (be_vdev != be_ptnr_vdev) {
743 		nbuf_clone = qdf_nbuf_clone(nbuf);
744 		if (qdf_unlikely(!nbuf_clone)) {
745 			dp_tx_debug("nbuf clone failed");
746 			return;
747 		}
748 	} else {
749 		nbuf_clone = nbuf;
750 	}
751 
752 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
753 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
754 	msdu_info.gsn = be_vdev->seq_num;
755 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
756 
757 	nbuf_clone = dp_tx_send_msdu_single(
758 					ptnr_vdev,
759 					nbuf_clone,
760 					&msdu_info,
761 					DP_MLO_MCAST_REINJECT_PEER_ID,
762 					NULL);
763 	if (qdf_unlikely(nbuf_clone)) {
764 		dp_info("pkt send failed");
765 		qdf_nbuf_free(nbuf_clone);
766 		return;
767 	}
768 }
769 
770 static inline void
771 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
772 			      struct dp_vdev *vdev,
773 			      struct dp_tx_msdu_info_s *msdu_info)
774 {
775 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
776 }
777 
778 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
779 				struct dp_vdev *vdev,
780 				qdf_nbuf_t nbuf)
781 {
782 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
783 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
784 
785 	if (qdf_unlikely(vdev->multipass_en) &&
786 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
787 		return;
788 	/* send frame on partner vdevs */
789 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
790 				    dp_tx_mlo_mcast_pkt_send,
791 				    nbuf, DP_MOD_ID_REINJECT);
792 
793 	/* send frame on mcast primary vdev */
794 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
795 
796 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
797 		be_vdev->seq_num = 0;
798 	else
799 		be_vdev->seq_num++;
800 }
801 
802 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
803 				   struct dp_vdev *vdev)
804 {
805 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
806 
807 	if (be_vdev->mcast_primary)
808 		return true;
809 
810 	return false;
811 }
812 
813 #if defined(CONFIG_MLO_SINGLE_DEV)
814 static void
815 dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
816 			   struct dp_vdev *ptnr_vdev,
817 			   void *arg)
818 {
819 	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
820 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
821 
822 	if (vdev == ptnr_vdev)
823 		return;
824 
825 	/*
826 	 * Hold a reference so the nbuf is not freed by
827 	 * dp_tx_mcast_enhance() in case of a successful
828 	 * conversion
829 	 */
830 	qdf_nbuf_ref(nbuf);
831 
832 	if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
833 		return;
834 
835 	qdf_nbuf_free(nbuf);
836 }
837 
838 qdf_nbuf_t
839 dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
840 			qdf_nbuf_t nbuf,
841 			struct cdp_tx_exception_metadata *tx_exc_metadata)
842 {
843 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
844 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
845 
846 	if (!tx_exc_metadata->is_mlo_mcast)
847 		return nbuf;
848 
849 	if (!be_vdev->mcast_primary) {
850 		qdf_nbuf_free(nbuf);
851 		return NULL;
852 	}
853 
854 	/*
855 	 * In the single netdev model, avoid the reinjection path: the mcast
856 	 * packet is identified in the upper layers during peer search, which
857 	 * finds the primary TQM based on the destination MAC address.
858 	 *
859 	 * The new bonding interface is added into the bridge, so MCSD updates
860 	 * the snooping table and the wifi driver populates the entries in the
861 	 * appropriate child net devices.
862 	 */
863 	if (vdev->mcast_enhancement_en) {
864 		/*
865 		 * As dp_tx_mcast_enhance() can consume the nbuf in case of
866 		 * successful conversion, hold a reference on the nbuf.
867 		 *
868 		 * The reference also keeps the nbuf available for tx on partner links.
869 		 */
870 		qdf_nbuf_ref(nbuf);
871 		if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
872 			dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
873 						    dp_tx_mlo_mcast_enhance_be,
874 						    nbuf, DP_MOD_ID_TX);
875 			qdf_nbuf_free(nbuf);
876 			return NULL;
877 		}
878 		/* release reference taken above */
879 		qdf_nbuf_free(nbuf);
880 	}
881 	dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);
882 	return NULL;
883 }
884 #endif
885 #else
886 static inline void
887 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
888 			      struct dp_vdev *vdev,
889 			      struct dp_tx_msdu_info_s *msdu_info)
890 {
891 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
892 }
893 #endif
894 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
895 	!defined(WLAN_MCAST_MLO)
896 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
897 				struct dp_vdev *vdev,
898 				qdf_nbuf_t nbuf)
899 {
900 }
901 
902 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
903 				   struct dp_vdev *vdev)
904 {
905 	return false;
906 }
907 #endif
908 
909 #ifdef CONFIG_SAWF
910 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
911 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
912 {
913 	uint8_t q_id = 0;
914 
915 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
916 		return;
917 
918 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
919 	q_id = dp_sawf_queue_id_get(nbuf);
920 
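	/*
	 * The SAWF queue id carries the HLOS TID in its low bits together
	 * with the flow-override and who_classify_info_sel selections that
	 * are extracted below to pick the corresponding TX flow queue.
	 */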
921 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
922 		return;
923 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
924 				 (q_id & (CDP_DATA_TID_MAX - 1)));
925 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
926 					     DP_TX_FLOW_OVERRIDE_ENABLE);
927 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
928 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
929 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
930 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
931 }
932 
933 #else
934 
935 static inline
936 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
937 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
938 {
939 }
940 
941 static inline
942 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
943 					 struct dp_tx_desc_s *tx_desc)
944 {
945 	return QDF_STATUS_SUCCESS;
946 }
947 
948 static inline
949 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
950 					      struct dp_tx_desc_s *tx_desc)
951 {
952 	return QDF_STATUS_SUCCESS;
953 }
954 #endif
955 
956 #ifdef WLAN_SUPPORT_PPEDS
957 
958 /*
959  * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
960  * @soc: Handle to DP Soc structure
961  * @peer_id: Peer ID in the descriptor
962  *
963  * Return: None
964  */
965 
966 static inline
967 void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
968 {
969 	struct dp_vdev *vdev = NULL;
970 	struct dp_txrx_peer *txrx_peer = NULL;
971 	dp_txrx_ref_handle txrx_ref_handle = NULL;
972 
973 	DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
974 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
975 					       peer_id,
976 					       &txrx_ref_handle,
977 					       DP_MOD_ID_TX_COMP);
978 	if (txrx_peer) {
979 		vdev = txrx_peer->vdev;
980 		DP_STATS_INC(vdev, tx_i.dropped.fw2wbm_tx_drop, 1);
981 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
982 	}
983 }
984 
985 /**
986  * dp_ppeds_tx_comp_handler()- Handle tx completions for ppe2tcl ring
987  * @be_soc: Handle to DP BE Soc structure
988  * @quota: Max number of tx completions to process
989  *
990  * Return: Number of tx completions processed
991  */
992 int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
993 {
994 	uint32_t num_avail_for_reap = 0;
995 	void *tx_comp_hal_desc;
996 	uint8_t buf_src, status = 0;
997 	uint32_t count = 0;
998 	struct dp_tx_desc_s *tx_desc = NULL;
999 	struct dp_tx_desc_s *head_desc = NULL;
1000 	struct dp_tx_desc_s *tail_desc = NULL;
1001 	struct dp_soc *soc = &be_soc->soc;
1002 	void *last_prefetch_hw_desc = NULL;
1003 	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
1004 	hal_soc_handle_t hal_soc = soc->hal_soc;
1005 	hal_ring_handle_t hal_ring_hdl =
1006 				be_soc->ppeds_wbm_release_ring.hal_srng;
1007 
1008 	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
1009 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1010 		return 0;
1011 	}
1012 
1013 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
1014 
1015 	if (num_avail_for_reap >= quota)
1016 		num_avail_for_reap = quota;
1017 
1018 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
1019 
1020 	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
1021 						     num_avail_for_reap);
1022 
1023 	while (qdf_likely(num_avail_for_reap--)) {
1024 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
1025 		if (qdf_unlikely(!tx_comp_hal_desc))
1026 			break;
1027 
1028 		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
1029 							tx_comp_hal_desc);
1030 
1031 		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
1032 				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1033 			dp_err("Tx comp release_src != TQM | FW but from %d",
1034 			       buf_src);
1035 			qdf_assert_always(0);
1036 		}
1037 
1038 		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
1039 						       &tx_desc);
1040 
1041 		if (!tx_desc) {
1042 			dp_err("unable to retrieve tx_desc!");
1043 			qdf_assert_always(0);
1044 			continue;
1045 		}
1046 
1047 		if (qdf_unlikely(!(tx_desc->flags &
1048 				   DP_TX_DESC_FLAG_ALLOCATED) ||
1049 				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
1050 			qdf_assert_always(0);
1051 			continue;
1052 		}
1053 
1054 		tx_desc->buffer_src = buf_src;
1055 
1056 		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1057 			status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1058 			if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
1059 				dp_ppeds_stats(soc, tx_desc->peer_id);
1060 
1061 			qdf_nbuf_free(tx_desc->nbuf);
1062 			dp_ppeds_tx_desc_free(soc, tx_desc);
1063 		} else {
1064 			tx_desc->tx_status =
1065 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1066 
1067 			if (!head_desc) {
1068 				head_desc = tx_desc;
1069 				tail_desc = tx_desc;
1070 			}
1071 
1072 			tail_desc->next = tx_desc;
1073 			tx_desc->next = NULL;
1074 			tail_desc = tx_desc;
1075 
1076 			count++;
1077 
1078 			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
1079 						       num_avail_for_reap,
1080 						       hal_ring_hdl,
1081 						       &last_prefetch_hw_desc,
1082 						       &last_prefetch_sw_desc);
1083 		}
1084 	}
1085 
1086 	dp_srng_access_end(NULL, soc, hal_ring_hdl);
1087 
1088 	if (head_desc)
1089 		dp_tx_comp_process_desc_list(soc, head_desc,
1090 					     CDP_MAX_TX_COMP_PPE_RING);
1091 
1092 	return count;
1093 }
1094 #endif
1095 
1096 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1097 static inline void
1098 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1099 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1100 			     uint16_t *ast_idx, uint16_t *ast_hash)
1101 {
1102 	struct dp_peer *peer = NULL;
1103 
1104 	if (tx_exc_metadata->is_wds_extended) {
1105 		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
1106 					     DP_MOD_ID_TX);
1107 		if (peer) {
1108 			*ast_idx = peer->ast_idx;
1109 			*ast_hash = peer->ast_hash;
1110 			hal_tx_desc_set_index_lookup_override
1111 							(soc->hal_soc,
1112 							 hal_tx_desc_cached,
1113 							 0x1);
1114 			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
1115 		}
1116 	} else {
1117 		return;
1118 	}
1119 }
1120 
1121 #else
1122 static inline void
1123 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1124 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1125 			     uint16_t *ast_idx, uint16_t *ast_hash)
1126 {
1127 }
1128 #endif
1129 
1130 QDF_STATUS
1131 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
1132 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
1133 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
1134 		    struct dp_tx_msdu_info_s *msdu_info)
1135 {
1136 	void *hal_tx_desc;
1137 	uint32_t *hal_tx_desc_cached;
1138 	int coalesce = 0;
1139 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1140 	uint8_t ring_id = tx_q->ring_id;
1141 	uint8_t tid = msdu_info->tid;
1142 	struct dp_vdev_be *be_vdev;
1143 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1144 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
1145 	hal_ring_handle_t hal_ring_hdl = NULL;
1146 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1147 	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
1148 	uint16_t ast_idx = vdev->bss_ast_idx;
1149 	uint16_t ast_hash = vdev->bss_ast_hash;
1150 
1151 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1152 
1153 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1154 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1155 		return QDF_STATUS_E_RESOURCES;
1156 	}
1157 
1158 	if (qdf_unlikely(tx_exc_metadata)) {
1159 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
1160 				   CDP_INVALID_TX_ENCAP_TYPE) ||
1161 				   (tx_exc_metadata->tx_encap_type ==
1162 				    vdev->tx_encap_type));
1163 
1164 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
1165 			qdf_assert_always((tx_exc_metadata->sec_type ==
1166 					   CDP_INVALID_SEC_TYPE) ||
1167 					   tx_exc_metadata->sec_type ==
1168 					   vdev->sec_type);
1169 		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
1170 					     tx_exc_metadata,
1171 					     &ast_idx, &ast_hash);
1172 	}
1173 
1174 	hal_tx_desc_cached = (void *)cached_desc;
1175 
1176 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
1177 		dp_sawf_config_be(soc, hal_tx_desc_cached,
1178 				  &fw_metadata, tx_desc->nbuf);
1179 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
1180 	}
1181 
1182 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
1183 				    tx_desc->dma_addr, bm_id, tx_desc->id,
1184 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1185 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
1186 				   vdev->lmac_id);
1187 
1188 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
1189 					ast_idx);
1190 	/*
1191 	 * Bank_ID is used as the DSCP_TABLE number in beryllium,
1192 	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
1193 	 */
1194 
1195 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1196 				      (ast_hash & 0xF));
1197 
1198 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1199 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1200 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1201 
1202 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1203 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1204 
1205 	/* verify checksum offload configuration */
1206 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
1207 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
1208 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
1209 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1210 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1211 	}
1212 
1213 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
1214 
1215 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
1216 
1217 	if (tid != HTT_TX_EXT_TID_INVALID)
1218 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1219 
1220 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
1221 						tx_desc->nbuf);
1222 	dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
1223 				      tx_desc->nbuf);
1224 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
1225 
1226 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1227 
1228 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1229 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1230 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1231 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1232 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1233 		return status;
1234 	}
1235 
1236 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1237 	if (qdf_unlikely(!hal_tx_desc)) {
1238 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1239 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1240 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1241 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1242 		goto ring_access_fail;
1243 	}
1244 
1245 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1246 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1247 
1248 	/* Sync cached descriptor with HW */
1249 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
1250 
1251 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
1252 					    msdu_info, ring_id);
1253 
1254 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, dp_tx_get_pkt_len(tx_desc));
1255 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
1256 	dp_tx_update_stats(soc, tx_desc, ring_id);
1257 	status = QDF_STATUS_SUCCESS;
1258 
1259 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
1260 				 hal_ring_hdl, soc, ring_id);
1261 
1262 ring_access_fail:
1263 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
1264 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
1265 			     qdf_get_log_timestamp(), tx_desc->nbuf);
1266 	return status;
1267 }
1268 
1269 #ifdef IPA_OFFLOAD
1270 static void
1271 dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
1272 			  union hal_tx_bank_config *bank_config)
1273 {
1274 	bank_config->epd = 0;
1275 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
1276 	bank_config->encrypt_type = 0;
1277 
1278 	bank_config->src_buffer_swap = 0;
1279 	bank_config->link_meta_swap = 0;
1280 
1281 	bank_config->index_lookup_enable = 0;
1282 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1283 	bank_config->addrx_en = 1;
1284 	bank_config->addry_en = 1;
1285 
1286 	bank_config->mesh_enable = 0;
1287 	bank_config->dscp_tid_map_id = 0;
1288 	bank_config->vdev_id_check_en = 0;
1289 	bank_config->pmac_id = 0;
1290 }
1291 
1292 static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1293 {
1294 	union hal_tx_bank_config ipa_config = {0};
1295 	int bid;
1296 
1297 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
1298 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
1299 		return;
1300 	}
1301 
1302 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
1303 
1304 	/* Let IPA use last HOST owned bank */
1305 	bid = be_soc->num_bank_profiles - 1;
1306 
1307 	be_soc->bank_profiles[bid].is_configured = true;
1308 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
1309 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1310 				      &be_soc->bank_profiles[bid].bank_config,
1311 				      bid);
1312 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
1313 
1314 	dp_info("IPA bank at slot %d config:0x%x", bid,
1315 		be_soc->bank_profiles[bid].bank_config.val);
1316 
1317 	be_soc->ipa_bank_id = bid;
1318 }
1319 #else /* !IPA_OFFLOAD */
1320 static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1321 {
1322 }
1323 #endif /* IPA_OFFLOAD */
1324 
1325 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
1326 {
1327 	int i, num_tcl_banks;
1328 
1329 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
1330 
1331 	qdf_assert_always(num_tcl_banks);
1332 	be_soc->num_bank_profiles = num_tcl_banks;
1333 
1334 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
1335 					       sizeof(*be_soc->bank_profiles));
1336 	if (!be_soc->bank_profiles) {
1337 		dp_err("unable to allocate memory for DP TX Profiles!");
1338 		return QDF_STATUS_E_NOMEM;
1339 	}
1340 
1341 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
1342 
1343 	for (i = 0; i < num_tcl_banks; i++) {
1344 		be_soc->bank_profiles[i].is_configured = false;
1345 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
1346 	}
1347 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
1348 
1349 	dp_tx_init_ipa_bank_profile(be_soc);
1350 
1351 	return QDF_STATUS_SUCCESS;
1352 }
1353 
1354 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
1355 {
1356 	qdf_mem_free(be_soc->bank_profiles);
1357 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
1358 }
1359 
1360 static
1361 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1362 				union hal_tx_bank_config *bank_config)
1363 {
1364 	struct dp_vdev *vdev = &be_vdev->vdev;
1365 
1366 	bank_config->epd = 0;
1367 
1368 	bank_config->encap_type = vdev->tx_encap_type;
1369 
1370 	/* Only valid for raw frames. Needs work for RAW mode */
1371 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1372 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1373 	} else {
1374 		bank_config->encrypt_type = 0;
1375 	}
1376 
1377 	bank_config->src_buffer_swap = 0;
1378 	bank_config->link_meta_swap = 0;
1379 
1380 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1381 	    vdev->opmode == wlan_op_mode_sta) {
1382 		bank_config->index_lookup_enable = 1;
1383 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1384 		bank_config->addrx_en = 0;
1385 		bank_config->addry_en = 0;
1386 	} else {
1387 		bank_config->index_lookup_enable = 0;
1388 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1389 		bank_config->addrx_en =
1390 			(vdev->hal_desc_addr_search_flags &
1391 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1392 		bank_config->addry_en =
1393 			(vdev->hal_desc_addr_search_flags &
1394 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1395 	}
1396 
1397 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1398 
1399 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1400 
1401 	/* Disabling vdev id check for now. Needs revisit. */
1402 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1403 
1404 	bank_config->pmac_id = vdev->lmac_id;
1405 }
1406 
1407 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1408 			   struct dp_vdev_be *be_vdev)
1409 {
1410 	char *temp_str = "";
1411 	bool found_match = false;
1412 	int bank_id = DP_BE_INVALID_BANK_ID;
1413 	int i;
1414 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1415 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1416 	union hal_tx_bank_config vdev_config = {0};
1417 
1418 	/* convert vdev params into hal_tx_bank_config */
1419 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1420 
1421 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1422 	/* prefer a matching bank, else an unconfigured slot, else one with zero ref_count */
1423 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1424 		if (be_soc->bank_profiles[i].is_configured &&
1425 		    (be_soc->bank_profiles[i].bank_config.val ^
1426 						vdev_config.val) == 0) {
1427 			found_match = true;
1428 			break;
1429 		}
1430 
1431 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1432 		    !be_soc->bank_profiles[i].is_configured)
1433 			unconfigured_slot = i;
1434 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1435 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1436 			zero_ref_count_slot = i;
1437 	}
1438 
1439 	if (found_match) {
1440 		temp_str = "matching";
1441 		bank_id = i;
1442 		goto inc_ref_and_return;
1443 	}
1444 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1445 		temp_str = "unconfigured";
1446 		bank_id = unconfigured_slot;
1447 		goto configure_and_return;
1448 	}
1449 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1450 		temp_str = "zero_ref_count";
1451 		bank_id = zero_ref_count_slot;
1452 	}
1453 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1454 		dp_alert("unable to find TX bank!");
1455 		QDF_BUG(0);
1456 		return bank_id;
1457 	}
1458 
1459 configure_and_return:
1460 	be_soc->bank_profiles[bank_id].is_configured = true;
1461 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1462 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1463 				      &be_soc->bank_profiles[bank_id].bank_config,
1464 				      bank_id);
1465 inc_ref_and_return:
1466 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1467 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1468 
1469 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1470 		temp_str, bank_id, vdev_config.val,
1471 		be_soc->bank_profiles[bank_id].bank_config.val,
1472 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1473 
1474 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1475 		be_soc->bank_profiles[bank_id].bank_config.epd,
1476 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1477 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1478 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1479 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1480 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1481 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1482 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1483 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1484 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1485 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1486 
1487 	return bank_id;
1488 }
1489 
1490 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
1491 			    struct dp_vdev_be *be_vdev)
1492 {
1493 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1494 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
1495 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1496 }
1497 
1498 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
1499 			       struct dp_vdev_be *be_vdev)
1500 {
1501 	dp_tx_put_bank_profile(be_soc, be_vdev);
1502 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1503 	be_vdev->vdev.bank_id = be_vdev->bank_id;
1504 }
1505 
1506 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1507 				   uint32_t num_elem,
1508 				   uint8_t pool_id)
1509 {
1510 	struct dp_tx_desc_pool_s *tx_desc_pool;
1511 	struct dp_hw_cookie_conversion_t *cc_ctx;
1512 	struct dp_soc_be *be_soc;
1513 	struct dp_spt_page_desc *page_desc;
1514 	struct dp_tx_desc_s *tx_desc;
1515 	uint32_t ppt_idx = 0;
1516 	uint32_t avail_entry_index = 0;
1517 
1518 	if (!num_elem) {
1519 		dp_err("desc_num 0 !!");
1520 		return QDF_STATUS_E_FAILURE;
1521 	}
1522 
1523 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1524 	tx_desc_pool = &soc->tx_desc[pool_id];
1525 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1526 
1527 	tx_desc = tx_desc_pool->freelist;
1528 	page_desc = &cc_ctx->page_desc_base[0];
1529 	while (tx_desc) {
1530 		if (avail_entry_index == 0) {
1531 			if (ppt_idx >= cc_ctx->total_page_num) {
1532 				dp_alert("insufficient secondary page tables");
1533 				qdf_assert_always(0);
1534 			}
1535 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1536 		}
1537 
1538 		/* put each TX Desc VA into the SPT page and
1539 		 * derive the corresponding cookie ID
1540 		 */
1541 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1542 					 avail_entry_index,
1543 					 tx_desc);
1544 		tx_desc->id =
1545 			dp_cc_desc_id_generate(page_desc->ppt_index,
1546 					       avail_entry_index);
1547 		tx_desc->pool_id = pool_id;
1548 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1549 		tx_desc = tx_desc->next;
1550 		avail_entry_index = (avail_entry_index + 1) &
1551 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1552 	}
1553 
1554 	return QDF_STATUS_SUCCESS;
1555 }
1556 
1557 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
1558 			       struct dp_tx_desc_pool_s *tx_desc_pool,
1559 			       uint8_t pool_id)
1560 {
1561 	struct dp_spt_page_desc *page_desc;
1562 	struct dp_soc_be *be_soc;
1563 	int i = 0;
1564 	struct dp_hw_cookie_conversion_t *cc_ctx;
1565 
1566 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1567 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
1568 
1569 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1570 		page_desc = &cc_ctx->page_desc_base[i];
1571 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1572 	}
1573 }
1574 
1575 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1576 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1577 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1578 			       uint32_t quota)
1579 {
1580 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1581 	uint32_t work_done = 0;
1582 
1583 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1584 			DP_SRNG_THRESH_NEAR_FULL)
1585 		return 0;
1586 
1587 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1588 	work_done++;
1589 
1590 	return work_done;
1591 }
1592 #endif
1593 
1594 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1595 	defined(WLAN_CONFIG_TX_DELAY)
1596 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
1597 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
1598 
1599 #define HW_TX_DELAY_MAX                       0x1000000
1600 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
1601 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
1602 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
1603 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
1604 	 HW_TX_DELAY_MASK)
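/*
 * Notes on units (inferred from the macros above): buffer_timestamp is in
 * 1024 us units, so the left shift by 10 converts it to microseconds; both
 * timestamps live in a 29-bit microsecond space (HW_TX_DELAY_MASK), hence
 * the modular arithmetic below, and deltas above HW_TX_DELAY_MAX are
 * rejected.
 */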
1605 
1606 static inline
1607 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1608 				      struct dp_vdev *vdev,
1609 				      struct hal_tx_completion_status *ts,
1610 				      uint32_t *delay_us)
1611 {
1612 	uint32_t ppdu_id;
1613 	uint8_t link_id_offset, link_id_bits;
1614 	uint8_t hw_link_id;
1615 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1616 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1617 	uint32_t delay;
1618 	int32_t delta_tsf2, delta_tqm;
1619 
1620 	if (!ts->valid)
1621 		return QDF_STATUS_E_INVAL;
1622 
1623 	link_id_offset = soc->link_id_offset;
1624 	link_id_bits = soc->link_id_bits;
1625 	ppdu_id = ts->ppdu_id;
1626 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1627 					   link_id_bits);
1628 
1629 	msdu_tqm_enqueue_tstamp_us =
1630 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1631 	msdu_compl_tsf_tstamp_us = ts->tsf;
1632 
1633 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1634 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1635 
1636 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1637 			delta_tqm) & HW_TX_DELAY_MASK;
1638 
1639 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1640 			delta_tsf2) & HW_TX_DELAY_MASK;
1641 
1642 	delay = (final_msdu_compl_tsf_tstamp_us -
1643 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1644 
1645 	if (delay > HW_TX_DELAY_MAX)
1646 		return QDF_STATUS_E_FAILURE;
1647 
1648 	if (delay_us)
1649 		*delay_us = delay;
1650 
1651 	return QDF_STATUS_SUCCESS;
1652 }
1653 #else
1654 static inline
1655 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1656 				      struct dp_vdev *vdev,
1657 				      struct hal_tx_completion_status *ts,
1658 				      uint32_t *delay_us)
1659 {
1660 	return QDF_STATUS_SUCCESS;
1661 }
1662 #endif
1663 
1664 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1665 				     struct dp_vdev *vdev,
1666 				     struct hal_tx_completion_status *ts,
1667 				     uint32_t *delay_us)
1668 {
1669 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1670 }
1671 
1672 static inline
1673 qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
1674 				 struct dp_tx_desc_s *tx_desc,
1675 				 qdf_nbuf_t nbuf)
1676 {
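	/*
	 * Fast-path mapping: clean only the first 256 bytes of frame data
	 * for DMA (no dsb here; dp_tx_fast_send_be() issues qdf_dsb() later)
	 * and derive the DMA address directly from the buffer VA.
	 */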
1677 	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
1678 					(void *)(nbuf->data + 256));
1679 
1680 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1681 }
1682 
1683 static inline
1684 void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
1685 			 struct dp_tx_desc_s *desc)
1686 {
1687 }
1688 
1689 /**
1690  * dp_tx_fast_send_be() - Transmit a frame on a given VAP
1691  * @soc_hdl: CDP soc handle
1692  * @vdev_id: id of DP vdev handle
1693  * @nbuf: skb
1694  *
1695  * Entry point for the Core Tx layer (DP_TX), invoked from
1696  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
1697  * forwarding cases.
1698  *
1699  * Return: NULL on success,
1700  *         nbuf when it fails to send
1701  */
1702 #ifdef QCA_DP_TX_NBUF_LIST_FREE
1703 qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1704 			      qdf_nbuf_t nbuf)
1705 {
1706 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1707 	struct dp_vdev *vdev = NULL;
1708 	struct dp_pdev *pdev = NULL;
1709 	struct dp_tx_desc_s *tx_desc;
1710 	uint16_t desc_pool_id;
1711 	uint16_t pkt_len;
1712 	qdf_dma_addr_t paddr;
1713 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1714 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1715 	hal_ring_handle_t hal_ring_hdl = NULL;
1716 	uint32_t *hal_tx_desc_cached;
1717 	void *hal_tx_desc;
1718 	uint8_t desc_size = DP_TX_FAST_DESC_SIZE;
1719 
1720 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
1721 		return nbuf;
1722 
1723 	vdev = soc->vdev_id_map[vdev_id];
1724 	if (qdf_unlikely(!vdev))
1725 		return nbuf;
1726 
1727 	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
1728 
1729 	pkt_len = qdf_nbuf_headlen(nbuf);
1730 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
1731 	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
1732 	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);
1733 
1734 	pdev = vdev->pdev;
1735 	if (dp_tx_limit_check(vdev, nbuf))
1736 		return nbuf;
1737 
1738 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1739 
1740 	if (qdf_unlikely(!tx_desc)) {
1741 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
1742 		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
1743 		return nbuf;
1744 	}
1745 
1746 	dp_tx_outstanding_inc(pdev);
1747 
1748 	/* Initialize the SW tx descriptor */
1749 	tx_desc->nbuf = nbuf;
1750 	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
1751 	tx_desc->frm_type = dp_tx_frm_std;
1752 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1753 	tx_desc->vdev_id = vdev_id;
1754 	tx_desc->pdev = pdev;
1755 	tx_desc->pkt_offset = 0;
1756 	tx_desc->length = pkt_len;
1757 	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
1758 	tx_desc->nbuf->fast_recycled = 1;
1759 
1760 	if (nbuf->is_from_recycler && nbuf->fast_xmit)
1761 		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;
1762 
1763 	paddr =  dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
1764 	if (!paddr) {
1765 		/* Handle failure */
1766 		dp_err("qdf_nbuf_map failed");
1767 		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
1768 		goto release_desc;
1769 	}
1770 
1771 	tx_desc->dma_addr = paddr;
1772 
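	/*
	 * Build the TCL DATA CMD words directly in the cached descriptor
	 * instead of going through the hal_tx_desc_set_* helpers; only
	 * DP_TX_FAST_DESC_SIZE bytes are synced to HW (extended by one word
	 * for the AST search index/hash in STA mode).
	 */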
1773 	hal_tx_desc_cached = (void *)cached_desc;
1774 	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
1775 	hal_tx_desc_cached[1] = tx_desc->id <<
1776 		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;
1777 
1778 	/* bank_id */
1779 	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
1780 	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
1781 		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;
1782 
1783 	hal_tx_desc_cached[4] = tx_desc->length;
1784 	/* l3 and l4 checksum enable */
1785 	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
1786 		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;
1787 
1788 	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
1789 	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;
1790 
1791 	if (vdev->opmode == wlan_op_mode_sta) {
1792 		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
1793 			((vdev->bss_ast_hash & 0xF) <<
1794 			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);
1795 		desc_size = DP_TX_FAST_DESC_SIZE + 4;
1796 	}
1797 
1798 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);
1799 
1800 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1801 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1802 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1803 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1804 		goto ring_access_fail2;
1805 	}
1806 
1807 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1808 	if (qdf_unlikely(!hal_tx_desc)) {
1809 		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
1810 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1811 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
1812 		goto ring_access_fail;
1813 	}
1814 
1815 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1816 
1817 	/* Sync cached descriptor with HW */
1818 	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, desc_size);
1819 	qdf_dsb();
1820 
1821 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
1822 	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
1823 	status = QDF_STATUS_SUCCESS;
1824 
1825 ring_access_fail:
1826 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1827 
1828 ring_access_fail2:
1829 	if (status != QDF_STATUS_SUCCESS) {
1830 		dp_tx_nbuf_unmap_be(soc, tx_desc);
1831 		goto release_desc;
1832 	}
1833 
1834 	return NULL;
1835 
1836 release_desc:
1837 	dp_tx_desc_release(tx_desc, desc_pool_id);
1838 
1839 	return nbuf;
1840 }
1841 #endif
1842