xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 3efaabd70475270fea7fcc46621defb016797d6e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #include "dp_internal.h"
30 #ifdef FEATURE_WDS
31 #include "dp_txrx_wds.h"
32 #endif
33 
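/*
 * Bank profile lock flavor: single-pdev (WLAN_MAX_PDEVS == 1) builds appear
 * to take this lock only from sleepable context, so a mutex is used there;
 * other builds use a BH-disabling spinlock.
 */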
34 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
35 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
36 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
37 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
38 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
39 #else
40 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
41 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
42 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
43 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
44 #endif
45 
46 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
47 #ifdef WLAN_MCAST_MLO
48 /* MLO peer id for reinject */
49 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
50 #define MAX_GSN_NUM 0x0FFF
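/*
 * MAX_GSN_NUM bounds the MLO mcast GSN carried in msdu_info.gsn; the mcast
 * send paths below wrap mlo_dev_ctxt->seq_num back to 0 once it exceeds
 * this value.
 */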
51 
52 #ifdef QCA_MULTIPASS_SUPPORT
53 #define INVALID_VLAN_ID         0xFFFF
54 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
55 /**
56  * struct dp_mlo_mpass_buf - Multipass buffer
57  * @vlan_id: vlan_id of frame
58  * @nbuf: pointer to skb buf
59  */
60 struct dp_mlo_mpass_buf {
61 	uint16_t vlan_id;
62 	qdf_nbuf_t  nbuf;
63 };
64 #endif
65 #endif
66 #endif
67 
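/*
 * The WBM completion v3 fields used here appear to share the v2 bit layout,
 * so the v3 accessors simply alias the corresponding HTT v2 macros.
 */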
68 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
69 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
70 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
71 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
72 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
73 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
74 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
75 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
76 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
77 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
78 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
79 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
80 #define DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(_var) \
81 	HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(_var)
82 
83 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
84 
85 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
86 /*
87  * Value used to mark the ring desc as invalidated, written to the
88  * buffer_virt_addr_63_32 field of the WBM2SW ring Desc.
89  */
90 #define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212
91 
92 /**
93  * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
94  *					    invalidate it after each reaping
95  * @tx_comp_hal_desc: ring desc virtual address
96  * @r_tx_desc: pointer to current dp TX Desc pointer
97  * @tx_desc_va: the original 64-bit Desc VA obtained from the ring Desc
98  * @hw_cc_done: HW cookie conversion done or not
99  *
100  * If HW CC is done, check the buffer_virt_addr_63_32 value to know whether
101  * the ring Desc is stale or not. If HW CC is not done, then compare the PA
102  * between the ring Desc and the current TX desc.
103  *
104  * Return: None.
105  */
106 static inline
107 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
108 					  struct dp_tx_desc_s **r_tx_desc,
109 					  uint64_t tx_desc_va,
110 					  bool hw_cc_done)
111 {
112 	qdf_dma_addr_t desc_dma_addr;
113 
114 	if (qdf_likely(hw_cc_done)) {
115 		/* Check upper 32 bits */
116 		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
117 		    (tx_desc_va >> 32))
118 			*r_tx_desc = NULL;
119 
120 		/* Invalidate the ring desc for 32 ~ 63 bits of VA */
121 		hal_tx_comp_set_desc_va_63_32(
122 				tx_comp_hal_desc,
123 				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
124 	} else {
125 		/* Compare PA between ring desc and current TX desc stored */
126 		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);
127 
128 		if (desc_dma_addr != (*r_tx_desc)->dma_addr)
129 			*r_tx_desc = NULL;
130 	}
131 }
132 #else
133 static inline
134 void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
135 					  struct dp_tx_desc_s **r_tx_desc,
136 					  uint64_t tx_desc_va,
137 					  bool hw_cc_done)
138 {
139 }
140 #endif
141 
142 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
143 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
144 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
145 					    void *tx_comp_hal_desc,
146 					    struct dp_tx_desc_s **r_tx_desc)
147 {
148 	uint32_t tx_desc_id;
149 	uint64_t tx_desc_va = 0;
150 	bool hw_cc_done =
151 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);
152 
153 	if (qdf_likely(hw_cc_done)) {
154 		/* HW cookie conversion done */
155 		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
156 		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
157 
158 	} else {
159 		/* SW does cookie conversion to VA */
160 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
161 		*r_tx_desc =
162 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
163 	}
164 
165 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
166 					     r_tx_desc, tx_desc_va,
167 					     hw_cc_done);
168 
169 	if (*r_tx_desc)
170 		(*r_tx_desc)->peer_id =
171 				dp_tx_comp_get_peer_id_be(soc,
172 							  tx_comp_hal_desc);
173 }
174 #else
175 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
176 					    void *tx_comp_hal_desc,
177 					    struct dp_tx_desc_s **r_tx_desc)
178 {
179 	uint64_t tx_desc_va;
180 
181 	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
182 	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
183 
184 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
185 					     r_tx_desc,
186 					     tx_desc_va,
187 					     true);
188 	if (*r_tx_desc)
189 		(*r_tx_desc)->peer_id =
190 				dp_tx_comp_get_peer_id_be(soc,
191 							  tx_comp_hal_desc);
192 }
193 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
194 #else
195 
196 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
197 					    void *tx_comp_hal_desc,
198 					    struct dp_tx_desc_s **r_tx_desc)
199 {
200 	uint32_t tx_desc_id;
201 
202 	/* SW does cookie conversion to VA */
203 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
204 	*r_tx_desc =
205 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
206 
207 	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
208 					     r_tx_desc, 0,
209 					     false);
210 
211 	if (*r_tx_desc)
212 		(*r_tx_desc)->peer_id =
213 				dp_tx_comp_get_peer_id_be(soc,
214 							  tx_comp_hal_desc);
215 }
216 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
217 
218 static inline
219 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
220 {
221 	struct dp_vdev *vdev;
222 	uint8_t vdev_id;
223 	uint32_t *htt_desc = (uint32_t *)status;
224 
225 	dp_assert_always_internal(soc->mec_fw_offload);
226 
227 	/*
228 	 * Get vdev id from HTT status word in case of MEC
229 	 * notification
230 	 */
231 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
232 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
233 		return;
234 
235 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
236 				     DP_MOD_ID_HTT_COMP);
237 	if (!vdev)
238 		return;
239 	dp_tx_mec_handler(vdev, status);
240 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
241 }
242 
243 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
244 				     struct dp_tx_desc_s *tx_desc,
245 				     uint8_t *status,
246 				     uint8_t ring_id)
247 {
248 	uint8_t tx_status;
249 	struct dp_pdev *pdev;
250 	struct dp_vdev *vdev = NULL;
251 	struct hal_tx_completion_status ts = {0};
252 	uint32_t *htt_desc = (uint32_t *)status;
253 	struct dp_txrx_peer *txrx_peer;
254 	dp_txrx_ref_handle txrx_ref_handle = NULL;
255 	struct cdp_tid_tx_stats *tid_stats = NULL;
256 	struct htt_soc *htt_handle;
257 	uint8_t vdev_id;
258 	uint16_t peer_id;
259 	uint8_t xmit_type;
260 
261 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
262 	htt_handle = (struct htt_soc *)soc->htt_handle;
263 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
264 
265 	/*
266 	 * There can be a scenario where WBM consumes the descriptor enqueued
267 	 * from TQM2WBM first, i.e. the TQM completion can happen before the MEC
268 	 * notification comes from FW2WBM. Avoid accessing any field of the tx
269 	 * descriptor in case of a MEC notify.
270 	 */
271 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
272 		return dp_tx_process_mec_notify_be(soc, status);
273 
274 	/*
275 	 * If the descriptor is already freed in vdev_detach,
276 	 * continue to next descriptor
277 	 */
278 	if (qdf_unlikely(!tx_desc->flags)) {
279 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
280 				   tx_desc->id);
281 		return;
282 	}
283 
284 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
285 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
286 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
287 		goto release_tx_desc;
288 	}
289 
290 	pdev = tx_desc->pdev;
291 	if (qdf_unlikely(!pdev)) {
292 		dp_tx_comp_warn("The pdev in TX desc is NULL, dropped.");
293 		dp_tx_comp_warn("tx_status: %u", tx_status);
294 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
295 		goto release_tx_desc;
296 	}
297 
298 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
299 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
300 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
301 		goto release_tx_desc;
302 	}
303 
304 	qdf_assert(tx_desc->pdev);
305 
306 	vdev_id = tx_desc->vdev_id;
307 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
308 				     DP_MOD_ID_HTT_COMP);
309 
310 	if (qdf_unlikely(!vdev)) {
311 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
312 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
313 		goto release_tx_desc;
314 	}
315 
316 	switch (tx_status) {
317 	case HTT_TX_FW2WBM_TX_STATUS_OK:
318 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
319 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
320 	{
321 		uint8_t tid;
322 		uint8_t transmit_cnt_valid = 0;
323 
324 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
325 			ts.peer_id =
326 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
327 						htt_desc[3]);
328 			ts.tid =
329 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
330 						htt_desc[3]);
331 		} else {
332 			ts.peer_id = HTT_INVALID_PEER;
333 			ts.tid = HTT_INVALID_TID;
334 		}
335 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
336 		ts.ppdu_id =
337 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
338 					htt_desc[2]);
339 		ts.ack_frame_rssi =
340 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
341 					htt_desc[2]);
342 
343 		transmit_cnt_valid =
344 			DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(
345 					htt_desc[3]);
346 		if (transmit_cnt_valid)
347 			ts.transmit_cnt =
348 				HTT_TX_WBM_COMPLETION_V3_TRANSMIT_COUNT_GET(
349 						htt_desc[1]);
350 
351 		ts.tsf = htt_desc[4];
352 		ts.first_msdu = 1;
353 		ts.last_msdu = 1;
354 		switch (tx_status) {
355 		case HTT_TX_FW2WBM_TX_STATUS_OK:
356 			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
357 			break;
358 		case HTT_TX_FW2WBM_TX_STATUS_DROP:
359 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
360 			break;
361 		case HTT_TX_FW2WBM_TX_STATUS_TTL:
362 			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
363 			break;
364 		}
365 		tid = ts.tid;
366 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
367 			tid = CDP_MAX_DATA_TIDS - 1;
368 
369 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
370 
371 		if (qdf_unlikely(pdev->delay_stats_flag) ||
372 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
373 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
374 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
375 			tid_stats->htt_status_cnt[tx_status]++;
376 
377 		peer_id = dp_tx_comp_adjust_peer_id_be(soc, ts.peer_id);
378 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
379 						       &txrx_ref_handle,
380 						       DP_MOD_ID_HTT_COMP);
381 		if (qdf_likely(txrx_peer))
382 			dp_tx_update_peer_basic_stats(
383 						txrx_peer,
384 						qdf_nbuf_len(tx_desc->nbuf),
385 						tx_status,
386 						pdev->enhanced_stats_en);
387 
388 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
389 					     ring_id);
390 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
391 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
392 
393 		if (qdf_likely(txrx_peer))
394 			dp_txrx_peer_unref_delete(txrx_ref_handle,
395 						  DP_MOD_ID_HTT_COMP);
396 
397 		break;
398 	}
399 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
400 	{
401 		uint8_t reinject_reason;
402 
403 		reinject_reason =
404 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
405 								htt_desc[1]);
406 		dp_tx_reinject_handler(soc, vdev, tx_desc,
407 				       status, reinject_reason);
408 		break;
409 	}
410 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
411 	{
412 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
413 		break;
414 	}
415 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
416 	{
417 		xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
418 		DP_STATS_INC(vdev,
419 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
420 			     1);
421 		goto release_tx_desc;
422 	}
423 	default:
424 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
425 			       tx_status);
426 		goto release_tx_desc;
427 	}
428 
429 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
430 	return;
431 
432 release_tx_desc:
433 	dp_tx_comp_free_buf(soc, tx_desc, false);
434 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
435 	if (vdev)
436 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
437 }
438 
439 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
440 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
441 /**
442  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
443  * @soc: DP soc structure pointer
444  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
445  *
446  * Return: RBM ID corresponding to TCL ring_id
447  */
448 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
449 					  uint8_t ring_id)
450 {
451 	return 0;
452 }
453 #else
454 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
455 					  uint8_t ring_id)
456 {
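	/*
	 * Ring 0 maps to the HAL_WBM_SW2_BM_ID() value derived from
	 * wbm_sw0_bm_id; rings 1..N map to consecutive BM IDs starting
	 * at wbm_sw0_bm_id.
	 */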
457 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
458 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
459 }
460 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
461 #else
462 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
463 					  uint8_t tcl_index)
464 {
465 	uint8_t rbm;
466 
467 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
468 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
469 	return rbm;
470 }
471 #endif
472 
473 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
474 
475 /**
476  * dp_tx_set_min_rates_for_critical_frames()- sets min-rates for critical pkts
477  * @soc: DP soc structure pointer
478  * @hal_tx_desc: HAL descriptor where fields are set
479  * @nbuf: skb to be considered for min rates
480  *
481  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
482  * and uses it to determine if the frame is critical. For a critical frame,
483  * flow override bits are set to classify the frame into HW's high priority
484  * queue. The HW will pick pre-configured min rates for such packets.
485  *
486  * Return: None
487  */
488 static void
489 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
490 					uint32_t *hal_tx_desc,
491 					qdf_nbuf_t nbuf)
492 {
493 /*
494  * Critical frames should be queued to the high priority queue for the TID
495  * on which they are sent out (for the concerned peer).
496  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
497  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
498  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
499  * HOL queue.
500  */
501 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
502 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
503 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
504 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
505 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
506 						TX_SEMI_HARD_NOTIFY_E);
507 	}
508 }
509 #else
510 static inline void
511 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
512 					uint32_t *hal_tx_desc_cached,
513 					qdf_nbuf_t nbuf)
514 {
515 }
516 #endif
517 
518 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
519 /**
520  * dp_tx_set_particular_tx_queue() - set a particular TX TQM flow queue (3)
521  *				     for certain TX packets, currently TCP ACKs only
522  * @soc: DP soc structure pointer
523  * @hal_tx_desc: HAL descriptor where fields are set
524  * @nbuf: skb to be considered for particular TX queue
525  *
526  * Return: None
527  */
528 static inline
529 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
530 				   uint32_t *hal_tx_desc,
531 				   qdf_nbuf_t nbuf)
532 {
533 	if (!soc->tx_ilp_enable)
534 		return;
535 
536 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
537 			 QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
538 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
539 		hal_tx_desc_set_flow_override(hal_tx_desc, 1);
540 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
541 	}
542 }
543 #else
544 static inline
545 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
546 				   uint32_t *hal_tx_desc,
547 				   qdf_nbuf_t nbuf)
548 {
549 }
550 #endif
551 
552 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
553 	defined(WLAN_MCAST_MLO)
554 #ifdef QCA_MULTIPASS_SUPPORT
555 /**
556  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
557  * @be_vdev: Handle to DP be_vdev structure
558  * @ptnr_vdev: DP ptnr_vdev handle
559  * @arg: pointer to dp_mlo_mpass_buf
560  *
561  * Return: None
562  */
563 static void
564 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
565 				 struct dp_vdev *ptnr_vdev,
566 				 void *arg)
567 {
568 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
569 	struct dp_txrx_peer *txrx_peer = NULL;
570 	struct vlan_ethhdr *veh = NULL;
571 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
572 	uint16_t vlan_id = 0;
573 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
574 			(htons(eh->ether_type) != ETH_P_8021Q));
575 
576 	if (qdf_unlikely(not_vlan))
577 		return;
578 	veh = (struct vlan_ethhdr *)eh;
579 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
580 
581 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
582 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
583 		      mpass_peer_list_elem) {
584 		if (vlan_id == txrx_peer->vlan_id) {
585 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
586 			ptr->vlan_id = vlan_id;
587 			return;
588 		}
589 	}
590 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
591 }
592 
593 /**
594  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
595  * @be_vdev: Handle to DP be_vdev structure
596  * @ptnr_vdev: DP ptnr_vdev handle
597  * @arg: pointer to dp_mlo_mpass_buf
598  *
599  * Return: None
600  */
601 static void
602 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
603 			       struct dp_vdev *ptnr_vdev,
604 			       void *arg)
605 {
606 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
607 	struct dp_tx_msdu_info_s msdu_info;
608 	struct dp_vdev_be *be_ptnr_vdev = NULL;
609 	qdf_nbuf_t  nbuf_clone;
610 	uint16_t group_key = 0;
611 
612 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
613 	if (be_vdev != be_ptnr_vdev) {
614 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
615 		if (qdf_unlikely(!nbuf_clone)) {
616 			dp_tx_debug("nbuf clone failed");
617 			return;
618 		}
619 	} else {
620 		nbuf_clone = ptr->nbuf;
621 	}
622 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
623 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
624 	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
625 	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(ptr->nbuf);
626 
627 
628 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
629 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
630 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
631 						msdu_info.meta_data[0], 1);
632 	} else {
633 		/* return when vlan map is not initialized */
634 		if (!ptnr_vdev->iv_vlan_map)
635 			return;
636 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
637 
638 		/*
639 		 * If group key is not installed, drop the frame.
640 		 */
641 
642 		if (!group_key)
643 			return;
644 
645 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
646 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
647 		msdu_info.exception_fw = 1;
648 	}
649 
650 	nbuf_clone = dp_tx_send_msdu_single(
651 					ptnr_vdev,
652 					nbuf_clone,
653 					&msdu_info,
654 					DP_MLO_MCAST_REINJECT_PEER_ID,
655 					NULL);
656 	if (qdf_unlikely(nbuf_clone)) {
657 		dp_info("pkt send failed");
658 		qdf_nbuf_free(nbuf_clone);
659 		return;
660 	}
661 }
662 
663 /**
664  * dp_tx_mlo_mcast_multipass_handler() - handle frame needing multipass processing
665  * @soc: DP soc handle
666  * @vdev: DP vdev handle
667  * @nbuf: nbuf to be enqueued
668  *
669  * Return: true if handling is done else false
670  */
671 static bool
672 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
673 				  struct dp_vdev *vdev,
674 				  qdf_nbuf_t nbuf)
675 {
676 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
677 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
678 	qdf_nbuf_t nbuf_copy = NULL;
679 	struct dp_mlo_mpass_buf mpass_buf;
680 
681 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
682 	mpass_buf.vlan_id = INVALID_VLAN_ID;
683 	mpass_buf.nbuf = nbuf;
684 
685 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
686 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
687 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
688 				      dp_tx_mlo_mcast_multipass_lookup,
689 				      &mpass_buf, DP_MOD_ID_TX,
690 				      DP_ALL_VDEV_ITER,
691 				      DP_VDEV_ITERATE_SKIP_SELF);
692 		/*
693 		 * Do not drop the frame when vlan_id doesn't match.
694 		 * Send the frame as it is.
695 		 */
696 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
697 			return false;
698 	}
699 
700 	/* AP can have classic clients, special clients &
701 	 * classic repeaters.
702 	 * 1. Classic clients & special client:
703 	 *	Remove vlan header, find corresponding group key
704 	 *	index, fill in metaheader and enqueue multicast
705 	 *	frame to TCL.
706 	 * 2. Classic repeater:
707 	 *	Pass through to classic repeater with vlan tag
708 	 *	intact without any group key index. Hardware
709 	 *	will know which key to use to send frame to
710 	 *	repeater.
711 	 */
712 	nbuf_copy = qdf_nbuf_copy(nbuf);
713 
714 	/*
715 	 * Send multicast frame to special peers even
716 	 * if pass through to classic repeater fails.
717 	 */
718 	if (nbuf_copy) {
719 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
720 
721 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
722 		mpass_buf_copy.nbuf = nbuf_copy;
723 		/* send frame on partner vdevs */
724 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
725 				      dp_tx_mlo_mcast_multipass_send,
726 				      &mpass_buf_copy, DP_MOD_ID_TX,
727 				      DP_LINK_VDEV_ITER,
728 				      DP_VDEV_ITERATE_SKIP_SELF);
729 
730 		/* send frame on mcast primary vdev */
731 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
732 
733 		if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
734 			be_vdev->mlo_dev_ctxt->seq_num = 0;
735 		else
736 			be_vdev->mlo_dev_ctxt->seq_num++;
737 	}
738 
739 	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
740 			      dp_tx_mlo_mcast_multipass_send,
741 			      &mpass_buf, DP_MOD_ID_TX, DP_LINK_VDEV_ITER,
742 			      DP_VDEV_ITERATE_SKIP_SELF);
743 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
744 
745 	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
746 		be_vdev->mlo_dev_ctxt->seq_num = 0;
747 	else
748 		be_vdev->mlo_dev_ctxt->seq_num++;
749 
750 	return true;
751 }
752 #else
753 static bool
754 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
755 				  qdf_nbuf_t nbuf)
756 {
757 	return false;
758 }
759 #endif
760 
761 void
762 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
763 			 struct dp_vdev *ptnr_vdev,
764 			 void *arg)
765 {
766 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
767 	qdf_nbuf_t  nbuf_clone;
768 	struct dp_vdev_be *be_ptnr_vdev = NULL;
769 	struct dp_tx_msdu_info_s msdu_info;
770 
771 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
772 	if (be_vdev != be_ptnr_vdev) {
773 		nbuf_clone = qdf_nbuf_clone(nbuf);
774 		if (qdf_unlikely(!nbuf_clone)) {
775 			dp_tx_debug("nbuf clone failed");
776 			return;
777 		}
778 	} else {
779 		nbuf_clone = nbuf;
780 	}
781 
782 	/* NAWDS clients will accept MCAST packets only in 4-addr format.
783 	 * This ensures packets are sent in 4-addr format to NAWDS clients.
784 	 */
785 	if (qdf_unlikely(ptnr_vdev->nawds_enabled)) {
786 		qdf_mem_zero(&msdu_info, sizeof(msdu_info));
787 		dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
788 		dp_tx_nawds_handler(ptnr_vdev->pdev->soc, ptnr_vdev,
789 				    &msdu_info, nbuf_clone, DP_INVALID_PEER);
790 	}
791 
792 	if (qdf_unlikely(dp_tx_proxy_arp(ptnr_vdev, nbuf_clone) !=
793 			 QDF_STATUS_SUCCESS)) {
794 		qdf_nbuf_free(nbuf_clone);
795 		return;
796 	}
797 
798 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
799 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
800 
801 	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
802 	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf_clone);
803 
804 	DP_STATS_INC(ptnr_vdev,
805 		     tx_i[msdu_info.xmit_type].mlo_mcast.send_pkt_count, 1);
806 	nbuf_clone = dp_tx_send_msdu_single(
807 					ptnr_vdev,
808 					nbuf_clone,
809 					&msdu_info,
810 					DP_MLO_MCAST_REINJECT_PEER_ID,
811 					NULL);
812 	if (qdf_unlikely(nbuf_clone)) {
813 		DP_STATS_INC(ptnr_vdev,
814 			     tx_i[msdu_info.xmit_type].mlo_mcast.fail_pkt_count,
815 			     1);
816 		dp_info("pkt send failed");
817 		qdf_nbuf_free(nbuf_clone);
818 		return;
819 	}
820 }
821 
822 static inline void
823 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
824 			      struct dp_vdev *vdev,
825 			      struct dp_tx_msdu_info_s *msdu_info)
826 {
827 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
828 }
829 
830 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
831 				struct dp_vdev *vdev,
832 				qdf_nbuf_t nbuf)
833 {
834 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
835 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
836 
837 	if (qdf_unlikely(vdev->multipass_en) &&
838 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
839 		return;
840 	/* send frame on partner vdevs */
841 	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
842 			      dp_tx_mlo_mcast_pkt_send,
843 			      nbuf, DP_MOD_ID_REINJECT, DP_LINK_VDEV_ITER,
844 			      DP_VDEV_ITERATE_SKIP_SELF);
845 
846 	/* send frame on mcast primary vdev */
847 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
848 
849 	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
850 		be_vdev->mlo_dev_ctxt->seq_num = 0;
851 	else
852 		be_vdev->mlo_dev_ctxt->seq_num++;
853 }
854 
855 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
856 				   struct dp_vdev *vdev)
857 {
858 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
859 
860 	if (be_vdev->mcast_primary)
861 		return true;
862 
863 	return false;
864 }
865 
866 #if defined(CONFIG_MLO_SINGLE_DEV)
867 static void
868 dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
869 			   struct dp_vdev *ptnr_vdev,
870 			   void *arg)
871 {
872 	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
873 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
874 
875 	if (vdev == ptnr_vdev)
876 		return;
877 
878 	/*
879 	 * Hold the reference to avoid free of nbuf in
880 	 * dp_tx_mcast_enhance() in case of successful
881 	 * conversion
882 	 */
883 	qdf_nbuf_ref(nbuf);
884 
885 	if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
886 		return;
887 
888 	qdf_nbuf_free(nbuf);
889 }
890 
891 qdf_nbuf_t
892 dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
893 			qdf_nbuf_t nbuf,
894 			struct cdp_tx_exception_metadata *tx_exc_metadata)
895 {
896 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
897 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
898 
899 	if (!tx_exc_metadata->is_mlo_mcast)
900 		return nbuf;
901 
902 	if (!be_vdev->mcast_primary) {
903 		qdf_nbuf_free(nbuf);
904 		return NULL;
905 	}
906 
907 	/*
908 	 * In the single netdev model, avoid the reinjection path as the mcast
909 	 * packet is identified in the upper layers during the peer search
910 	 * that finds the primary TQM based on the dest mac addr.
911 	 *
912 	 * The new bonding interface is added into the bridge, so MCSD will
913 	 * update the snooping table and the wifi driver populates the entries
914 	 * in the appropriate child net devices.
915 	 */
916 	if (vdev->mcast_enhancement_en) {
917 		/*
918 		 * As dp_tx_mcast_enhance() can consume the nbuf in case of
919 		 * successful conversion, hold a reference to the nbuf.
920 		 *
921 		 * The reference is also needed to tx on partner links.
922 		 */
923 		qdf_nbuf_ref(nbuf);
924 		if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
925 			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
926 					      dp_tx_mlo_mcast_enhance_be,
927 					      nbuf, DP_MOD_ID_TX,
928 					      DP_ALL_VDEV_ITER,
929 					      DP_VDEV_ITERATE_SKIP_SELF);
930 			qdf_nbuf_free(nbuf);
931 			return NULL;
932 		}
933 		/* release reference taken above */
934 		qdf_nbuf_free(nbuf);
935 	}
936 	dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);
937 	return NULL;
938 }
939 #endif
940 #else
941 static inline void
942 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
943 			      struct dp_vdev *vdev,
944 			      struct dp_tx_msdu_info_s *msdu_info)
945 {
946 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
947 }
948 #endif
949 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
950 	!defined(WLAN_MCAST_MLO)
951 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
952 				struct dp_vdev *vdev,
953 				qdf_nbuf_t nbuf)
954 {
955 }
956 
957 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
958 				   struct dp_vdev *vdev)
959 {
960 	return false;
961 }
962 #endif
963 
964 #ifdef CONFIG_SAWF
965 /**
966  * dp_sawf_config_be() - Configure SAWF-specific fields in the TCL descriptor
967  *
968  * @soc: DP soc handle
969  * @hal_tx_desc_cached: tx descriptor
970  * @fw_metadata: firmware metadata
971  * @nbuf: skb buffer
972  * @msdu_info: msdu info
973  *
974  * Return: void
975  */
976 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
977 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
978 		       struct dp_tx_msdu_info_s *msdu_info)
979 {
980 	uint8_t q_id = 0;
981 
982 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
983 		return;
984 
985 	q_id = dp_sawf_queue_id_get(nbuf);
986 
987 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
988 		return;
989 	msdu_info->tid = (q_id & (CDP_DATA_TID_MAX - 1));
990 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
991 				 (q_id & (CDP_DATA_TID_MAX - 1)));
992 
993 	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
994 	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
995 		return;
996 
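	/*
	 * For non-default SAWF queues, program the FW metadata and use the
	 * flow override / who_classify_info_sel bits (derived from q_id) to
	 * steer the frame to the intended TQM flow queue.
	 */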
997 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
998 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
999 					     DP_TX_FLOW_OVERRIDE_ENABLE);
1000 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
1001 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
1002 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
1003 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
1004 }
1005 
1006 #else
1007 
1008 static inline
1009 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1010 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
1011 		       struct dp_tx_msdu_info_s *msdu_info)
1012 {
1013 }
1014 
1015 static inline
1016 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
1017 					 struct dp_tx_desc_s *tx_desc)
1018 {
1019 	return QDF_STATUS_SUCCESS;
1020 }
1021 
1022 static inline
1023 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
1024 					      struct dp_tx_desc_s *tx_desc)
1025 {
1026 	return QDF_STATUS_SUCCESS;
1027 }
1028 #endif
1029 
1030 #ifdef WLAN_SUPPORT_PPEDS
1031 
1032 /**
1033  * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
1034  * @soc: Handle to DP Soc structure
1035  * @peer_id: Peer ID in the descriptor
1036  *
1037  * Return: NONE
1038  */
1039 static inline
1040 void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
1041 {
1042 	struct dp_vdev *vdev = NULL;
1043 	struct dp_txrx_peer *txrx_peer = NULL;
1044 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1045 
1046 	DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
1047 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
1048 					       peer_id,
1049 					       &txrx_ref_handle,
1050 					       DP_MOD_ID_TX_COMP);
1051 	if (txrx_peer) {
1052 		vdev = txrx_peer->vdev;
1053 		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.fw2wbm_tx_drop, 1);
1054 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
1055 	}
1056 }
1057 
1058 int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
1059 {
1060 	uint32_t num_avail_for_reap = 0;
1061 	void *tx_comp_hal_desc;
1062 	uint8_t buf_src, status = 0;
1063 	uint32_t count = 0;
1064 	struct dp_tx_desc_s *tx_desc = NULL;
1065 	struct dp_tx_desc_s *head_desc = NULL;
1066 	struct dp_tx_desc_s *tail_desc = NULL;
1067 	struct dp_soc *soc = &be_soc->soc;
1068 	void *last_prefetch_hw_desc = NULL;
1069 	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
1070 	qdf_nbuf_t  nbuf;
1071 	hal_soc_handle_t hal_soc = soc->hal_soc;
1072 	hal_ring_handle_t hal_ring_hdl =
1073 				be_soc->ppeds_wbm_release_ring.hal_srng;
1074 	struct dp_txrx_peer *txrx_peer = NULL;
1075 	uint16_t peer_id = CDP_INVALID_PEER;
1076 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1077 	struct dp_vdev *vdev = NULL;
1078 	struct dp_pdev *pdev = NULL;
1079 	struct dp_srng *srng;
1080 
1081 	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
1082 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1083 		return 0;
1084 	}
1085 
1086 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
1087 
1088 	if (num_avail_for_reap >= quota)
1089 		num_avail_for_reap = quota;
1090 
1091 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
1092 
1093 	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
1094 						     num_avail_for_reap);
1095 
1096 	srng = &be_soc->ppeds_wbm_release_ring;
1097 
1098 	if (srng) {
1099 		hal_update_ring_util(soc->hal_soc, srng->hal_srng,
1100 				     WBM2SW_RELEASE,
1101 				     &be_soc->ppeds_wbm_release_ring.stats);
1102 	}
1103 
1104 	while (qdf_likely(num_avail_for_reap--)) {
1105 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
1106 		if (qdf_unlikely(!tx_comp_hal_desc))
1107 			break;
1108 
1109 		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
1110 							tx_comp_hal_desc);
1111 
1112 		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
1113 				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1114 			dp_err("Tx comp release_src != TQM | FW but from %d",
1115 			       buf_src);
1116 			dp_assert_always_internal_ds_stat(0, be_soc,
1117 							  tx.tx_comp_buf_src);
1118 			continue;
1119 		}
1120 
1121 		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
1122 						       &tx_desc);
1123 
1124 		if (!tx_desc) {
1125 			dp_err("unable to retrieve tx_desc!");
1126 			dp_assert_always_internal_ds_stat(0, be_soc,
1127 							  tx.tx_comp_desc_null);
1128 			continue;
1129 		}
1130 
1131 		if (qdf_unlikely(!(tx_desc->flags &
1132 				   DP_TX_DESC_FLAG_ALLOCATED) ||
1133 				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
1134 			dp_assert_always_internal_ds_stat(0, be_soc,
1135 						tx.tx_comp_invalid_flag);
1136 			continue;
1137 		}
1138 
1139 		tx_desc->buffer_src = buf_src;
1140 
1141 		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1142 			status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1143 			if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
1144 				dp_ppeds_stats(soc, tx_desc->peer_id);
1145 
1146 			nbuf = dp_ppeds_tx_desc_free(soc, tx_desc);
1147 			qdf_nbuf_free(nbuf);
1148 		} else {
1149 			tx_desc->tx_status =
1150 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1151 
1152 			/*
1153 			 * Add desc sync to account for extended statistics
1154 			 * during Tx completion.
1155 			 */
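			/*
			 * Cache the txrx_peer/vdev/pdev lookup across
			 * consecutive completions for the same peer to avoid
			 * repeated reference lookups while reaping the ring.
			 */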
1156 			if (peer_id != tx_desc->peer_id) {
1157 				if (txrx_peer) {
1158 					dp_txrx_peer_unref_delete(txrx_ref_handle,
1159 								  DP_MOD_ID_TX_COMP);
1160 					txrx_peer = NULL;
1161 					vdev = NULL;
1162 					pdev = NULL;
1163 				}
1164 				peer_id = tx_desc->peer_id;
1165 				txrx_peer =
1166 					dp_txrx_peer_get_ref_by_id(soc, peer_id,
1167 								   &txrx_ref_handle,
1168 								   DP_MOD_ID_TX_COMP);
1169 				if (txrx_peer) {
1170 					vdev = txrx_peer->vdev;
1171 					if (!vdev)
1172 						goto next_desc;
1173 
1174 					pdev = vdev->pdev;
1175 					if (!pdev)
1176 						goto next_desc;
1177 
1178 					dp_tx_desc_update_fast_comp_flag(soc,
1179 									 tx_desc,
1180 									 !pdev->enhanced_stats_en);
1181 					if (pdev->enhanced_stats_en) {
1182 						hal_tx_comp_desc_sync(tx_comp_hal_desc,
1183 								      &tx_desc->comp, 1);
1184 					}
1185 				}
1186 			} else if (txrx_peer && vdev && pdev) {
1187 				dp_tx_desc_update_fast_comp_flag(soc,
1188 								 tx_desc,
1189 								 !pdev->enhanced_stats_en);
1190 				if (pdev->enhanced_stats_en) {
1191 					hal_tx_comp_desc_sync(tx_comp_hal_desc,
1192 							      &tx_desc->comp, 1);
1193 				}
1194 			}
1195 next_desc:
1196 			if (!head_desc) {
1197 				head_desc = tx_desc;
1198 				tail_desc = tx_desc;
1199 			}
1200 
1201 			tail_desc->next = tx_desc;
1202 			tx_desc->next = NULL;
1203 			tail_desc = tx_desc;
1204 
1205 			count++;
1206 
1207 			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
1208 						       num_avail_for_reap,
1209 						       hal_ring_hdl,
1210 						       &last_prefetch_hw_desc,
1211 						       &last_prefetch_sw_desc);
1212 		}
1213 	}
1214 
1215 	dp_srng_access_end(NULL, soc, hal_ring_hdl);
1216 
1217 	if (txrx_peer)
1218 		dp_txrx_peer_unref_delete(txrx_ref_handle,
1219 					  DP_MOD_ID_TX_COMP);
1220 	if (head_desc)
1221 		dp_tx_comp_process_desc_list(soc, head_desc,
1222 					     CDP_MAX_TX_COMP_PPE_RING);
1223 
1224 	return count;
1225 }
1226 #endif
1227 
1228 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1229 static inline void
1230 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1231 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1232 			     uint16_t *ast_idx, uint16_t *ast_hash)
1233 {
1234 	struct dp_peer *peer = NULL;
1235 
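	/*
	 * For WDS-extended peers, override the vdev's BSS AST index/hash with
	 * the peer's own values and enable index lookup override in the TCL
	 * descriptor so the HW address search presumably uses the peer entry.
	 */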
1236 	if (tx_exc_metadata->is_wds_extended) {
1237 		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
1238 					     DP_MOD_ID_TX);
1239 		if (peer) {
1240 			*ast_idx = peer->ast_idx;
1241 			*ast_hash = peer->ast_hash;
1242 			hal_tx_desc_set_index_lookup_override
1243 							(soc->hal_soc,
1244 							 hal_tx_desc_cached,
1245 							 0x1);
1246 			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
1247 		}
1248 	} else {
1249 		return;
1250 	}
1251 }
1252 
1253 #else
1254 static inline void
1255 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1256 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1257 			     uint16_t *ast_idx, uint16_t *ast_hash)
1258 {
1259 }
1260 #endif
1261 
1262 QDF_STATUS
1263 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
1264 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
1265 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
1266 		    struct dp_tx_msdu_info_s *msdu_info)
1267 {
1268 	void *hal_tx_desc;
1269 	uint32_t *hal_tx_desc_cached;
1270 	int coalesce = 0;
1271 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1272 	uint8_t ring_id = tx_q->ring_id;
1273 	uint8_t tid;
1274 	struct dp_vdev_be *be_vdev;
1275 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1276 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
1277 	hal_ring_handle_t hal_ring_hdl = NULL;
1278 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1279 	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
1280 	uint16_t ast_idx = vdev->bss_ast_idx;
1281 	uint16_t ast_hash = vdev->bss_ast_hash;
1282 
1283 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1284 
1285 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1286 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1287 		return QDF_STATUS_E_RESOURCES;
1288 	}
1289 
1290 	if (qdf_unlikely(tx_exc_metadata)) {
1291 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
1292 				   CDP_INVALID_TX_ENCAP_TYPE) ||
1293 				   (tx_exc_metadata->tx_encap_type ==
1294 				    vdev->tx_encap_type));
1295 
1296 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
1297 			qdf_assert_always((tx_exc_metadata->sec_type ==
1298 					   CDP_INVALID_SEC_TYPE) ||
1299 					   tx_exc_metadata->sec_type ==
1300 					   vdev->sec_type);
1301 		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
1302 					     tx_exc_metadata,
1303 					     &ast_idx, &ast_hash);
1304 	}
1305 
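	/*
	 * Compose the TCL descriptor in the stack-resident cached_desc buffer
	 * first and copy it into the SRNG entry only once it is complete (via
	 * hal_tx_desc_sync() below), presumably to limit writes to the
	 * uncached ring memory.
	 */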
1306 	hal_tx_desc_cached = (void *)cached_desc;
1307 
1308 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
1309 		dp_sawf_config_be(soc, hal_tx_desc_cached,
1310 				  &fw_metadata, tx_desc->nbuf, msdu_info);
1311 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
1312 	}
1313 
1314 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
1315 				    tx_desc->dma_addr, bm_id, tx_desc->id,
1316 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1317 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
1318 				   vdev->lmac_id);
1319 
1320 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
1321 					ast_idx);
1322 	/*
1323 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
1324 	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
1325 	 */
1326 
1327 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1328 				      (ast_hash & 0xF));
1329 
1330 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1331 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1332 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1333 
1334 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1335 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1336 
1337 	/* verify checksum offload configuration */
1338 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
1339 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
1340 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
1341 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1342 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1343 	}
1344 
1345 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
1346 
1347 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
1348 
1349 	tid = msdu_info->tid;
1350 	if (tid != HTT_TX_EXT_TID_INVALID)
1351 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1352 
1353 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
1354 						tx_desc->nbuf);
1355 	dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
1356 				      tx_desc->nbuf);
1357 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
1358 
1359 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1360 
1361 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1362 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1363 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1364 		DP_STATS_INC(vdev,
1365 			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1366 			     1);
1367 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1368 		return status;
1369 	}
1370 
1371 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1372 	if (qdf_unlikely(!hal_tx_desc)) {
1373 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1374 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1375 		DP_STATS_INC(vdev,
1376 			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1377 			     1);
1378 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1379 		goto ring_access_fail;
1380 	}
1381 
1382 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1383 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1384 
1385 	/* Sync cached descriptor with HW */
1386 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
1387 
1388 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
1389 					    msdu_info, ring_id);
1390 
1391 	DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].processed, 1,
1392 			 dp_tx_get_pkt_len(tx_desc));
1393 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
1394 	dp_tx_update_stats(soc, tx_desc, ring_id);
1395 	status = QDF_STATUS_SUCCESS;
1396 
1397 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
1398 				 hal_ring_hdl, soc, ring_id);
1399 
1400 ring_access_fail:
1401 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
1402 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
1403 			     qdf_get_log_timestamp(), tx_desc->nbuf);
1404 	return status;
1405 }
1406 
1407 #ifdef IPA_OFFLOAD
1408 static void
1409 dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
1410 			  union hal_tx_bank_config *bank_config)
1411 {
1412 	bank_config->epd = 0;
1413 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
1414 	bank_config->encrypt_type = 0;
1415 
1416 	bank_config->src_buffer_swap = 0;
1417 	bank_config->link_meta_swap = 0;
1418 
1419 	bank_config->index_lookup_enable = 0;
1420 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1421 	bank_config->addrx_en = 1;
1422 	bank_config->addry_en = 1;
1423 
1424 	bank_config->mesh_enable = 0;
1425 	bank_config->dscp_tid_map_id = 0;
1426 	bank_config->vdev_id_check_en = 0;
1427 	bank_config->pmac_id = 0;
1428 }
1429 
1430 static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1431 {
1432 	union hal_tx_bank_config ipa_config = {0};
1433 	int bid;
1434 
1435 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
1436 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
1437 		return;
1438 	}
1439 
1440 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
1441 
1442 	/* Let IPA use last HOST owned bank */
1443 	bid = be_soc->num_bank_profiles - 1;
1444 
1445 	be_soc->bank_profiles[bid].is_configured = true;
1446 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
1447 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1448 				      &be_soc->bank_profiles[bid].bank_config,
1449 				      bid);
1450 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
1451 
1452 	dp_info("IPA bank at slot %d config:0x%x", bid,
1453 		be_soc->bank_profiles[bid].bank_config.val);
1454 
1455 	be_soc->ipa_bank_id = bid;
1456 }
1457 #else /* !IPA_OFFLOAD */
1458 static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1459 {
1460 }
1461 #endif /* IPA_OFFLOAD */
1462 
1463 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
1464 {
1465 	int i, num_tcl_banks;
1466 
1467 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
1468 
1469 	dp_assert_always_internal(num_tcl_banks);
1470 	be_soc->num_bank_profiles = num_tcl_banks;
1471 
1472 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
1473 					       sizeof(*be_soc->bank_profiles));
1474 	if (!be_soc->bank_profiles) {
1475 		dp_err("unable to allocate memory for DP TX Profiles!");
1476 		return QDF_STATUS_E_NOMEM;
1477 	}
1478 
1479 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
1480 
1481 	for (i = 0; i < num_tcl_banks; i++) {
1482 		be_soc->bank_profiles[i].is_configured = false;
1483 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
1484 	}
1485 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
1486 
1487 	dp_tx_init_ipa_bank_profile(be_soc);
1488 
1489 	return QDF_STATUS_SUCCESS;
1490 }
1491 
1492 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
1493 {
1494 	qdf_mem_free(be_soc->bank_profiles);
1495 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
1496 }
1497 
1498 static
1499 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1500 				union hal_tx_bank_config *bank_config)
1501 {
1502 	struct dp_vdev *vdev = &be_vdev->vdev;
1503 
1504 	bank_config->epd = 0;
1505 
1506 	bank_config->encap_type = vdev->tx_encap_type;
1507 
1508 	/* Only valid for raw frames. Needs work for RAW mode */
1509 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1510 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1511 	} else {
1512 		bank_config->encrypt_type = 0;
1513 	}
1514 
1515 	bank_config->src_buffer_swap = 0;
1516 	bank_config->link_meta_swap = 0;
1517 
1518 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1519 	    vdev->opmode == wlan_op_mode_sta) {
1520 		bank_config->index_lookup_enable = 1;
1521 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1522 		bank_config->addrx_en = 0;
1523 		bank_config->addry_en = 0;
1524 	} else {
1525 		bank_config->index_lookup_enable = 0;
1526 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1527 		bank_config->addrx_en =
1528 			(vdev->hal_desc_addr_search_flags &
1529 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1530 		bank_config->addry_en =
1531 			(vdev->hal_desc_addr_search_flags &
1532 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1533 	}
1534 
1535 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1536 
1537 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1538 
1539 	/* Disabling vdev id check for now. Needs revisit. */
1540 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1541 
1542 	bank_config->pmac_id = vdev->lmac_id;
1543 }
1544 
1545 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1546 			   struct dp_vdev_be *be_vdev)
1547 {
1548 	char *temp_str = "";
1549 	bool found_match = false;
1550 	int bank_id = DP_BE_INVALID_BANK_ID;
1551 	int i;
1552 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1553 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1554 	union hal_tx_bank_config vdev_config = {0};
1555 
1556 	/* convert vdev params into hal_tx_bank_config */
1557 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1558 
1559 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1560 	/* go over all banks and find a matching/unconfigured/unused bank */
1561 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1562 		if (be_soc->bank_profiles[i].is_configured &&
1563 		    (be_soc->bank_profiles[i].bank_config.val ^
1564 						vdev_config.val) == 0) {
1565 			found_match = true;
1566 			break;
1567 		}
1568 
1569 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1570 		    !be_soc->bank_profiles[i].is_configured)
1571 			unconfigured_slot = i;
1572 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1573 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1574 			zero_ref_count_slot = i;
1575 	}
1576 
1577 	if (found_match) {
1578 		temp_str = "matching";
1579 		bank_id = i;
1580 		goto inc_ref_and_return;
1581 	}
1582 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1583 		temp_str = "unconfigured";
1584 		bank_id = unconfigured_slot;
1585 		goto configure_and_return;
1586 	}
1587 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1588 		temp_str = "zero_ref_count";
1589 		bank_id = zero_ref_count_slot;
1590 	}
1591 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1592 		dp_alert("unable to find TX bank!");
1593 		QDF_BUG(0);
 		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1594 		return bank_id;
1595 	}
1596 
1597 configure_and_return:
1598 	be_soc->bank_profiles[bank_id].is_configured = true;
1599 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1600 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1601 				      &be_soc->bank_profiles[bank_id].bank_config,
1602 				      bank_id);
1603 inc_ref_and_return:
1604 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1605 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1606 
1607 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1608 		temp_str, bank_id, vdev_config.val,
1609 		be_soc->bank_profiles[bank_id].bank_config.val,
1610 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1611 
1612 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1613 		be_soc->bank_profiles[bank_id].bank_config.epd,
1614 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1615 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1616 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1617 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1618 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1619 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1620 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1621 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1622 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1623 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1624 
1625 	return bank_id;
1626 }
1627 
1628 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
1629 			    struct dp_vdev_be *be_vdev)
1630 {
1631 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1632 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
1633 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1634 }
1635 
1636 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
1637 			       struct dp_vdev_be *be_vdev)
1638 {
1639 	dp_tx_put_bank_profile(be_soc, be_vdev);
1640 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1641 	be_vdev->vdev.bank_id = be_vdev->bank_id;
1642 }
1643 
1644 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1645 				   uint32_t num_elem,
1646 				   uint8_t pool_id,
1647 				   bool spcl_tx_desc)
1648 {
1649 	struct dp_tx_desc_pool_s *tx_desc_pool;
1650 	struct dp_hw_cookie_conversion_t *cc_ctx;
1651 	struct dp_spt_page_desc *page_desc;
1652 	struct dp_tx_desc_s *tx_desc;
1653 	uint32_t ppt_idx = 0;
1654 	uint32_t avail_entry_index = 0;
1655 
1656 	if (!num_elem) {
1657 		dp_err("desc_num 0 !!");
1658 		return QDF_STATUS_E_FAILURE;
1659 	}
1660 
1661 	if (spcl_tx_desc) {
1662 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
1663 		cc_ctx  = dp_get_spcl_tx_cookie_t(soc, pool_id);
1664 	} else {
1665 		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
1666 		cc_ctx  = dp_get_tx_cookie_t(soc, pool_id);
1667 	}
1668 	tx_desc = tx_desc_pool->freelist;
1669 	page_desc = &cc_ctx->page_desc_base[0];
1670 	while (tx_desc) {
1671 		if (avail_entry_index == 0) {
1672 			if (ppt_idx >= cc_ctx->total_page_num) {
1673 				dp_alert("insufficient secondary page tables");
1674 				qdf_assert_always(0);
1675 			}
1676 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1677 		}
1678 
1679 		/* put each TX Desc VA into the SPT pages and
1680 		 * get the corresponding ID
1681 		 */
1682 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1683 					 avail_entry_index,
1684 					 tx_desc);
1685 		tx_desc->id =
1686 			dp_cc_desc_id_generate(page_desc->ppt_index,
1687 					       avail_entry_index);
1688 		tx_desc->pool_id = pool_id;
1689 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1690 		tx_desc = tx_desc->next;
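		/*
		 * Advance within the current SPT page; the mask wraps the
		 * index back to 0 so the next iteration moves on to the next
		 * page descriptor.
		 */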
1691 		avail_entry_index = (avail_entry_index + 1) &
1692 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1693 	}
1694 
1695 	return QDF_STATUS_SUCCESS;
1696 }
1697 
1698 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
1699 			       struct dp_tx_desc_pool_s *tx_desc_pool,
1700 			       uint8_t pool_id, bool spcl_tx_desc)
1701 {
1702 	struct dp_spt_page_desc *page_desc;
1703 	int i = 0;
1704 	struct dp_hw_cookie_conversion_t *cc_ctx;
1705 
1706 	if (spcl_tx_desc)
1707 		cc_ctx  = dp_get_spcl_tx_cookie_t(soc, pool_id);
1708 	else
1709 		cc_ctx  = dp_get_tx_cookie_t(soc, pool_id);
1710 
1711 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1712 		page_desc = &cc_ctx->page_desc_base[i];
1713 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1714 	}
1715 }
1716 
1717 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
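/*
 * dp_tx_comp_nf_handler() - near-full interrupt handler for the TX
 * completion ring: if ring occupancy has crossed the near-full threshold,
 * flag the ring as near-full and report one unit of work so the completion
 * path can drain it.
 */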
1718 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1719 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1720 			       uint32_t quota)
1721 {
1722 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1723 	uint32_t work_done = 0;
1724 
1725 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1726 			DP_SRNG_THRESH_NEAR_FULL)
1727 		return 0;
1728 
1729 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1730 	work_done++;
1731 
1732 	return work_done;
1733 }
1734 #endif
1735 
1736 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1737 	defined(WLAN_CONFIG_TX_DELAY)
1738 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
1739 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
1740 
1741 #define HW_TX_DELAY_MAX                       0x1000000
1742 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
1743 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
1744 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
1745 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
1746 	 HW_TX_DELAY_MASK)
1747 
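/*
 * dp_mlo_compute_hw_delay_us() - derive the HW TX delay in microseconds:
 * pick the HW link from the PPDU id, convert the TQM enqueue buffer
 * timestamp to microseconds, normalize both timestamps with the per-link
 * TSF2 and TQM offsets, and return (completion TSF - enqueue time),
 * rejecting values above HW_TX_DELAY_MAX.
 */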
1748 static inline
1749 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1750 				      struct dp_vdev *vdev,
1751 				      struct hal_tx_completion_status *ts,
1752 				      uint32_t *delay_us)
1753 {
1754 	uint32_t ppdu_id;
1755 	uint8_t link_id_offset, link_id_bits;
1756 	uint8_t hw_link_id;
1757 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1758 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1759 	uint32_t delay;
1760 	int32_t delta_tsf2, delta_tqm;
1761 
1762 	if (!ts->valid)
1763 		return QDF_STATUS_E_INVAL;
1764 
1765 	link_id_offset = soc->link_id_offset;
1766 	link_id_bits = soc->link_id_bits;
1767 	ppdu_id = ts->ppdu_id;
1768 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1769 					   link_id_bits);
1770 
1771 	msdu_tqm_enqueue_tstamp_us =
1772 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1773 	msdu_compl_tsf_tstamp_us = ts->tsf;
1774 
1775 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1776 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1777 
1778 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1779 			delta_tqm) & HW_TX_DELAY_MASK;
1780 
1781 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1782 			delta_tsf2) & HW_TX_DELAY_MASK;
1783 
1784 	delay = (final_msdu_compl_tsf_tstamp_us -
1785 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1786 
1787 	if (delay > HW_TX_DELAY_MAX)
1788 		return QDF_STATUS_E_FAILURE;
1789 
1790 	if (delay_us)
1791 		*delay_us = delay;
1792 
1793 	return QDF_STATUS_SUCCESS;
1794 }
1795 #else
1796 static inline
1797 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1798 				      struct dp_vdev *vdev,
1799 				      struct hal_tx_completion_status *ts,
1800 				      uint32_t *delay_us)
1801 {
1802 	return QDF_STATUS_SUCCESS;
1803 }
1804 #endif
1805 
1806 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1807 				     struct dp_vdev *vdev,
1808 				     struct hal_tx_completion_status *ts,
1809 				     uint32_t *delay_us)
1810 {
1811 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1812 }
1813 
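/*
 * Fast-path map: clean only the first 256 bytes of the frame from the
 * D-cache (the barrier is deferred to the qdf_dsb() issued after the TCL
 * descriptor write) and translate the buffer VA directly to its physical
 * address instead of going through a full DMA map.
 */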
1814 static inline
1815 qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
1816 				 struct dp_tx_desc_s *tx_desc,
1817 				 qdf_nbuf_t nbuf)
1818 {
1819 	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
1820 					(void *)(nbuf->data + 256));
1821 
1822 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1823 }
1824 
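/* Nothing to undo: the fast-path map above does not create a DMA mapping */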
1825 static inline
1826 void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
1827 			 struct dp_tx_desc_s *desc)
1828 {
1829 }
1830 
1831 #ifdef QCA_DP_TX_NBUF_LIST_FREE
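/*
 * dp_tx_fast_send_be() - lite TX enqueue path: allocate a SW descriptor,
 * build the TCL data command directly in a cached buffer (buffer address,
 * SW cookie, bank id, HTT metadata, length, L3/L4 checksum enable,
 * PMAC/vdev id, optional HLOS TID override and STA AST hints) and push it
 * to the TCL SRNG. Returns NULL on success, or the nbuf back to the caller
 * on failure.
 */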
1832 qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1833 			      qdf_nbuf_t nbuf)
1834 {
1835 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1836 	struct dp_vdev *vdev = NULL;
1837 	struct dp_pdev *pdev = NULL;
1838 	struct dp_tx_desc_s *tx_desc;
1839 	uint16_t desc_pool_id;
1840 	uint16_t pkt_len;
1841 	qdf_dma_addr_t paddr;
1842 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1843 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1844 	hal_ring_handle_t hal_ring_hdl = NULL;
1845 	uint32_t *hal_tx_desc_cached;
1846 	void *hal_tx_desc;
1847 	uint8_t tid = HTT_TX_EXT_TID_INVALID;
1848 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
1849 
1850 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
1851 		return nbuf;
1852 
1853 	vdev = soc->vdev_id_map[vdev_id];
1854 	if (qdf_unlikely(!vdev))
1855 		return nbuf;
1856 
1857 	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
1858 
1859 	pkt_len = qdf_nbuf_headlen(nbuf);
1860 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, pkt_len);
1861 	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_in_fast_xmit_flow, 1);
1862 	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_per_core[desc_pool_id], 1);
1863 
1864 	pdev = vdev->pdev;
1865 	if (dp_tx_limit_check(vdev, nbuf))
1866 		return nbuf;
1867 
1868 	if (qdf_unlikely(vdev->skip_sw_tid_classification
1869 				& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1870 		tid = qdf_nbuf_get_priority(nbuf);
1871 
1872 		if (tid == DP_TX_INVALID_QOS_TAG)
1873 			tid = HTT_TX_EXT_TID_INVALID;
1874 	}
1875 
1876 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1877 
1878 	if (qdf_unlikely(!tx_desc)) {
1879 		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.desc_na.num, 1);
1880 		DP_STATS_INC(vdev,
1881 			     tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
1882 			     1);
1883 		return nbuf;
1884 	}
1885 
1886 	dp_tx_outstanding_inc(pdev);
1887 
1888 	/* Initialize the SW tx descriptor */
1889 	tx_desc->nbuf = nbuf;
1890 	tx_desc->frm_type = dp_tx_frm_std;
1891 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1892 	tx_desc->vdev_id = vdev_id;
1893 	tx_desc->pdev = pdev;
1894 	tx_desc->pkt_offset = 0;
1895 	tx_desc->length = pkt_len;
1896 	tx_desc->flags |= pdev->tx_fast_flag;
1897 
1898 	tx_desc->nbuf->fast_recycled = 1;
1899 
1900 	if (nbuf->is_from_recycler && nbuf->fast_xmit)
1901 		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;
1902 
1903 	paddr =  dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
1904 	if (!paddr) {
1905 		/* Handle failure */
1906 		dp_err("qdf_nbuf_map failed");
1907 		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.dma_error, 1);
1908 		goto release_desc;
1909 	}
1910 
1911 	tx_desc->dma_addr = paddr;
1912 
1913 	hal_tx_desc_cached = (void *)cached_desc;
1914 	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
1915 	hal_tx_desc_cached[1] = tx_desc->id <<
1916 		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;
1917 
1918 	/* bank_id */
1919 	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
1920 	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
1921 		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;
1922 
1923 	hal_tx_desc_cached[4] = tx_desc->length;
1924 	/* l3 and l4 checksum enable */
1925 	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
1926 		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;
1927 
1928 	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
1929 	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;
1930 
1931 	if (tid != HTT_TX_EXT_TID_INVALID) {
1932 		hal_tx_desc_cached[5] |= tid << TCL_DATA_CMD_HLOS_TID_LSB;
1933 		hal_tx_desc_cached[5] |= 1 << TCL_DATA_CMD_HLOS_TID_OVERWRITE_LSB;
1934 	}
1935 
1936 	if (vdev->opmode == wlan_op_mode_sta)
1937 		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
1938 			((vdev->bss_ast_hash & 0xF) <<
1939 			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);
1940 
1941 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);
1942 
1943 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1944 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1945 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1946 		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
1947 		goto ring_access_fail2;
1948 	}
1949 
1950 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1951 	if (qdf_unlikely(!hal_tx_desc)) {
1952 		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
1953 		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
1954 		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
1955 		goto ring_access_fail;
1956 	}
1957 
1958 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1959 
1960 	/* Sync cached descriptor with HW */
1961 	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
1962 	qdf_dsb();
1963 
1964 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].processed, 1, tx_desc->length);
1965 	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
1966 	status = QDF_STATUS_SUCCESS;
1967 
1968 ring_access_fail:
1969 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1970 
1971 ring_access_fail2:
1972 	if (status != QDF_STATUS_SUCCESS) {
1973 		dp_tx_nbuf_unmap_be(soc, tx_desc);
1974 		goto release_desc;
1975 	}
1976 
1977 	return NULL;
1978 
1979 release_desc:
1980 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1981 
1982 	return nbuf;
1983 }
1984 #endif
1985 
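/*
 * For BE targets the descriptor pool memory itself comes from the common
 * allocation path; the BE-specific work is the cookie-conversion setup done
 * in dp_tx_desc_pool_init_be()/_deinit_be(), so alloc/free are no-ops here.
 */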
1986 QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
1987 				    uint8_t pool_id)
1988 {
1989 	return QDF_STATUS_SUCCESS;
1990 }
1991 
1992 void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id)
1993 {
1994 }
1995