xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 4cfc54cf60be58b902e9fd31baa5eac56a9085a7)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #ifdef FEATURE_WDS
30 #include "dp_txrx_wds.h"
31 #endif
32 
33 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
34 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
35 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
36 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
37 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
38 #else
39 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
40 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
41 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
42 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
43 #endif
44 
45 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
46 #ifdef WLAN_MCAST_MLO
47 /* MLO peer id for reinject*/
48 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
49 #define MAX_GSN_NUM 0x0FFF
50 #endif
51 #endif
52 
53 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
54 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
55 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
56 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
57 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
58 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
59 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
60 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
61 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
62 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
63 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
64 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
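/*
 * Note: the WBM completion v3 fields below are read with the v2 HTT
 * bit-field accessors; this mapping assumes the corresponding field
 * offsets are unchanged between the two completion formats.
 */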
65 
66 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
67 
68 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
69 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
70 					      void *tx_comp_hal_desc)
71 {
72 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
73 	struct dp_tx_comp_peer_id *tx_peer_id =
74 			(struct dp_tx_comp_peer_id *)&peer_id;
75 
76 	return (tx_peer_id->peer_id |
77 	        (tx_peer_id->ml_peer_valid << soc->peer_id_shift));
78 }
79 #else
80 /* ml_peer_valid and peer_id are combined into a single field */
81 #define DP_BE_TX_COMP_PEER_ID_MASK	0x00003fff
82 #define DP_BE_TX_COMP_PEER_ID_SHIFT	0
83 
84 static inline uint16_t dp_tx_comp_get_peer_id(struct dp_soc *soc,
85 					      void *tx_comp_hal_desc)
86 {
87 	uint16_t peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
88 
89 	return ((peer_id & DP_BE_TX_COMP_PEER_ID_MASK) >>
90 		DP_BE_TX_COMP_PEER_ID_SHIFT);
91 }
92 #endif
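/*
 * Worked example (a sketch, assuming struct dp_tx_comp_peer_id carries a
 * 13-bit peer_id with ml_peer_valid in the next bit and
 * soc->peer_id_shift == 13): a completion peer-id field of 0x2005
 * decodes to peer_id 5 with the MLO bit re-applied, i.e. (1 << 13) | 5.
 * Without DP_USE_REDUCED_PEER_ID_FIELD_WIDTH the value is simply masked
 * with DP_BE_TX_COMP_PEER_ID_MASK and returned.
 */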
93 
94 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
95 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
96 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
97 					    void *tx_comp_hal_desc,
98 					    struct dp_tx_desc_s **r_tx_desc)
99 {
100 	uint32_t tx_desc_id;
101 
102 	if (qdf_likely(
103 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
104 		/* HW cookie conversion done */
105 		*r_tx_desc = (struct dp_tx_desc_s *)
106 				hal_tx_comp_get_desc_va(tx_comp_hal_desc);
107 	} else {
108 		/* SW does cookie conversion to VA */
109 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
110 		*r_tx_desc =
111 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
112 	}
113 
114 	if (*r_tx_desc)
115 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
116 							       tx_comp_hal_desc);
117 }
118 #else
119 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
120 					    void *tx_comp_hal_desc,
121 					    struct dp_tx_desc_s **r_tx_desc)
122 {
123 	*r_tx_desc = (struct dp_tx_desc_s *)
124 			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
125 
126 	if (*r_tx_desc)
127 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
128 							       tx_comp_hal_desc);
129 }
130 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
131 #else
132 
133 void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
134 					    void *tx_comp_hal_desc,
135 					    struct dp_tx_desc_s **r_tx_desc)
136 {
137 	uint32_t tx_desc_id;
138 
139 	/* SW does cookie conversion to VA */
140 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
141 	*r_tx_desc =
142 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
143 
144 	if (*r_tx_desc)
145 		(*r_tx_desc)->peer_id = dp_tx_comp_get_peer_id(soc,
146 							       tx_comp_hal_desc);
147 }
148 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
149 
150 static inline
151 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
152 {
153 	struct dp_vdev *vdev;
154 	uint8_t vdev_id;
155 	uint32_t *htt_desc = (uint32_t *)status;
156 
157 	qdf_assert_always(!soc->mec_fw_offload);
158 
159 	/*
160 	 * Get vdev id from HTT status word in case of MEC
161 	 * notification
162 	 */
163 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
164 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
165 		return;
166 
167 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
168 				     DP_MOD_ID_HTT_COMP);
169 	if (!vdev)
170 		return;
171 	dp_tx_mec_handler(vdev, status);
172 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
173 }
174 
175 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
176 				     struct dp_tx_desc_s *tx_desc,
177 				     uint8_t *status,
178 				     uint8_t ring_id)
179 {
180 	uint8_t tx_status;
181 	struct dp_pdev *pdev;
182 	struct dp_vdev *vdev = NULL;
183 	struct hal_tx_completion_status ts = {0};
184 	uint32_t *htt_desc = (uint32_t *)status;
185 	struct dp_txrx_peer *txrx_peer;
186 	dp_txrx_ref_handle txrx_ref_handle = NULL;
187 	struct cdp_tid_tx_stats *tid_stats = NULL;
188 	struct htt_soc *htt_handle;
189 	uint8_t vdev_id;
190 
191 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
192 	htt_handle = (struct htt_soc *)soc->htt_handle;
193 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
194 
195 	/*
196 	 * There can be a scenario where the descriptor enqueued on TQM2WBM
197 	 * is consumed by WBM first, i.e. the TQM completion is processed
198 	 * before the MEC notification arrives from FW2WBM. Hence avoid
199 	 * accessing any field of the tx descriptor for a MEC notify.
200 	 */
201 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
202 		return dp_tx_process_mec_notify_be(soc, status);
203 
204 	/*
205 	 * If the descriptor is already freed in vdev_detach,
206 	 * continue to next descriptor
207 	 */
208 	if (qdf_unlikely(!tx_desc->flags)) {
209 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
210 				   tx_desc->id);
211 		return;
212 	}
213 
214 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
215 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
216 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
217 		goto release_tx_desc;
218 	}
219 
220 	pdev = tx_desc->pdev;
221 
222 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
223 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
224 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
225 		goto release_tx_desc;
226 	}
227 
228 	qdf_assert(tx_desc->pdev);
229 
230 	vdev_id = tx_desc->vdev_id;
231 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
232 				     DP_MOD_ID_HTT_COMP);
233 
234 	if (qdf_unlikely(!vdev)) {
235 		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
236 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
237 		goto release_tx_desc;
238 	}
239 
240 	switch (tx_status) {
241 	case HTT_TX_FW2WBM_TX_STATUS_OK:
242 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
243 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
244 	{
245 		uint8_t tid;
246 
247 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
248 			ts.peer_id =
249 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
250 						htt_desc[3]);
251 			ts.tid =
252 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
253 						htt_desc[3]);
254 		} else {
255 			ts.peer_id = HTT_INVALID_PEER;
256 			ts.tid = HTT_INVALID_TID;
257 		}
258 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
259 		ts.ppdu_id =
260 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
261 					htt_desc[2]);
262 		ts.ack_frame_rssi =
263 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
264 					htt_desc[2]);
265 
266 		ts.tsf = htt_desc[4];
267 		ts.first_msdu = 1;
268 		ts.last_msdu = 1;
269 		ts.status = (tx_status == HTT_TX_FW2WBM_TX_STATUS_OK ?
270 			     HAL_TX_TQM_RR_FRAME_ACKED :
271 			     HAL_TX_TQM_RR_REM_CMD_REM);
272 		tid = ts.tid;
273 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
274 			tid = CDP_MAX_DATA_TIDS - 1;
275 
276 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
277 
278 		if (qdf_unlikely(pdev->delay_stats_flag) ||
279 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
280 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
281 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
282 			tid_stats->htt_status_cnt[tx_status]++;
283 
284 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
285 						       &txrx_ref_handle,
286 						       DP_MOD_ID_HTT_COMP);
287 		if (qdf_likely(txrx_peer))
288 			dp_tx_update_peer_basic_stats(
289 						txrx_peer,
290 						qdf_nbuf_len(tx_desc->nbuf),
291 						tx_status,
292 						pdev->enhanced_stats_en);
293 
294 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
295 					     ring_id);
296 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
297 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
298 
299 		if (qdf_likely(txrx_peer))
300 			dp_txrx_peer_unref_delete(txrx_ref_handle,
301 						  DP_MOD_ID_HTT_COMP);
302 
303 		break;
304 	}
305 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
306 	{
307 		uint8_t reinject_reason;
308 
309 		reinject_reason =
310 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
311 								htt_desc[1]);
312 		dp_tx_reinject_handler(soc, vdev, tx_desc,
313 				       status, reinject_reason);
314 		break;
315 	}
316 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
317 	{
318 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
319 		break;
320 	}
321 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
322 	{
323 		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
324 		goto release_tx_desc;
325 	}
326 	default:
327 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
328 			       tx_status);
329 		goto release_tx_desc;
330 	}
331 
332 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
333 	return;
334 
335 release_tx_desc:
336 	dp_tx_comp_free_buf(soc, tx_desc);
337 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
338 	if (vdev)
339 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
340 }
341 
342 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
343 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
344 /*
345  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
346  * @soc - DP soc structure pointer
347  * @ring_id - Transmit Queue/ring_id to be used when XPS is enabled
348  *
349  * Return - RBM ID corresponding to TCL ring_id
350  */
351 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
352 					  uint8_t ring_id)
353 {
354 	return 0;
355 }
356 #else
357 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
358 					  uint8_t ring_id)
359 {
360 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
361 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
362 }
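/*
 * Example mapping (with a hypothetical wbm_sw0_bm_id of 5): ring_id 2
 * returns RBM 6 (5 + (2 - 1)), while ring_id 0 falls back to
 * HAL_WBM_SW2_BM_ID(5).
 */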
363 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
364 #else
365 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
366 					  uint8_t tcl_index)
367 {
368 	uint8_t rbm;
369 
370 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
371 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
372 	return rbm;
373 }
374 #endif
375 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
376 
377 /*
378  * dp_tx_set_min_rates_for_critical_frames() - sets min rates for critical pkts
379  * @soc - DP soc structure pointer
380  * @hal_tx_desc - HAL descriptor where fields are set
381  * @nbuf - skb to be considered for min rates
382  *
383  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
384  * and uses it to determine if the frame is critical. For a critical frame,
385  * flow override bits are set to classify the frame into HW's high priority
386  * queue. The HW will pick pre-configured min rates for such packets.
387  *
388  * Return - None
389  */
390 static void
391 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
392 					uint32_t *hal_tx_desc,
393 					qdf_nbuf_t nbuf)
394 {
395 /*
396  * Critical frames should be queued to the high priority queue for the
397  * TID on which they are sent out (for the concerned peer).
398  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
399  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
400  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
401  * HOL queue.
402  */
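	/*
	 * e.g. who_classify_info_sel = 1 and flow_override = 0 give
	 * htt_msdu_idx = (2 * 1) + 0 = 2, i.e. the HOL queue.
	 */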
403 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
404 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
405 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
406 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
407 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
408 						TX_SEMI_HARD_NOTIFY_E);
409 	}
410 }
411 #else
412 static inline void
413 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
414 					uint32_t *hal_tx_desc_cached,
415 					qdf_nbuf_t nbuf)
416 {
417 }
418 #endif
419 
420 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
421 	defined(WLAN_MCAST_MLO)
422 void dp_tx_mcast_mlo_reinject_routing_set(struct dp_soc *soc, void *arg)
423 {
424 	hal_soc_handle_t hal_soc = soc->hal_soc;
425 	uint8_t *cmd = (uint8_t *)arg;
426 
427 	if (*cmd)
428 		hal_tx_mcast_mlo_reinject_routing_set(
429 					hal_soc,
430 					HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY);
431 	else
432 		hal_tx_mcast_mlo_reinject_routing_set(
433 					hal_soc,
434 					HAL_TX_MCAST_MLO_REINJECT_FW_NOTIFY);
435 }
436 
437 void
438 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
439 			 struct dp_vdev *ptnr_vdev,
440 			 void *arg)
441 {
442 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
443 	qdf_nbuf_t  nbuf_clone;
444 	struct dp_vdev_be *be_ptnr_vdev = NULL;
445 	struct dp_tx_msdu_info_s msdu_info;
446 
447 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
448 	if (be_vdev != be_ptnr_vdev) {
449 		nbuf_clone = qdf_nbuf_clone(nbuf);
450 		if (qdf_unlikely(!nbuf_clone)) {
451 			dp_tx_debug("nbuf clone failed");
452 			return;
453 		}
454 	} else {
455 		nbuf_clone = nbuf;
456 	}
457 
458 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
459 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
460 	msdu_info.gsn = be_vdev->seq_num;
461 	be_ptnr_vdev->seq_num = be_vdev->seq_num;
462 
463 	nbuf_clone = dp_tx_send_msdu_single(
464 					ptnr_vdev,
465 					nbuf_clone,
466 					&msdu_info,
467 					DP_MLO_MCAST_REINJECT_PEER_ID,
468 					NULL);
469 	if (qdf_unlikely(nbuf_clone)) {
470 		dp_info("pkt send failed");
471 		qdf_nbuf_free(nbuf_clone);
472 		return;
473 	}
474 }
475 
476 static inline void
477 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
478 			      struct dp_vdev *vdev,
479 			      struct dp_tx_msdu_info_s *msdu_info)
480 {
481 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
482 }
483 
484 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
485 				struct dp_vdev *vdev,
486 				qdf_nbuf_t nbuf)
487 {
488 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
489 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
490 
491 	/* send frame on partner vdevs */
492 	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
493 				    dp_tx_mlo_mcast_pkt_send,
494 				    nbuf, DP_MOD_ID_REINJECT);
495 
496 	/* send frame on mcast primary vdev */
497 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
498 
499 	if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
500 		be_vdev->seq_num = 0;
501 	else
502 		be_vdev->seq_num++;
503 }
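/*
 * Note: the MLO multicast global sequence number (GSN) carried in
 * msdu_info.gsn is a 12-bit value; once be_vdev->seq_num exceeds
 * MAX_GSN_NUM (0x0FFF) it is wrapped back to 0 for the next frame.
 */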
504 #else
505 static inline void
506 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
507 			      struct dp_vdev *vdev,
508 			      struct dp_tx_msdu_info_s *msdu_info)
509 {
510 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
511 }
512 #endif
513 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
514 	!defined(WLAN_MCAST_MLO)
515 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
516 				struct dp_vdev *vdev,
517 				qdf_nbuf_t nbuf)
518 {
519 }
520 #endif
521 
522 #ifdef CONFIG_SAWF
523 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
524 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
525 {
526 	uint8_t q_id = 0;
527 
528 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
529 		return;
530 
531 	dp_sawf_tcl_cmd(fw_metadata, nbuf);
532 	q_id = dp_sawf_queue_id_get(nbuf);
533 
534 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
535 		return;
536 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, DP_TX_HLOS_TID_GET(q_id));
537 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
538 					     DP_TX_FLOW_OVERRIDE_ENABLE);
539 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
540 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
541 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
542 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
543 }
544 
545 #else
546 
547 static inline
548 void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
549 		       uint16_t *fw_metadata, qdf_nbuf_t nbuf)
550 {
551 }
552 
553 static inline
554 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
555 					 struct dp_tx_desc_s *tx_desc)
556 {
557 	return QDF_STATUS_SUCCESS;
558 }
559 
560 static inline
561 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
562 					      struct dp_tx_desc_s *tx_desc)
563 {
564 	return QDF_STATUS_SUCCESS;
565 }
566 #endif
567 
568 QDF_STATUS
569 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
570 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
571 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
572 		    struct dp_tx_msdu_info_s *msdu_info)
573 {
574 	void *hal_tx_desc;
575 	uint32_t *hal_tx_desc_cached;
576 	int coalesce = 0;
577 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
578 	uint8_t ring_id = tx_q->ring_id;
579 	uint8_t tid = msdu_info->tid;
580 	struct dp_vdev_be *be_vdev;
581 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
582 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
583 	hal_ring_handle_t hal_ring_hdl = NULL;
584 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
585 
586 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
587 
588 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
589 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
590 		return QDF_STATUS_E_RESOURCES;
591 	}
592 
593 	if (qdf_unlikely(tx_exc_metadata)) {
594 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
595 				   CDP_INVALID_TX_ENCAP_TYPE) ||
596 				   (tx_exc_metadata->tx_encap_type ==
597 				    vdev->tx_encap_type));
598 
599 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
600 			qdf_assert_always((tx_exc_metadata->sec_type ==
601 					   CDP_INVALID_SEC_TYPE) ||
602 					   tx_exc_metadata->sec_type ==
603 					   vdev->sec_type);
604 	}
605 
606 	hal_tx_desc_cached = (void *)cached_desc;
607 
608 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
609 		dp_sawf_config_be(soc, hal_tx_desc_cached,
610 				  &fw_metadata, tx_desc->nbuf);
611 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
612 	}
613 
614 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
615 				    tx_desc->dma_addr, bm_id, tx_desc->id,
616 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
617 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
618 				   vdev->lmac_id);
619 
620 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
621 					vdev->bss_ast_idx);
622 	/*
623 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
624 	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
625 	 */
626 
627 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
628 				      (vdev->bss_ast_hash & 0xF));
629 
630 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
631 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
632 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
633 
634 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
635 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
636 
637 	/* verify checksum offload configuration */
638 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
639 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
640 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
641 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
642 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
643 	}
644 
645 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, be_vdev->bank_id);
646 
647 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
648 
649 	if (tid != HTT_TX_EXT_TID_INVALID)
650 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
651 
652 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
653 						tx_desc->nbuf);
654 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
655 
656 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
657 
658 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
659 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
660 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
661 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
662 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
663 		return status;
664 	}
665 
666 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
667 	if (qdf_unlikely(!hal_tx_desc)) {
668 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
669 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
670 		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
671 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
672 		goto ring_access_fail;
673 	}
674 
675 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
676 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
677 
678 	/* Sync cached descriptor with HW */
679 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
680 
681 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
682 					    msdu_info, ring_id);
683 
684 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
685 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
686 	dp_tx_update_stats(soc, tx_desc, ring_id);
687 	status = QDF_STATUS_SUCCESS;
688 
689 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
690 				 hal_ring_hdl, soc);
691 
692 ring_access_fail:
693 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
694 
695 	return status;
696 }
697 
698 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
699 {
700 	int i, num_tcl_banks;
701 
702 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
703 
704 	qdf_assert_always(num_tcl_banks);
705 	be_soc->num_bank_profiles = num_tcl_banks;
706 
707 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
708 					       sizeof(*be_soc->bank_profiles));
709 	if (!be_soc->bank_profiles) {
710 		dp_err("unable to allocate memory for DP TX Profiles!");
711 		return QDF_STATUS_E_NOMEM;
712 	}
713 
714 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
715 
716 	for (i = 0; i < num_tcl_banks; i++) {
717 		be_soc->bank_profiles[i].is_configured = false;
718 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
719 	}
720 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
721 	return QDF_STATUS_SUCCESS;
722 }
723 
724 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
725 {
726 	qdf_mem_free(be_soc->bank_profiles);
727 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
728 }
729 
730 static
731 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
732 				union hal_tx_bank_config *bank_config)
733 {
734 	struct dp_vdev *vdev = &be_vdev->vdev;
735 
736 	bank_config->epd = 0;
737 
738 	bank_config->encap_type = vdev->tx_encap_type;
739 
740 	/* Only valid for raw frames. Needs work for RAW mode */
741 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
742 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
743 	} else {
744 		bank_config->encrypt_type = 0;
745 	}
746 
747 	bank_config->src_buffer_swap = 0;
748 	bank_config->link_meta_swap = 0;
749 
750 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
751 	    vdev->opmode == wlan_op_mode_sta) {
752 		bank_config->index_lookup_enable = 1;
753 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
754 		bank_config->addrx_en = 0;
755 		bank_config->addry_en = 0;
756 	} else {
757 		bank_config->index_lookup_enable = 0;
758 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
759 		bank_config->addrx_en =
760 			(vdev->hal_desc_addr_search_flags &
761 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
762 		bank_config->addry_en =
763 			(vdev->hal_desc_addr_search_flags &
764 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
765 	}
766 
767 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
768 
769 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
770 
771 	/* Disabling vdev id check for now. Needs revist. */
772 	/* Disabling vdev id check for now. Needs revisit. */
773 
774 	bank_config->pmac_id = vdev->lmac_id;
775 }
776 
777 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
778 			   struct dp_vdev_be *be_vdev)
779 {
780 	char *temp_str = "";
781 	bool found_match = false;
782 	int bank_id = DP_BE_INVALID_BANK_ID;
783 	int i;
784 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
785 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
786 	union hal_tx_bank_config vdev_config = {0};
787 
788 	/* convert vdev params into hal_tx_bank_config */
789 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
790 
791 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
792 	/* go over all banks and find a matching/unconfigured/unused bank */
793 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
794 		if (be_soc->bank_profiles[i].is_configured &&
795 		    (be_soc->bank_profiles[i].bank_config.val ^
796 						vdev_config.val) == 0) {
797 			found_match = true;
798 			break;
799 		}
800 
801 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
802 		    !be_soc->bank_profiles[i].is_configured)
803 			unconfigured_slot = i;
804 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
805 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
806 			zero_ref_count_slot = i;
807 	}
808 
809 	if (found_match) {
810 		temp_str = "matching";
811 		bank_id = i;
812 		goto inc_ref_and_return;
813 	}
814 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
815 		temp_str = "unconfigured";
816 		bank_id = unconfigured_slot;
817 		goto configure_and_return;
818 	}
819 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
820 		temp_str = "zero_ref_count";
821 		bank_id = zero_ref_count_slot;
822 	}
823 	if (bank_id == DP_BE_INVALID_BANK_ID) {
824 		dp_alert("unable to find TX bank!");
825 		QDF_BUG(0);
		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
826 		return bank_id;
827 	}
828 
829 configure_and_return:
830 	be_soc->bank_profiles[bank_id].is_configured = true;
831 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
832 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
833 				      &be_soc->bank_profiles[bank_id].bank_config,
834 				      bank_id);
835 inc_ref_and_return:
836 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
837 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
838 
839 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
840 		temp_str, bank_id, vdev_config.val,
841 		be_soc->bank_profiles[bank_id].bank_config.val,
842 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
843 
844 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
845 		be_soc->bank_profiles[bank_id].bank_config.epd,
846 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
847 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
848 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
849 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
850 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
851 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
852 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
853 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
854 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
855 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
856 
857 	return bank_id;
858 }
859 
860 void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
861 			    struct dp_vdev_be *be_vdev)
862 {
863 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
864 	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
865 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
866 }
867 
868 void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
869 			       struct dp_vdev_be *be_vdev)
870 {
871 	dp_tx_put_bank_profile(be_soc, be_vdev);
872 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
873 }
874 
875 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
876 				   uint32_t num_elem,
877 				   uint8_t pool_id)
878 {
879 	struct dp_tx_desc_pool_s *tx_desc_pool;
880 	struct dp_hw_cookie_conversion_t *cc_ctx;
881 	struct dp_soc_be *be_soc;
882 	struct dp_spt_page_desc *page_desc;
883 	struct dp_tx_desc_s *tx_desc;
884 	uint32_t ppt_idx = 0;
885 	uint32_t avail_entry_index = 0;
886 
887 	if (!num_elem) {
888 		dp_err("desc_num 0 !!");
889 		return QDF_STATUS_E_FAILURE;
890 	}
891 
892 	be_soc = dp_get_be_soc_from_dp_soc(soc);
893 	tx_desc_pool = &soc->tx_desc[pool_id];
894 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
895 
896 	tx_desc = tx_desc_pool->freelist;
897 	page_desc = &cc_ctx->page_desc_base[0];
898 	while (tx_desc) {
899 		if (avail_entry_index == 0) {
900 			if (ppt_idx >= cc_ctx->total_page_num) {
901 				dp_alert("insufficient secondary page tables");
902 				qdf_assert_always(0);
903 			}
904 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
905 		}
906 
907 		/* put each TX Desc VA into the SPT pages and
908 		 * get the corresponding ID
909 		 */
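		/*
		 * Illustration (assuming an SPT page holds
		 * DP_CC_SPT_PAGE_MAX_ENTRIES = 512 descriptor VAs): the
		 * first 512 descriptors fill page 0 at entries 0..511,
		 * after which avail_entry_index wraps to 0 and ppt_idx
		 * advances to the next secondary page table page.
		 */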
910 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
911 					 avail_entry_index,
912 					 tx_desc);
913 		tx_desc->id =
914 			dp_cc_desc_id_generate(page_desc->ppt_index,
915 					       avail_entry_index);
916 		tx_desc->pool_id = pool_id;
917 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
918 		tx_desc = tx_desc->next;
919 		avail_entry_index = (avail_entry_index + 1) &
920 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
921 	}
922 
923 	return QDF_STATUS_SUCCESS;
924 }
925 
926 void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
927 			       struct dp_tx_desc_pool_s *tx_desc_pool,
928 			       uint8_t pool_id)
929 {
930 	struct dp_spt_page_desc *page_desc;
931 	struct dp_soc_be *be_soc;
932 	int i = 0;
933 	struct dp_hw_cookie_conversion_t *cc_ctx;
934 
935 	be_soc = dp_get_be_soc_from_dp_soc(soc);
936 	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
937 
938 	for (i = 0; i < cc_ctx->total_page_num; i++) {
939 		page_desc = &cc_ctx->page_desc_base[i];
940 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
941 	}
942 }
943 
944 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
945 uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
946 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
947 			       uint32_t quota)
948 {
949 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
950 	uint32_t work_done = 0;
951 
952 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
953 			DP_SRNG_THRESH_NEAR_FULL)
954 		return 0;
955 
956 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
957 	work_done++;
958 
959 	return work_done;
960 }
961 #endif
962 
963 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
964 	defined(CONFIG_SAWF)
965 #define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
966 	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
967 
968 #define HW_TX_DELAY_MAX                       0x1000000
969 #define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
970 #define HW_TX_DELAY_MASK                      0x1FFFFFFF
971 #define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
972 	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
973 	 HW_TX_DELAY_MASK)
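/*
 * Note: the shift by TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US assumes the WBM
 * buffer_timestamp is reported in units of 1024 us; e.g. a raw value of
 * 100 corresponds to 102400 us before masking with HW_TX_DELAY_MASK.
 */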
974 
975 static inline
976 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
977 				      struct dp_vdev *vdev,
978 				      struct hal_tx_completion_status *ts,
979 				      uint32_t *delay_us)
980 {
981 	uint32_t ppdu_id;
982 	uint8_t link_id_offset, link_id_bits;
983 	uint8_t hw_link_id;
984 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
985 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
986 	uint32_t delay;
987 	int32_t delta_tsf2, delta_tqm;
988 
989 	if (!ts->valid)
990 		return QDF_STATUS_E_INVAL;
991 
992 	link_id_offset = soc->link_id_offset;
993 	link_id_bits = soc->link_id_bits;
994 	ppdu_id = ts->ppdu_id;
995 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
996 					   link_id_bits);
997 
998 	msdu_tqm_enqueue_tstamp_us =
999 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1000 	msdu_compl_tsf_tstamp_us = ts->tsf;
1001 
1002 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1003 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1004 
1005 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1006 			delta_tqm) & HW_TX_DELAY_MASK;
1007 
1008 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1009 			delta_tsf2) & HW_TX_DELAY_MASK;
1010 
1011 	delay = (final_msdu_compl_tsf_tstamp_us -
1012 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1013 
1014 	if (delay > HW_TX_DELAY_MAX)
1015 		return QDF_STATUS_E_FAILURE;
1016 
1017 	if (delay_us)
1018 		*delay_us = delay;
1019 
1020 	return QDF_STATUS_SUCCESS;
1021 }
1022 #else
1023 static inline
1024 QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1025 				      struct dp_vdev *vdev,
1026 				      struct hal_tx_completion_status *ts,
1027 				      uint32_t *delay_us)
1028 {
1029 	return QDF_STATUS_SUCCESS;
1030 }
1031 #endif
1032 
1033 QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
1034 				     struct dp_vdev *vdev,
1035 				     struct hal_tx_completion_status *ts,
1036 				     uint32_t *delay_us)
1037 {
1038 	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
1039 }
1040