1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "dp_types.h"
22 #include "dp_tx.h"
23 #include "dp_be_tx.h"
24 #include "dp_tx_desc.h"
25 #include "hal_tx.h"
26 #include <hal_be_api.h>
27 #include <hal_be_tx.h>
28 #include <dp_htt.h>
29 #include "dp_internal.h"
30 #ifdef FEATURE_WDS
31 #include "dp_txrx_wds.h"
32 #endif
33 
34 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
35 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
36 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
37 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
38 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
39 #else
40 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
41 #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
42 #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
43 #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
44 #endif
45 
46 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
47 #ifdef WLAN_MCAST_MLO
48 /* MLO peer id for reinject */
49 #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
50 #define MAX_GSN_NUM 0x0FFF
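/*
 * The MLO multicast global sequence number (gsn) carried in msdu_info is
 * shared across the partner vdevs of an MLO group and wraps back to zero
 * once it exceeds MAX_GSN_NUM (see dp_tx_mlo_mcast_handler_be()).
 */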
51 
52 #ifdef QCA_MULTIPASS_SUPPORT
53 #define INVALID_VLAN_ID         0xFFFF
54 #define MULTIPASS_WITH_VLAN_ID 0xFFFE
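/*
 * INVALID_VLAN_ID indicates that no multipass peer matched the frame's
 * VLAN id; MULTIPASS_WITH_VLAN_ID marks the duplicate copy that is sent
 * with its VLAN tag left intact (see dp_tx_mlo_mcast_multipass_send()).
 */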
55 /**
56  * struct dp_mlo_mpass_buf - Multipass buffer
57  * @vlan_id: vlan_id of frame
58  * @nbuf: pointer to skb buf
59  */
60 struct dp_mlo_mpass_buf {
61 	uint16_t vlan_id;
62 	qdf_nbuf_t  nbuf;
63 };
64 #endif
65 #endif
66 #endif
67 
68 #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
69 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
70 #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
71 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
72 #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
73 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
74 #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
75 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
76 #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
77 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
78 #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
79 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
80 #define DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(_var) \
81 	HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(_var)
82 
83 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
84 
85 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
86 /*
87  * Value written to the buffer_virt_addr_63_32 field of a WBM2SW ring
88  * desc to mark it as invalidated after it has been reaped.
89  */
90 #define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212
91 
92 /**
93  * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
94  *					    invalidate it after each reaping
95  * @tx_comp_hal_desc: ring desc virtual address
96  * @r_tx_desc: pointer to current dp TX Desc pointer
97  * @tx_desc_va: the original 64 bits Desc VA got from ring Desc
98  * @hw_cc_done: HW cookie conversion done or not
99  *
100  * If HW CC is done, check the buffer_virt_addr_63_32 value to know
101  * whether the ring desc is stale. If HW CC is not done, compare the PA
102  * between the ring desc and the current TX desc.
103  *
104  * Return: QDF_STATUS_SUCCESS for success,
105  *	   QDF_STATUS_E_PENDING for stale entry,
106  *	   QDF_STATUS_E_INVAL for invalid entry.
107  */
108 static inline
109 QDF_STATUS dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
110 						struct dp_tx_desc_s **r_tx_desc,
111 						uint64_t tx_desc_va,
112 						bool hw_cc_done)
113 {
114 	qdf_dma_addr_t desc_dma_addr;
115 	QDF_STATUS status = QDF_STATUS_SUCCESS;
116 
117 	if (qdf_likely(hw_cc_done)) {
118 		/* Check upper 32 bits */
119 		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
120 		    (tx_desc_va >> 32)) {
121 			*r_tx_desc = NULL;
122 			status = QDF_STATUS_E_PENDING;
123 		} else
124 			/* Invalidate the ring desc for 32 ~ 63 bits of VA */
125 			hal_tx_comp_set_desc_va_63_32(
126 				tx_comp_hal_desc,
127 				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
128 	} else {
129 		/* Compare PA between ring desc and current TX desc stored */
130 		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);
131 
132 		if (desc_dma_addr != (*r_tx_desc)->dma_addr) {
133 			*r_tx_desc = NULL;
134 			status = QDF_STATUS_E_INVAL;
135 		}
136 	}
137 
138 	return status;
139 }
140 #else
141 static inline
142 QDF_STATUS dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
143 						struct dp_tx_desc_s **r_tx_desc,
144 						uint64_t tx_desc_va,
145 						bool hw_cc_done)
146 {
147 	return QDF_STATUS_SUCCESS;
148 }
149 #endif
150 
151 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
152 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
153 QDF_STATUS
154 dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
155 				       void *tx_comp_hal_desc,
156 				       struct dp_tx_desc_s **r_tx_desc)
157 {
158 	uint32_t tx_desc_id;
159 	uint64_t tx_desc_va = 0;
160 	QDF_STATUS status;
161 	bool hw_cc_done =
162 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);
163 
164 	if (qdf_likely(hw_cc_done)) {
165 		/* HW cookie conversion done */
166 		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
167 		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
168 
169 	} else {
170 		/* SW does cookie conversion to VA */
171 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
172 		*r_tx_desc =
173 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
174 	}
175 
176 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
177 						      r_tx_desc, tx_desc_va,
178 						      hw_cc_done);
179 
180 	if (*r_tx_desc)
181 		(*r_tx_desc)->peer_id =
182 				dp_tx_comp_get_peer_id_be(soc,
183 							  tx_comp_hal_desc);
184 
185 	return status;
186 }
187 #else
188 QDF_STATUS
189 dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
190 				       void *tx_comp_hal_desc,
191 				       struct dp_tx_desc_s **r_tx_desc)
192 {
193 	uint64_t tx_desc_va;
194 	QDF_STATUS status;
195 
196 	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
197 	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
198 
199 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
200 						      r_tx_desc, tx_desc_va,
201 						      true);
202 	if (*r_tx_desc)
203 		(*r_tx_desc)->peer_id =
204 				dp_tx_comp_get_peer_id_be(soc,
205 							  tx_comp_hal_desc);
206 
207 	return status;
208 }
209 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
210 #else
211 
212 QDF_STATUS
213 dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
214 				       void *tx_comp_hal_desc,
215 				       struct dp_tx_desc_s **r_tx_desc)
216 {
217 	uint32_t tx_desc_id;
218 	QDF_STATUS status;
219 
220 	/* SW does cookie conversion to VA */
221 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
222 	*r_tx_desc =
223 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
224 
225 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
226 						      r_tx_desc, 0, false);
227 
228 	if (*r_tx_desc)
229 		(*r_tx_desc)->peer_id =
230 				dp_tx_comp_get_peer_id_be(soc,
231 							  tx_comp_hal_desc);
232 
233 	return status;
234 }
235 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
236 
237 static inline
238 void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
239 {
240 	struct dp_vdev *vdev;
241 	uint8_t vdev_id;
242 	uint32_t *htt_desc = (uint32_t *)status;
243 
244 	dp_assert_always_internal(soc->mec_fw_offload);
245 
246 	/*
247 	 * Get vdev id from HTT status word in case of MEC
248 	 * notification
249 	 */
250 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
251 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
252 		return;
253 
254 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
255 				     DP_MOD_ID_HTT_COMP);
256 	if (!vdev)
257 		return;
258 	dp_tx_mec_handler(vdev, status);
259 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
260 }
261 
262 void dp_tx_process_htt_completion_be(struct dp_soc *soc,
263 				     struct dp_tx_desc_s *tx_desc,
264 				     uint8_t *status,
265 				     uint8_t ring_id)
266 {
267 	uint8_t tx_status;
268 	struct dp_pdev *pdev;
269 	struct dp_vdev *vdev = NULL;
270 	struct hal_tx_completion_status ts = {0};
271 	uint32_t *htt_desc = (uint32_t *)status;
272 	struct dp_txrx_peer *txrx_peer;
273 	dp_txrx_ref_handle txrx_ref_handle = NULL;
274 	struct cdp_tid_tx_stats *tid_stats = NULL;
275 	struct htt_soc *htt_handle;
276 	uint8_t vdev_id;
277 	uint16_t peer_id;
278 	uint8_t xmit_type;
279 
280 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
281 	htt_handle = (struct htt_soc *)soc->htt_handle;
282 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
283 
284 	/*
285 	 * There can be a scenario where WBM consumes the descriptor
286 	 * enqueued from TQM2WBM first, i.e. the TQM completion is processed
287 	 * before the MEC notification arrives from FW2WBM. Avoid accessing
288 	 * any field of the tx descriptor in case of a MEC notify.
289 	 */
290 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
291 		return dp_tx_process_mec_notify_be(soc, status);
292 
293 	/*
294 	 * If the descriptor is already freed in vdev_detach,
295 	 * continue to next descriptor
296 	 */
297 	if (qdf_unlikely(!tx_desc->flags)) {
298 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
299 				   tx_desc->id);
300 		return;
301 	}
302 
303 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
304 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
305 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
306 		goto release_tx_desc;
307 	}
308 
309 	pdev = tx_desc->pdev;
310 	if (qdf_unlikely(!pdev)) {
311 		dp_tx_comp_warn("The pdev in TX desc is NULL, dropped.");
312 		dp_tx_comp_warn("tx_status: %u", tx_status);
313 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
314 		goto release_tx_desc;
315 	}
316 
317 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
318 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
319 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
320 		goto release_tx_desc;
321 	}
322 
323 	qdf_assert(tx_desc->pdev);
324 
325 	vdev_id = tx_desc->vdev_id;
326 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
327 				     DP_MOD_ID_HTT_COMP);
328 
329 	if (qdf_unlikely(!vdev)) {
330 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
331 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
332 		goto release_tx_desc;
333 	}
334 
335 	switch (tx_status) {
336 	case HTT_TX_FW2WBM_TX_STATUS_OK:
337 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
338 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
339 	{
340 		uint8_t tid;
341 		uint8_t transmit_cnt_valid = 0;
342 
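		/*
		 * Rebuild a hal_tx_completion_status from the FW2WBM HTT
		 * completion words so that the regular completion routines
		 * (dp_tx_comp_process_tx_status()/_desc()) can be reused
		 * for FW-generated completions.
		 */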
343 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
344 			ts.peer_id =
345 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
346 						htt_desc[3]);
347 			ts.tid =
348 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
349 						htt_desc[3]);
350 		} else {
351 			ts.peer_id = HTT_INVALID_PEER;
352 			ts.tid = HTT_INVALID_TID;
353 		}
354 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
355 		ts.ppdu_id =
356 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
357 					htt_desc[2]);
358 		ts.ack_frame_rssi =
359 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
360 					htt_desc[2]);
361 
362 		transmit_cnt_valid =
363 			DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(
364 					htt_desc[3]);
365 		if (transmit_cnt_valid)
366 			ts.transmit_cnt =
367 				HTT_TX_WBM_COMPLETION_V3_TRANSMIT_COUNT_GET(
368 						htt_desc[1]);
369 
370 		ts.tsf = htt_desc[4];
371 		ts.first_msdu = 1;
372 		ts.last_msdu = 1;
373 		switch (tx_status) {
374 		case HTT_TX_FW2WBM_TX_STATUS_OK:
375 			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
376 			break;
377 		case HTT_TX_FW2WBM_TX_STATUS_DROP:
378 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
379 			break;
380 		case HTT_TX_FW2WBM_TX_STATUS_TTL:
381 			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
382 			break;
383 		}
384 		tid = ts.tid;
385 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
386 			tid = CDP_MAX_DATA_TIDS - 1;
387 
388 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
389 
390 		if (qdf_unlikely(pdev->delay_stats_flag) ||
391 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
392 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
393 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
394 			tid_stats->htt_status_cnt[tx_status]++;
395 
396 		peer_id = dp_tx_comp_adjust_peer_id_be(soc, ts.peer_id);
397 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
398 						       &txrx_ref_handle,
399 						       DP_MOD_ID_HTT_COMP);
400 		if (qdf_likely(txrx_peer))
401 			dp_tx_update_peer_basic_stats(
402 						txrx_peer,
403 						qdf_nbuf_len(tx_desc->nbuf),
404 						tx_status,
405 						pdev->enhanced_stats_en);
406 
407 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
408 					     ring_id);
409 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
410 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
411 
412 		if (qdf_likely(txrx_peer))
413 			dp_txrx_peer_unref_delete(txrx_ref_handle,
414 						  DP_MOD_ID_HTT_COMP);
415 
416 		break;
417 	}
418 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
419 	{
420 		uint8_t reinject_reason;
421 
422 		reinject_reason =
423 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
424 								htt_desc[1]);
425 		dp_tx_reinject_handler(soc, vdev, tx_desc,
426 				       status, reinject_reason);
427 		break;
428 	}
429 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
430 	{
431 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
432 		break;
433 	}
434 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
435 	{
436 		xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
437 		DP_STATS_INC(vdev,
438 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
439 			     1);
440 		goto release_tx_desc;
441 	}
442 	default:
443 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
444 			       tx_status);
445 		goto release_tx_desc;
446 	}
447 
448 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
449 	return;
450 
451 release_tx_desc:
452 	dp_tx_comp_free_buf(soc, tx_desc, false);
453 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
454 	if (vdev)
455 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
456 }
457 
458 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
459 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
460 /**
461  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
462  * @soc: DP soc structure pointer
463  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
464  *
465  * Return: RBM ID corresponding to TCL ring_id
466  */
467 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
468 					  uint8_t ring_id)
469 {
470 	return 0;
471 }
472 #else
473 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
474 					  uint8_t ring_id)
475 {
476 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
477 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
478 }
479 #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
480 #else
481 static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
482 					  uint8_t tcl_index)
483 {
484 	uint8_t rbm;
485 
486 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
487 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
488 	return rbm;
489 }
490 #endif
491 
492 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
493 
494 /**
495  * dp_tx_set_min_rates_for_critical_frames() - sets min-rates for critical pkts
496  * @soc: DP soc structure pointer
497  * @hal_tx_desc: HAL descriptor where fields are set
498  * @nbuf: skb to be considered for min rates
499  *
500  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
501  * and uses it to determine if the frame is critical. For a critical frame,
502  * flow override bits are set to classify the frame into HW's high priority
503  * queue. The HW will pick pre-configured min rates for such packets.
504  *
505  * Return: None
506  */
507 static void
508 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
509 					uint32_t *hal_tx_desc,
510 					qdf_nbuf_t nbuf)
511 {
512 /*
513  * Critical frames should be queued to the high priority queue for the TID
514  * on which they are sent out (for the concerned peer).
515  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
516  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
517  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
518  * HOL queue.
519  */
520 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
521 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
522 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
523 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
524 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
525 						TX_SEMI_HARD_NOTIFY_E);
526 	}
527 }
528 #else
529 static inline void
530 dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
531 					uint32_t *hal_tx_desc_cached,
532 					qdf_nbuf_t nbuf)
533 {
534 }
535 #endif
536 
537 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
538 /**
539  * dp_tx_set_particular_tx_queue() - set particular TX TQM flow queue 3 for
540  *				     TX packets, currently TCP ACK only
541  * @soc: DP soc structure pointer
542  * @hal_tx_desc: HAL descriptor where fields are set
543  * @nbuf: skb to be considered for particular TX queue
544  *
545  * Return: None
546  */
547 static inline
548 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
549 				   uint32_t *hal_tx_desc,
550 				   qdf_nbuf_t nbuf)
551 {
552 	if (!soc->tx_ilp_enable)
553 		return;
554 
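	/*
	 * who_classify_info_sel = 1 with flow_override = 1 selects
	 * HTT_MSDU_Q_IDX 3 (2 * who_classify_info_sel + flow_override),
	 * steering TCP ACKs to a dedicated TQM flow queue when ILP is
	 * enabled.
	 */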
555 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
556 			 QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
557 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
558 		hal_tx_desc_set_flow_override(hal_tx_desc, 1);
559 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
560 	}
561 }
562 #else
563 static inline
564 void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
565 				   uint32_t *hal_tx_desc,
566 				   qdf_nbuf_t nbuf)
567 {
568 }
569 #endif
570 
571 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
572 	defined(WLAN_MCAST_MLO)
573 #ifdef QCA_MULTIPASS_SUPPORT
574 /**
575  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
576  * @be_vdev: Handle to DP be_vdev structure
577  * @ptnr_vdev: DP ptnr_vdev handle
578  * @arg: pointer to dp_mlo_mpass_buf
579  *
580  * Return: None
581  */
582 static void
583 dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
584 				 struct dp_vdev *ptnr_vdev,
585 				 void *arg)
586 {
587 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
588 	struct dp_txrx_peer *txrx_peer = NULL;
589 	struct vlan_ethhdr *veh = NULL;
590 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
591 	uint16_t vlan_id = 0;
592 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
593 			(htons(eh->ether_type) != ETH_P_8021Q));
594 
595 	if (qdf_unlikely(not_vlan))
596 		return;
597 	veh = (struct vlan_ethhdr *)eh;
598 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
599 
600 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
601 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
602 		      mpass_peer_list_elem) {
603 		if (vlan_id == txrx_peer->vlan_id) {
604 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
605 			ptr->vlan_id = vlan_id;
606 			return;
607 		}
608 	}
609 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
610 }
611 
612 /**
613  * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
614  * @be_vdev: Handle to DP be_vdev structure
615  * @ptnr_vdev: DP ptnr_vdev handle
616  * @arg: pointer to dp_mlo_mpass_buf
617  *
618  * Return: None
619  */
620 static void
621 dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
622 			       struct dp_vdev *ptnr_vdev,
623 			       void *arg)
624 {
625 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
626 	struct dp_tx_msdu_info_s msdu_info;
627 	struct dp_vdev_be *be_ptnr_vdev = NULL;
628 	qdf_nbuf_t  nbuf_clone;
629 	uint16_t group_key = 0;
630 
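	/*
	 * Clone the frame for partner vdevs; the original nbuf is consumed
	 * only when transmitting on the vdev that owns it (be_vdev itself).
	 */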
631 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
632 	if (be_vdev != be_ptnr_vdev) {
633 		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
634 		if (qdf_unlikely(!nbuf_clone)) {
635 			dp_tx_debug("nbuf clone failed");
636 			return;
637 		}
638 	} else {
639 		nbuf_clone = ptr->nbuf;
640 	}
641 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
642 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
643 	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
644 	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(ptr->nbuf);
645 
646 
647 	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
648 		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
649 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
650 						msdu_info.meta_data[0], 1);
651 	} else {
652 		/* return when vlan map is not initialized */
653 		if (!ptnr_vdev->iv_vlan_map)
654 			goto nbuf_free;
655 		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];
656 
657 		/*
658 		 * If group key is not installed, drop the frame.
659 		 */
660 
661 		if (!group_key)
662 			goto nbuf_free;
663 
664 		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
665 		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
666 		msdu_info.exception_fw = 1;
667 	}
668 
669 	nbuf_clone = dp_tx_send_msdu_single(
670 					ptnr_vdev,
671 					nbuf_clone,
672 					&msdu_info,
673 					DP_MLO_MCAST_REINJECT_PEER_ID,
674 					NULL);
675 
676 nbuf_free:
677 	if (qdf_unlikely(nbuf_clone)) {
678 		dp_info("pkt send failed");
679 		qdf_nbuf_free(nbuf_clone);
680 		return;
681 	}
682 }
683 
684 /**
685  * dp_tx_mlo_mcast_multipass_handler() - Check if the frame needs multipass processing
686  * @soc: DP soc handle
687  * @vdev: DP vdev handle
688  * @nbuf: nbuf to be enqueued
689  *
690  * Return: true if handling is done else false
691  */
692 static bool
693 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
694 				  struct dp_vdev *vdev,
695 				  qdf_nbuf_t nbuf)
696 {
697 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
698 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
699 	qdf_nbuf_t nbuf_copy = NULL;
700 	struct dp_mlo_mpass_buf mpass_buf;
701 
702 	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
703 	mpass_buf.vlan_id = INVALID_VLAN_ID;
704 	mpass_buf.nbuf = nbuf;
705 
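	/*
	 * Look up the VLAN id against this vdev's multipass peer list
	 * first; only if nothing matches, scan the partner vdevs of the
	 * MLO group as well.
	 */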
706 	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
707 	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
708 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
709 				      dp_tx_mlo_mcast_multipass_lookup,
710 				      &mpass_buf, DP_MOD_ID_TX,
711 				      DP_ALL_VDEV_ITER,
712 				      DP_VDEV_ITERATE_SKIP_SELF);
713 		/*
714 		 * Do not drop the frame when vlan_id doesn't match.
715 		 * Send the frame as it is.
716 		 */
717 		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
718 			return false;
719 	}
720 
721 	/* AP can have classic clients, special clients &
722 	 * classic repeaters.
723 	 * 1. Classic clients & special client:
724 	 *	Remove vlan header, find corresponding group key
725 	 *	index, fill in metaheader and enqueue multicast
726 	 *	frame to TCL.
727 	 * 2. Classic repeater:
728 	 *	Pass through to classic repeater with vlan tag
729 	 *	intact without any group key index. Hardware
730 	 *	will know which key to use to send frame to
731 	 *	repeater.
732 	 */
733 	nbuf_copy = qdf_nbuf_copy(nbuf);
734 
735 	/*
736 	 * Send multicast frame to special peers even
737 	 * if pass through to classic repeater fails.
738 	 */
739 	if (nbuf_copy) {
740 		struct dp_mlo_mpass_buf mpass_buf_copy = {0};
741 
742 		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
743 		mpass_buf_copy.nbuf = nbuf_copy;
744 		/* send frame on partner vdevs */
745 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
746 				      dp_tx_mlo_mcast_multipass_send,
747 				      &mpass_buf_copy, DP_MOD_ID_TX,
748 				      DP_LINK_VDEV_ITER,
749 				      DP_VDEV_ITERATE_SKIP_SELF);
750 
751 		/* send frame on mcast primary vdev */
752 		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);
753 
754 		if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
755 			be_vdev->mlo_dev_ctxt->seq_num = 0;
756 		else
757 			be_vdev->mlo_dev_ctxt->seq_num++;
758 	}
759 
760 	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
761 			      dp_tx_mlo_mcast_multipass_send,
762 			      &mpass_buf, DP_MOD_ID_TX, DP_LINK_VDEV_ITER,
763 			      DP_VDEV_ITERATE_SKIP_SELF);
764 	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
765 
766 	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
767 		be_vdev->mlo_dev_ctxt->seq_num = 0;
768 	else
769 		be_vdev->mlo_dev_ctxt->seq_num++;
770 
771 	return true;
772 }
773 #else
774 static bool
775 dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
776 				  qdf_nbuf_t nbuf)
777 {
778 	return false;
779 }
780 #endif
781 
782 void
783 dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
784 			 struct dp_vdev *ptnr_vdev,
785 			 void *arg)
786 {
787 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
788 	qdf_nbuf_t  nbuf_clone;
789 	struct dp_vdev_be *be_ptnr_vdev = NULL;
790 	struct dp_tx_msdu_info_s msdu_info;
791 
792 	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
793 	if (be_vdev != be_ptnr_vdev) {
794 		nbuf_clone = qdf_nbuf_clone(nbuf);
795 		if (qdf_unlikely(!nbuf_clone)) {
796 			dp_tx_debug("nbuf clone failed");
797 			return;
798 		}
799 	} else {
800 		nbuf_clone = nbuf;
801 	}
802 
803 	/* NAWDS clients accept MCAST packets only in 4-address format.
804 	 * This ensures packets are sent to NAWDS clients in 4-address format.
805 	 */
806 	if (qdf_unlikely(ptnr_vdev->nawds_enabled)) {
807 		qdf_mem_zero(&msdu_info, sizeof(msdu_info));
808 		dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
809 		dp_tx_nawds_handler(ptnr_vdev->pdev->soc, ptnr_vdev,
810 				    &msdu_info, nbuf_clone, DP_INVALID_PEER);
811 	}
812 
813 	if (qdf_unlikely(dp_tx_proxy_arp(ptnr_vdev, nbuf_clone) !=
814 			 QDF_STATUS_SUCCESS)) {
815 		qdf_nbuf_free(nbuf_clone);
816 		return;
817 	}
818 
819 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
820 	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
821 
822 	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
823 	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf_clone);
824 
825 	DP_STATS_INC(ptnr_vdev,
826 		     tx_i[msdu_info.xmit_type].mlo_mcast.send_pkt_count, 1);
827 	nbuf_clone = dp_tx_send_msdu_single(
828 					ptnr_vdev,
829 					nbuf_clone,
830 					&msdu_info,
831 					DP_MLO_MCAST_REINJECT_PEER_ID,
832 					NULL);
833 	if (qdf_unlikely(nbuf_clone)) {
834 		DP_STATS_INC(ptnr_vdev,
835 			     tx_i[msdu_info.xmit_type].mlo_mcast.fail_pkt_count,
836 			     1);
837 		dp_info("pkt send failed");
838 		qdf_nbuf_free(nbuf_clone);
839 		return;
840 	}
841 }
842 
843 static inline void
844 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
845 			      struct dp_vdev *vdev,
846 			      struct dp_tx_msdu_info_s *msdu_info)
847 {
848 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
849 }
850 
851 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
852 				struct dp_vdev *vdev,
853 				qdf_nbuf_t nbuf)
854 {
855 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
856 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
857 
858 	if (qdf_unlikely(vdev->multipass_en) &&
859 	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
860 		return;
861 	/* send frame on partner vdevs */
862 	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
863 			      dp_tx_mlo_mcast_pkt_send,
864 			      nbuf, DP_MOD_ID_REINJECT, DP_LINK_VDEV_ITER,
865 			      DP_VDEV_ITERATE_SKIP_SELF);
866 
867 	/* send frame on mcast primary vdev */
868 	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);
869 
870 	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
871 		be_vdev->mlo_dev_ctxt->seq_num = 0;
872 	else
873 		be_vdev->mlo_dev_ctxt->seq_num++;
874 }
875 
876 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
877 				   struct dp_vdev *vdev)
878 {
879 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
880 
881 	if (be_vdev->mcast_primary)
882 		return true;
883 
884 	return false;
885 }
886 
887 #if defined(CONFIG_MLO_SINGLE_DEV)
888 static void
889 dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
890 			   struct dp_vdev *ptnr_vdev,
891 			   void *arg)
892 {
893 	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
894 	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
895 
896 	if (vdev == ptnr_vdev)
897 		return;
898 
899 	/*
900 	 * Hold the reference to avoid free of nbuf in
901 	 * dp_tx_mcast_enhance() in case of successful
902 	 * conversion
903 	 */
904 	qdf_nbuf_ref(nbuf);
905 
906 	if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
907 		return;
908 
909 	qdf_nbuf_free(nbuf);
910 }
911 
912 qdf_nbuf_t
913 dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
914 			qdf_nbuf_t nbuf,
915 			struct cdp_tx_exception_metadata *tx_exc_metadata)
916 {
917 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
918 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
919 
920 	if (!tx_exc_metadata->is_mlo_mcast)
921 		return nbuf;
922 
923 	if (!be_vdev->mcast_primary) {
924 		qdf_nbuf_free(nbuf);
925 		return NULL;
926 	}
927 
928 	/*
929 	 * In the single netdev model, avoid the reinjection path: the mcast
930 	 * packet is identified in the upper layers during the peer search
931 	 * that finds the primary TQM based on the destination mac address.
932 	 *
933 	 * The new bonding interface is added into the bridge, so MCSD will
934 	 * update the snooping table and the wifi driver populates the
935 	 * entries in the appropriate child net devices.
936 	 */
937 	if (vdev->mcast_enhancement_en) {
938 		/*
939 		 * As dp_tx_mcast_enhance() can consume the nbuf in case of a
940 		 * successful conversion, hold a reference to the nbuf.
941 		 *
942 		 * The reference also keeps the nbuf available for tx on partner links.
943 		 */
944 		qdf_nbuf_ref(nbuf);
945 		if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
946 			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
947 					      dp_tx_mlo_mcast_enhance_be,
948 					      nbuf, DP_MOD_ID_TX,
949 					      DP_ALL_VDEV_ITER,
950 					      DP_VDEV_ITERATE_SKIP_SELF);
951 			qdf_nbuf_free(nbuf);
952 			return NULL;
953 		}
954 		/* release reference taken above */
955 		qdf_nbuf_free(nbuf);
956 	}
957 	dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);
958 	return NULL;
959 }
960 #endif
961 #else
962 static inline void
963 dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
964 			      struct dp_vdev *vdev,
965 			      struct dp_tx_msdu_info_s *msdu_info)
966 {
967 	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
968 }
969 #endif
970 #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
971 	!defined(WLAN_MCAST_MLO)
972 void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
973 				struct dp_vdev *vdev,
974 				qdf_nbuf_t nbuf)
975 {
976 }
977 
978 bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
979 				   struct dp_vdev *vdev)
980 {
981 	return false;
982 }
983 #endif
984 
985 #ifdef CONFIG_SAWF
986 /**
987  * dp_sawf_config_be() - Configure SAWF specific fields in the TCL descriptor
988  *
989  * @soc: DP soc handle
990  * @hal_tx_desc_cached: tx descriptor
991  * @fw_metadata: firmware metadata
992  * @nbuf: skb buffer
993  * @msdu_info: msdu info
994  *
995  * Return: tid value in mark metadata
996  */
997 uint8_t dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
998 			  uint16_t *fw_metadata, qdf_nbuf_t nbuf,
999 			  struct dp_tx_msdu_info_s *msdu_info)
1000 {
1001 	uint8_t q_id = 0;
1002 	uint8_t tid = HTT_TX_EXT_TID_INVALID;
1003 
1004 	q_id = dp_sawf_queue_id_get(nbuf);
1005 
1006 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
1007 		return HTT_TX_EXT_TID_INVALID;
1008 
1009 	tid = (q_id & (CDP_DATA_TID_MAX - 1));
1010 	if (msdu_info)
1011 		msdu_info->tid = tid;
1012 
1013 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
1014 				 (q_id & (CDP_DATA_TID_MAX - 1)));
1015 
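	/*
	 * For queue ids in the default SAWF range only the HLOS TID is
	 * programmed; other queue ids (when SAWF is enabled in the config)
	 * also get the TCL flow override fields set so HW steers the frame
	 * to the requested service queue.
	 */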
1016 	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
1017 	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
1018 		return tid;
1019 
1020 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
1021 		return tid;
1022 
1023 	if (fw_metadata)
1024 		dp_sawf_tcl_cmd(fw_metadata, nbuf);
1025 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
1026 					     DP_TX_FLOW_OVERRIDE_ENABLE);
1027 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
1028 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
1029 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
1030 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
1031 
1032 	return tid;
1033 }
1034 
1035 #else
1036 
1037 static inline
1038 uint8_t dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1039 			  uint16_t *fw_metadata, qdf_nbuf_t nbuf,
1040 			  struct dp_tx_msdu_info_s *msdu_info)
1041 {
1042 	return HTT_TX_EXT_TID_INVALID;
1043 }
1044 
1045 static inline
1046 QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
1047 					 struct dp_tx_desc_s *tx_desc)
1048 {
1049 	return QDF_STATUS_SUCCESS;
1050 }
1051 
1052 static inline
1053 QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
1054 					      struct dp_tx_desc_s *tx_desc)
1055 {
1056 	return QDF_STATUS_SUCCESS;
1057 }
1058 #endif
1059 
1060 #ifdef WLAN_SUPPORT_PPEDS
1061 
1062 /**
1063  * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
1064  * @soc: Handle to DP Soc structure
1065  * @peer_id: Peer ID in the descriptor
1066  *
1067  * Return: NONE
1068  */
1069 static inline
1070 void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
1071 {
1072 	struct dp_vdev *vdev = NULL;
1073 	struct dp_txrx_peer *txrx_peer = NULL;
1074 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1075 
1076 	DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
1077 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
1078 					       peer_id,
1079 					       &txrx_ref_handle,
1080 					       DP_MOD_ID_TX_COMP);
1081 	if (txrx_peer) {
1082 		vdev = txrx_peer->vdev;
1083 		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.fw2wbm_tx_drop, 1);
1084 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
1085 	}
1086 }
1087 
1088 int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
1089 {
1090 	uint32_t num_avail_for_reap = 0;
1091 	void *tx_comp_hal_desc;
1092 	uint8_t buf_src, status = 0;
1093 	uint32_t count = 0;
1094 	struct dp_tx_desc_s *tx_desc = NULL;
1095 	struct dp_tx_desc_s *head_desc = NULL;
1096 	struct dp_tx_desc_s *tail_desc = NULL;
1097 	struct dp_soc *soc = &be_soc->soc;
1098 	void *last_prefetch_hw_desc = NULL;
1099 	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
1100 	qdf_nbuf_t  nbuf;
1101 	hal_soc_handle_t hal_soc = soc->hal_soc;
1102 	hal_ring_handle_t hal_ring_hdl =
1103 				be_soc->ppeds_wbm_release_ring.hal_srng;
1104 	struct dp_txrx_peer *txrx_peer = NULL;
1105 	uint16_t peer_id = CDP_INVALID_PEER;
1106 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1107 	struct dp_vdev *vdev = NULL;
1108 	struct dp_pdev *pdev = NULL;
1109 	struct dp_srng *srng;
1110 
1111 	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
1112 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1113 		return 0;
1114 	}
1115 
1116 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
1117 
1118 	if (num_avail_for_reap >= quota)
1119 		num_avail_for_reap = quota;
1120 
1121 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
1122 
1123 	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
1124 						     num_avail_for_reap);
1125 
1126 	srng = &be_soc->ppeds_wbm_release_ring;
1127 
1128 	if (srng) {
1129 		hal_update_ring_util(soc->hal_soc, srng->hal_srng,
1130 				     WBM2SW_RELEASE,
1131 				     &be_soc->ppeds_wbm_release_ring.stats);
1132 	}
1133 
1134 	while (qdf_likely(num_avail_for_reap--)) {
1135 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
1136 		if (qdf_unlikely(!tx_comp_hal_desc))
1137 			break;
1138 
1139 		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
1140 							tx_comp_hal_desc);
1141 
1142 		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
1143 				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1144 			dp_err("Tx comp release_src != TQM | FW but from %d",
1145 			       buf_src);
1146 			dp_assert_always_internal_ds_stat(0, be_soc,
1147 							  tx.tx_comp_buf_src);
1148 			continue;
1149 		}
1150 
1151 		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
1152 						       &tx_desc);
1153 
1154 		if (!tx_desc) {
1155 			dp_err("unable to retrieve tx_desc!");
1156 			dp_assert_always_internal_ds_stat(0, be_soc,
1157 							  tx.tx_comp_desc_null);
1158 			continue;
1159 		}
1160 
1161 		if (qdf_unlikely(!(tx_desc->flags &
1162 				   DP_TX_DESC_FLAG_ALLOCATED) ||
1163 				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
1164 			dp_assert_always_internal_ds_stat(0, be_soc,
1165 						tx.tx_comp_invalid_flag);
1166 			continue;
1167 		}
1168 
1169 		tx_desc->buffer_src = buf_src;
1170 
1171 		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1172 			status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1173 			if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
1174 				dp_ppeds_stats(soc, tx_desc->peer_id);
1175 
1176 			nbuf = dp_ppeds_tx_desc_free(soc, tx_desc);
1177 			qdf_nbuf_free(nbuf);
1178 		} else {
1179 			tx_desc->tx_status =
1180 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1181 
1182 			/*
1183 			 * Add desc sync to account for extended statistics
1184 			 * during Tx completion.
1185 			 */
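			/*
			 * Cache the txrx_peer reference across consecutive
			 * descriptors that belong to the same peer so the
			 * peer lookup is repeated only when peer_id changes.
			 */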
1186 			if (peer_id != tx_desc->peer_id) {
1187 				if (txrx_peer) {
1188 					dp_txrx_peer_unref_delete(txrx_ref_handle,
1189 								  DP_MOD_ID_TX_COMP);
1190 					txrx_peer = NULL;
1191 					vdev = NULL;
1192 					pdev = NULL;
1193 				}
1194 				peer_id = tx_desc->peer_id;
1195 				txrx_peer =
1196 					dp_txrx_peer_get_ref_by_id(soc, peer_id,
1197 								   &txrx_ref_handle,
1198 								   DP_MOD_ID_TX_COMP);
1199 				if (txrx_peer) {
1200 					vdev = txrx_peer->vdev;
1201 					if (!vdev)
1202 						goto next_desc;
1203 
1204 					pdev = vdev->pdev;
1205 					if (!pdev)
1206 						goto next_desc;
1207 
1208 					dp_tx_desc_update_fast_comp_flag(soc,
1209 									 tx_desc,
1210 									 !pdev->enhanced_stats_en);
1211 					if (pdev->enhanced_stats_en) {
1212 						hal_tx_comp_desc_sync(tx_comp_hal_desc,
1213 								      &tx_desc->comp, 1);
1214 					}
1215 				}
1216 			} else if (txrx_peer && vdev && pdev) {
1217 				dp_tx_desc_update_fast_comp_flag(soc,
1218 								 tx_desc,
1219 								 !pdev->enhanced_stats_en);
1220 				if (pdev->enhanced_stats_en) {
1221 					hal_tx_comp_desc_sync(tx_comp_hal_desc,
1222 							      &tx_desc->comp, 1);
1223 				}
1224 			}
1225 next_desc:
1226 			if (!head_desc) {
1227 				head_desc = tx_desc;
1228 				tail_desc = tx_desc;
1229 			}
1230 
1231 			tail_desc->next = tx_desc;
1232 			tx_desc->next = NULL;
1233 			tail_desc = tx_desc;
1234 
1235 			count++;
1236 
1237 			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
1238 						       num_avail_for_reap,
1239 						       hal_ring_hdl,
1240 						       &last_prefetch_hw_desc,
1241 						       &last_prefetch_sw_desc,
1242 						       NULL);
1243 		}
1244 	}
1245 
1246 	dp_srng_access_end(NULL, soc, hal_ring_hdl);
1247 
1248 	if (txrx_peer)
1249 		dp_txrx_peer_unref_delete(txrx_ref_handle,
1250 					  DP_MOD_ID_TX_COMP);
1251 	if (head_desc)
1252 		dp_tx_comp_process_desc_list(soc, head_desc,
1253 					     CDP_MAX_TX_COMP_PPE_RING);
1254 
1255 	return count;
1256 }
1257 #endif
1258 
1259 #if defined(QCA_SUPPORT_WDS_EXTENDED)
1260 static inline void
1261 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1262 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1263 			     uint16_t *ast_idx, uint16_t *ast_hash)
1264 {
1265 	struct dp_peer *peer = NULL;
1266 
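	/*
	 * For WDS extended peers, use the destination peer's AST index and
	 * hash (instead of the vdev BSS AST entry) and enable index based
	 * lookup override in the TCL descriptor.
	 */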
1267 	if (tx_exc_metadata->is_wds_extended) {
1268 		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
1269 					     DP_MOD_ID_TX);
1270 		if (peer) {
1271 			*ast_idx = peer->ast_idx;
1272 			*ast_hash = peer->ast_hash;
1273 			hal_tx_desc_set_index_lookup_override
1274 							(soc->hal_soc,
1275 							 hal_tx_desc_cached,
1276 							 0x1);
1277 			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
1278 		}
1279 	} else {
1280 		return;
1281 	}
1282 }
1283 
1284 #else
1285 static inline void
1286 dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1287 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1288 			     uint16_t *ast_idx, uint16_t *ast_hash)
1289 {
1290 }
1291 #endif
1292 
1293 QDF_STATUS
1294 dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
1295 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
1296 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
1297 		    struct dp_tx_msdu_info_s *msdu_info)
1298 {
1299 	void *hal_tx_desc;
1300 	uint32_t *hal_tx_desc_cached;
1301 	int coalesce = 0;
1302 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
1303 	uint8_t ring_id = tx_q->ring_id;
1304 	uint8_t tid;
1305 	struct dp_vdev_be *be_vdev;
1306 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
1307 	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
1308 	hal_ring_handle_t hal_ring_hdl = NULL;
1309 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
1310 	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
1311 	uint16_t ast_idx = vdev->bss_ast_idx;
1312 	uint16_t ast_hash = vdev->bss_ast_hash;
1313 
1314 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1315 
1316 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
1317 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
1318 		return QDF_STATUS_E_RESOURCES;
1319 	}
1320 
1321 	if (qdf_unlikely(tx_exc_metadata)) {
1322 		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
1323 				   CDP_INVALID_TX_ENCAP_TYPE) ||
1324 				   (tx_exc_metadata->tx_encap_type ==
1325 				    vdev->tx_encap_type));
1326 
1327 		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
1328 			qdf_assert_always((tx_exc_metadata->sec_type ==
1329 					   CDP_INVALID_SEC_TYPE) ||
1330 					   tx_exc_metadata->sec_type ==
1331 					   vdev->sec_type);
1332 		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
1333 					     tx_exc_metadata,
1334 					     &ast_idx, &ast_hash);
1335 	}
1336 
1337 	hal_tx_desc_cached = (void *)cached_desc;
1338 
1339 	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
1340 		dp_sawf_config_be(soc, hal_tx_desc_cached,
1341 				  &fw_metadata, tx_desc->nbuf, msdu_info);
1342 		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
1343 	}
1344 
1345 	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
1346 				    tx_desc->dma_addr, bm_id, tx_desc->id,
1347 				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
1348 	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
1349 				   vdev->lmac_id);
1350 
1351 	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
1352 					ast_idx);
1353 	/*
1354 	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
1355 	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
1356 	 */
1357 
1358 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
1359 				      (ast_hash & 0xF));
1360 
1361 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
1362 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
1363 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
1364 
1365 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
1366 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1367 
1368 	/* verify checksum offload configuration */
1369 	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
1370 				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
1371 	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
1372 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
1373 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
1374 	}
1375 
1376 	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
1377 
1378 	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
1379 
1380 	tid = msdu_info->tid;
1381 	if (tid != HTT_TX_EXT_TID_INVALID)
1382 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
1383 
1384 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
1385 						tx_desc->nbuf);
1386 	dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
1387 				      tx_desc->nbuf);
1388 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
1389 
1390 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
1391 
1392 	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
1393 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1394 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1395 		DP_STATS_INC(vdev,
1396 			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1397 			     1);
1398 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1399 		return status;
1400 	}
1401 
1402 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
1403 	if (qdf_unlikely(!hal_tx_desc)) {
1404 		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
1405 		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
1406 		DP_STATS_INC(vdev,
1407 			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1408 			     1);
1409 		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
1410 		goto ring_access_fail;
1411 	}
1412 
1413 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
1414 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
1415 
1416 	/* Sync cached descriptor with HW */
1417 	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
1418 
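	/*
	 * dp_tx_attempt_coalescing() decides whether the ring head pointer
	 * update for this descriptor can be coalesced with later ones; the
	 * flag is consumed by dp_tx_ring_access_end_wrapper() below.
	 */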
1419 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
1420 					    msdu_info, ring_id);
1421 
1422 	DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].processed, 1,
1423 			 dp_tx_get_pkt_len(tx_desc));
1424 	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
1425 	dp_tx_update_stats(soc, tx_desc, ring_id);
1426 	status = QDF_STATUS_SUCCESS;
1427 
1428 	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
1429 				 hal_ring_hdl, soc, ring_id);
1430 
1431 ring_access_fail:
1432 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
1433 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
1434 			     qdf_get_log_timestamp(), tx_desc->nbuf);
1435 	return status;
1436 }
1437 
1438 #ifdef IPA_OFFLOAD
1439 static void
1440 dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
1441 			  union hal_tx_bank_config *bank_config)
1442 {
1443 	bank_config->epd = 0;
1444 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
1445 	bank_config->encrypt_type = 0;
1446 
1447 	bank_config->src_buffer_swap = 0;
1448 	bank_config->link_meta_swap = 0;
1449 
1450 	bank_config->index_lookup_enable = 0;
1451 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1452 	bank_config->addrx_en = 1;
1453 	bank_config->addry_en = 1;
1454 
1455 	bank_config->mesh_enable = 0;
1456 	bank_config->dscp_tid_map_id = 0;
1457 	bank_config->vdev_id_check_en = 0;
1458 	bank_config->pmac_id = 0;
1459 }
1460 
1461 static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1462 {
1463 	union hal_tx_bank_config ipa_config = {0};
1464 	int bid;
1465 
1466 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
1467 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
1468 		return;
1469 	}
1470 
1471 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
1472 
1473 	/* Let IPA use last HOST owned bank */
1474 	bid = be_soc->num_bank_profiles - 1;
1475 
1476 	be_soc->bank_profiles[bid].is_configured = true;
1477 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
1478 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1479 				      &be_soc->bank_profiles[bid].bank_config,
1480 				      bid);
1481 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
1482 
1483 	dp_info("IPA bank at slot %d config:0x%x", bid,
1484 		be_soc->bank_profiles[bid].bank_config.val);
1485 
1486 	be_soc->ipa_bank_id = bid;
1487 }
1488 #else /* !IPA_OFFLOAD */
1489 static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1490 {
1491 }
1492 #endif /* IPA_OFFLOAD */
1493 
1494 QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
1495 {
1496 	int i, num_tcl_banks;
1497 
1498 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
1499 
1500 	dp_assert_always_internal(num_tcl_banks);
1501 	be_soc->num_bank_profiles = num_tcl_banks;
1502 
1503 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
1504 					       sizeof(*be_soc->bank_profiles));
1505 	if (!be_soc->bank_profiles) {
1506 		dp_err("unable to allocate memory for DP TX Profiles!");
1507 		return QDF_STATUS_E_NOMEM;
1508 	}
1509 
1510 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
1511 
1512 	for (i = 0; i < num_tcl_banks; i++) {
1513 		be_soc->bank_profiles[i].is_configured = false;
1514 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
1515 	}
1516 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
1517 
1518 	dp_tx_init_ipa_bank_profile(be_soc);
1519 
1520 	return QDF_STATUS_SUCCESS;
1521 }
1522 
1523 void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
1524 {
1525 	qdf_mem_free(be_soc->bank_profiles);
1526 	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
1527 }
1528 
1529 static
1530 void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1531 				union hal_tx_bank_config *bank_config)
1532 {
1533 	struct dp_vdev *vdev = &be_vdev->vdev;
1534 
1535 	bank_config->epd = 0;
1536 
1537 	bank_config->encap_type = vdev->tx_encap_type;
1538 
1539 	/* Only valid for raw frames. Needs work for RAW mode */
1540 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1541 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1542 	} else {
1543 		bank_config->encrypt_type = 0;
1544 	}
1545 
1546 	bank_config->src_buffer_swap = 0;
1547 	bank_config->link_meta_swap = 0;
1548 
1549 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1550 	    vdev->opmode == wlan_op_mode_sta) {
1551 		bank_config->index_lookup_enable = 1;
1552 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1553 		bank_config->addrx_en = 0;
1554 		bank_config->addry_en = 0;
1555 	} else {
1556 		bank_config->index_lookup_enable = 0;
1557 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1558 		bank_config->addrx_en =
1559 			(vdev->hal_desc_addr_search_flags &
1560 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1561 		bank_config->addry_en =
1562 			(vdev->hal_desc_addr_search_flags &
1563 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1564 	}
1565 
1566 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1567 
1568 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1569 
1570 	/* Disabling vdev id check for now. Needs revisit. */
1571 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1572 
1573 	bank_config->pmac_id = vdev->lmac_id;
1574 }
1575 
1576 int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1577 			   struct dp_vdev_be *be_vdev)
1578 {
1579 	char *temp_str = "";
1580 	bool found_match = false;
1581 	int bank_id = DP_BE_INVALID_BANK_ID;
1582 	int i;
1583 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1584 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1585 	union hal_tx_bank_config vdev_config = {0};
1586 
1587 	/* convert vdev params into hal_tx_bank_config */
1588 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1589 
1590 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1591 	/* go over all banks and find a matching/unconfigured/unused bank */
1592 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1593 		if (be_soc->bank_profiles[i].is_configured &&
1594 		    (be_soc->bank_profiles[i].bank_config.val ^
1595 						vdev_config.val) == 0) {
1596 			found_match = true;
1597 			break;
1598 		}
1599 
1600 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1601 		    !be_soc->bank_profiles[i].is_configured)
1602 			unconfigured_slot = i;
1603 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1604 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1605 			zero_ref_count_slot = i;
1606 	}
1607 
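	/*
	 * Preference order for the bank: reuse an exact configuration
	 * match, else take a never-configured slot, else recycle a
	 * configured slot whose ref_count has dropped to zero.
	 */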
1608 	if (found_match) {
1609 		temp_str = "matching";
1610 		bank_id = i;
1611 		goto inc_ref_and_return;
1612 	}
1613 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1614 		temp_str = "unconfigured";
1615 		bank_id = unconfigured_slot;
1616 		goto configure_and_return;
1617 	}
1618 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1619 		temp_str = "zero_ref_count";
1620 		bank_id = zero_ref_count_slot;
1621 	}
1622 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1623 		dp_alert("unable to find TX bank!");
1624 		QDF_BUG(0);
1625 		return bank_id;
1626 	}
1627 
1628 configure_and_return:
1629 	be_soc->bank_profiles[bank_id].is_configured = true;
1630 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1631 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1632 				      &be_soc->bank_profiles[bank_id].bank_config,
1633 				      bank_id);
1634 inc_ref_and_return:
1635 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1636 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1637 
1638 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1639 		temp_str, bank_id, vdev_config.val,
1640 		be_soc->bank_profiles[bank_id].bank_config.val,
1641 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1642 
1643 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1644 		be_soc->bank_profiles[bank_id].bank_config.epd,
1645 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1646 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1647 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1648 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1649 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1650 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1651 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1652 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1653 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1654 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1655 
1656 	return bank_id;
1657 }
1658 
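/**
 * dp_tx_put_bank_profile() - Drop a vdev's reference on its TX bank profile
 * @be_soc: BE-specific SoC handle
 * @be_vdev: BE-specific vdev handle
 *
 * The bank slot itself stays configured; once its reference count reaches
 * zero it becomes eligible for reuse by dp_tx_get_bank_profile().
 */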
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
}

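/**
 * dp_tx_update_bank_profile() - Re-evaluate the bank profile after a vdev
 *                               configuration change
 * @be_soc: BE-specific SoC handle
 * @be_vdev: BE-specific vdev handle
 *
 * Releases the current bank reference and acquires a (possibly different)
 * bank matching the updated vdev settings, then caches the new bank id in
 * both the BE vdev and the common vdev structure.
 */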
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
	be_vdev->vdev.bank_id = be_vdev->bank_id;
}

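/**
 * dp_tx_desc_pool_init_be() - Register TX descriptors with the HW cookie
 *                             conversion tables
 * @soc: DP SoC handle
 * @num_elem: number of descriptors in the pool
 * @pool_id: descriptor pool id
 * @spcl_tx_desc: true to initialize the special TX descriptor pool
 *
 * Walks the pool freelist, writes each descriptor's virtual address into a
 * secondary page table (SPT) entry and derives the descriptor's cookie id
 * from its page/entry position, so that HW completions can be converted
 * back to descriptor addresses without a SW lookup.
 *
 * Return: QDF_STATUS_SUCCESS on success
 */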
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_spt_page_desc *page_desc;
	struct dp_tx_desc_s *tx_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!num_elem) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		cc_ctx  = dp_get_spcl_tx_cookie_t(soc, pool_id);
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		cc_ctx  = dp_get_tx_cookie_t(soc, pool_id);
	}
	tx_desc = tx_desc_pool->freelist;
	page_desc = &cc_ctx->page_desc_base[0];
	while (tx_desc) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each TX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}

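/**
 * dp_tx_desc_pool_deinit_be() - Clear the cookie conversion entries of a
 *                               TX descriptor pool
 * @soc: DP SoC handle
 * @tx_desc_pool: descriptor pool being deinitialized
 * @pool_id: descriptor pool id
 * @spcl_tx_desc: true when deinitializing the special TX descriptor pool
 *
 * Zeroes every secondary page table page that was populated by
 * dp_tx_desc_pool_init_be().
 */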
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	struct dp_spt_page_desc *page_desc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	if (spcl_tx_desc)
		cc_ctx  = dp_get_spcl_tx_cookie_t(soc, pool_id);
	else
		cc_ctx  = dp_get_tx_cookie_t(soc, pool_id);

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
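/**
 * dp_tx_comp_nf_handler() - Near-full interrupt handler for the TX
 *                           completion ring
 * @int_ctx: interrupt context
 * @soc: DP SoC handle
 * @hal_ring_hdl: HAL handle of the TX completion ring
 * @ring_id: TX completion ring id
 * @quota: remaining work quota
 *
 * Flags the ring as near full so the completion path can react; returns 0
 * without touching the ring if the near-full threshold has not actually
 * been crossed.
 *
 * Return: work done
 */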
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_CONFIG_TX_DELAY)
#define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

#define HW_TX_DELAY_MAX                       0x1000000
#define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
#define HW_TX_DELAY_MASK                      0x1FFFFFFF
#define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
	 HW_TX_DELAY_MASK)

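/**
 * dp_mlo_compute_hw_delay_us() - Compute the HW transmit delay of an MSDU
 * @soc: DP SoC handle
 * @vdev: DP vdev handle
 * @ts: TX completion status parsed from the WBM descriptor
 * @delay_us: filled with the computed delay in microseconds
 *
 * Extracts the HW link id from the PPDU id, adjusts the TQM enqueue
 * timestamp and the completion TSF timestamp with their respective deltas
 * w.r.t. the MLO offset, and reports the difference. Delays larger than
 * HW_TX_DELAY_MAX are treated as invalid.
 *
 * Return: QDF_STATUS_SUCCESS if a valid delay was computed
 */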
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;
	uint8_t hw_link_id;
	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
	uint32_t delay;
	int32_t delta_tsf2, delta_tqm;

	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
					   link_id_bits);

	msdu_tqm_enqueue_tstamp_us =
		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
	msdu_compl_tsf_tstamp_us = ts->tsf;

	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);

	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
			delta_tqm) & HW_TX_DELAY_MASK;

	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
			delta_tsf2) & HW_TX_DELAY_MASK;

	delay = (final_msdu_compl_tsf_tstamp_us -
		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;

	if (delay > HW_TX_DELAY_MAX)
		return QDF_STATUS_E_FAILURE;

	if (delay_us)
		*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif

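/**
 * dp_tx_compute_tx_delay_be() - BE arch wrapper to compute the HW TX delay
 * @soc: DP SoC handle
 * @vdev: DP vdev handle
 * @ts: TX completion status
 * @delay_us: filled with the computed delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS if a valid delay was computed
 */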
QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
}

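/**
 * dp_tx_nbuf_map_be() - Fast-path buffer "mapping" for TX
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor being filled
 * @nbuf: network buffer to transmit
 *
 * Cleans the first 256 bytes of the frame from the CPU data cache without
 * a barrier (the barrier is deferred to the qdf_dsb() issued by the caller
 * just before handing the descriptor to HW) and returns the physical
 * address of the frame data obtained via virt_to_phys.
 *
 * Return: physical address of nbuf->data
 */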
static inline
qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
				 struct dp_tx_desc_s *tx_desc,
				 qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
					(void *)(nbuf->data + 256));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

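/*
 * dp_tx_nbuf_unmap_be() is intentionally empty: the fast-path map above
 * does not create a streaming DMA mapping, so there is nothing to undo
 * when the completion is processed.
 */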
static inline
void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
			 struct dp_tx_desc_s *desc)
{
}

#ifdef QCA_DP_TX_NBUF_LIST_FREE
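/**
 * dp_tx_fast_send_be() - Minimal-overhead TX path for BE targets
 * @soc_hdl: CDP SoC handle
 * @vdev_id: id of the transmitting vdev
 * @nbuf: network buffer to transmit
 *
 * Fast transmit path that skips the generic dp_tx_send() processing: it
 * allocates a SW descriptor, maps the frame, builds the TCL DATA command
 * directly in a cached buffer and enqueues it to the TCL ring. TID
 * override, SAWF tagging and STA-mode AST index programming are the only
 * per-frame classifications performed here.
 *
 * Return: NULL if the frame was queued to HW, otherwise the nbuf is
 *         returned to the caller
 */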
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_tx_desc_s *tx_desc;
	uint16_t desc_pool_id;
	uint16_t pkt_len;
	qdf_dma_addr_t paddr;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	hal_ring_handle_t hal_ring_hdl = NULL;
	uint32_t *hal_tx_desc_cached;
	void *hal_tx_desc;
	uint8_t tid = HTT_TX_EXT_TID_INVALID;
	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
	uint8_t sawf_tid = HTT_TX_EXT_TID_INVALID;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;

	pkt_len = qdf_nbuf_headlen(nbuf);
	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, pkt_len);
	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_in_fast_xmit_flow, 1);
	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_per_core[desc_pool_id], 1);

	pdev = vdev->pdev;
	if (dp_tx_limit_check(vdev, nbuf))
		return nbuf;

	if (qdf_unlikely(vdev->skip_sw_tid_classification
				& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
		tid = qdf_nbuf_get_priority(nbuf);

		if (tid >= DP_TX_INVALID_QOS_TAG)
			tid = HTT_TX_EXT_TID_INVALID;
	}

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.desc_na.num, 1);
		DP_STATS_INC(vdev,
			     tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
			     1);
		return nbuf;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->length = pkt_len;
	tx_desc->flags |= pdev->tx_fast_flag;

	tx_desc->nbuf->fast_recycled = 1;

	if (nbuf->is_from_recycler && nbuf->fast_xmit)
		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;

	paddr = dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.dma_error, 1);
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;

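	/*
	 * Build the TCL DATA command in the cached (on-stack) descriptor:
	 * words 0/1 carry the buffer address and SW cookie, word 2 the bank
	 * id, word 3 the HTT TCL metadata, word 4 the length and checksum
	 * enables, word 5 the pmac/vdev ids and optional TID override, and
	 * word 6 the BSS AST index/hash for STA mode.
	 */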
	hal_tx_desc_cached = (void *)cached_desc;
	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
	hal_tx_desc_cached[1] = tx_desc->id <<
		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;

	/* bank_id */
	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;

	hal_tx_desc_cached[4] = tx_desc->length;
	/* l3 and l4 checksum enable */
	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;

	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;

	if (qdf_unlikely(dp_sawf_tag_valid_get(nbuf))) {
		sawf_tid = dp_sawf_config_be(soc, hal_tx_desc_cached,
					     NULL, nbuf, NULL);
		if (sawf_tid != HTT_TX_EXT_TID_INVALID)
			tid = sawf_tid;
	}

	if (tid != HTT_TX_EXT_TID_INVALID) {
		hal_tx_desc_cached[5] |= tid << TCL_DATA_CMD_HLOS_TID_LSB;
		hal_tx_desc_cached[5] |= 1 << TCL_DATA_CMD_HLOS_TID_OVERWRITE_LSB;
	}

	if (vdev->opmode == wlan_op_mode_sta)
		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
			((vdev->bss_ast_hash & 0xF) <<
			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
		goto ring_access_fail2;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	/* Sync cached descriptor with HW */
	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
	qdf_dsb();

	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);

ring_access_fail2:
	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_nbuf_unmap_be(soc, tx_desc);
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(soc, tx_desc, desc_pool_id);

	return nbuf;
}
#endif

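/*
 * The arch-specific alloc/free hooks below are intentionally empty: for BE
 * targets the TX descriptor memory appears to be obtained by the common
 * descriptor pool code, so no additional BE allocation is needed here.
 */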
QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id)
{
}