/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_li_api.h>
#include <hal_li_tx.h>
#include "dp_peer.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_li.h"

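/*
 * Maps CDP security types to the HAL encrypt-type values programmed into
 * the TCL descriptor; the table itself is defined in the common TX code.
 */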
extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

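/**
 * dp_tx_comp_get_params_from_hal_desc_li() - Get the SW TX descriptor and
 *	peer ID from a HAL (WBM) TX completion descriptor
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: SW TX descriptor looked up from the completion (out param)
 *
 * Return: QDF_STATUS_SUCCESS
 */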
QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
				       void *tx_comp_hal_desc,
				       struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;

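	/*
	 * The HW-returned descriptor ID packs the special-pool bit, pool,
	 * page and offset into separate bit-fields; extract the pool ID
	 * first, then resolve the descriptor by page and offset.
	 */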
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
				     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
							DP_TX_DESC_ID_PAGE_OS,
				     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS,
				     (tx_desc_id & DP_TX_DESC_ID_SPCL_MASK));
	/* A pool ID mismatch indicates a corrupted descriptor ID */
	if ((*r_tx_desc)->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d does not match %d",
				 pool_id, (*r_tx_desc)->pool_id);

		qdf_assert_always(0);
	}

	(*r_tx_desc)->peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);

	return QDF_STATUS_SUCCESS;
}

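/**
 * dp_tx_process_mec_notify_li() - Handle a MEC notification from FW
 * @soc: DP soc handle
 * @status: WBM completion descriptor (HTT status words)
 *
 * Extract the vdev ID from the HTT status words and, if the vdev is
 * still valid, pass the notification on to the common MEC handler.
 *
 * Return: void
 */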
static inline
void dp_tx_process_mec_notify_li(struct dp_soc *soc, uint8_t *status)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;
	uint32_t *htt_desc = (uint32_t *)status;

	/*
	 * Get the vdev ID from the HTT status word in case of a MEC
	 * notification.
	 */
	vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (!vdev)
		return;
	dp_tx_mec_handler(vdev, status);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

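/**
 * dp_tx_process_htt_completion_li() - Process a TX completion received
 *	from FW over the FW2WBM (HTT) path
 * @soc: DP soc handle
 * @tx_desc: SW TX descriptor being completed
 * @status: HTT completion status words from the WBM descriptor
 * @ring_id: TX completion ring number
 *
 * Return: void
 */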
void dp_tx_process_htt_completion_li(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * WBM can consume the descriptor enqueued via TQM2WBM first, so
	 * the TQM completion may be processed before the MEC notification
	 * arrives over FW2WBM. Hence, do not access any field of the TX
	 * descriptor for a MEC notification.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
		return dp_tx_process_mec_notify_li(soc, status);

	/*
	 * If the descriptor was already freed in vdev_detach, there is
	 * nothing more to do for it.
	 */
	if (qdf_unlikely(!tx_desc->flags)) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
				   tx_desc->id);
		return;
	}

	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	pdev = tx_desc->pdev;
	if (qdf_unlikely(!pdev)) {
		dp_tx_comp_warn("The pdev in the TX desc is NULL, dropped.");
		dp_tx_comp_warn("tx_status: %u", tx_status);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (qdf_unlikely(!vdev)) {
		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

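	/*
	 * The FW-reported status drives the handling below: normal
	 * completions (OK/DROP/TTL) are converted into a
	 * hal_tx_completion_status and fed through the regular completion
	 * path, while reinject, inspect and vdev-ID-mismatch frames take
	 * their dedicated handlers.
	 */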
	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;
		uint8_t transmit_cnt_valid = 0;

		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);
		transmit_cnt_valid =
			HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(
					htt_desc[2]);
		if (transmit_cnt_valid)
			ts.transmit_cnt =
				HTT_TX_WBM_COMPLETION_V2_TRANSMIT_COUNT_GET(
						htt_desc[0]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		switch (tx_status) {
		case HTT_TX_FW2WBM_TX_STATUS_OK:
			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
			break;
		case HTT_TX_FW2WBM_TX_STATUS_DROP:
			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
			break;
		case HTT_TX_FW2WBM_TX_STATUS_TTL:
			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
			break;
		}
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag) ||
		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_HTT_COMP);
		if (qdf_likely(txrx_peer)) {
			DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
						   qdf_nbuf_len(tx_desc->nbuf));
			if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
				DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
		}

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
					     ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (qdf_likely(txrx_peer))
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		uint8_t reinject_reason;

		reinject_reason =
			HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
								htt_desc[0]);
		dp_tx_reinject_handler(soc, vdev, tx_desc,
				       status, reinject_reason);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
	{
		DP_STATS_INC(vdev,
			     tx_i[DP_XMIT_LINK].dropped.fail_per_pkt_vdev_id_check,
			     1);
		goto release_tx_desc;
	}
	default:
		dp_tx_comp_err("Invalid HTT tx_status %d\n",
			       tx_status);
		goto release_tx_desc;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
	return;

release_tx_desc:
	dp_tx_comp_free_buf(soc, tx_desc, false);
	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/**
 * dp_tx_get_rbm_id_li() - Get the RBM ID for TX data-completion routing
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM (return buffer manager) ID
 */
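/*
 * Which variant is compiled in depends on the build configuration:
 * with QCA_OL_TX_MULTIQ_SUPPORT the RBM is derived from the ring ID
 * (with IPA and TX-comp-ring4 special cases); otherwise it comes from
 * the TCL-to-WBM map in the soc cfg or a fixed offset from
 * wbm_sw0_bm_id.
 */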
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#else
#ifndef QCA_DP_ENABLE_TX_COMP_RING4
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? HAL_WBM_SW0_BM_ID + (ring_id - 1) :
		HAL_WBM_SW2_BM_ID);
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	return (ring_id + HAL_WBM_SW0_BM_ID(soc->wbm_sw0_bm_id));
}
#endif
#endif
#else
#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	if (soc->wlan_cfg_ctx->ipa_enabled)
		return (ring_id + soc->wbm_sw0_bm_id);

	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return soc->wlan_cfg_ctx->tcl_wbm_map_array[ring_id].wbm_rbm_id;
}
#endif
#else
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#endif
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * dp_tx_clear_consumed_hw_descs() - Reset all the consumed Tx ring descs to 0
 *
 * @soc: DP soc handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: void
 */
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
	void *desc = hal_srng_src_get_next_consumed(soc->hal_soc, hal_ring_hdl);

	while (desc) {
		hal_tx_desc_clear(desc);
		desc = hal_srng_src_get_next_consumed(soc->hal_soc,
						      hal_ring_hdl);
	}
}

#else
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_CONFIG_TX_DELAY
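/**
 * dp_tx_compute_hw_delay_li() - Compute the TX HW-completion delay
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @ts: TX completion status populated from the WBM descriptor
 * @delay_us: computed delay in microseconds (out param)
 *
 * Thin wrapper around the common helper; the vdev's delta_tsf is used
 * to convert the completion TSF into a host-relative delay.
 *
 * Return: QDF_STATUS
 */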
static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, delay_us);
}
#else
static inline
QDF_STATUS dp_tx_compute_hw_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef CONFIG_SAWF
/**
 * dp_sawf_config_li() - Configure SAWF-specific fields in the TCL descriptor
 *
 * @soc: DP soc handle
 * @hal_tx_desc_cached: tx descriptor
 * @fw_metadata: firmware metadata
 * @vdev_id: vdev id
 * @nbuf: skb buffer
 * @msdu_info: msdu info
 *
 * Return: void
 */
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t q_id = 0;
	uint32_t flow_idx = 0;

	q_id = dp_sawf_queue_id_get(nbuf);
	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
		return;

	msdu_info->tid = (q_id & (CDP_DATA_TID_MAX - 1));
	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
				 (q_id & (CDP_DATA_TID_MAX - 1)));

	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
		return;

	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
		return;

	dp_sawf_tcl_cmd(fw_metadata, nbuf);

	/* For SAWF, q_id starts from DP_SAWF_Q_MAX */
	if (!dp_sawf_get_search_index(soc, nbuf, vdev_id,
				      q_id, &flow_idx))
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, true);

	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       HAL_TX_ADDR_INDEX_SEARCH);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					flow_idx);
}
#else
static inline
void dp_sawf_config_li(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, uint16_t vdev_id,
		       qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
}

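/* SAWF enqueue-stats hooks compile out to no-ops when CONFIG_SAWF is off */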
#define dp_sawf_tx_enqueue_peer_stats(soc, tx_desc)
#define dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc)
#endif

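/**
 * dp_tx_hw_enqueue_li() - Enqueue a TX descriptor to the HW (TCL) ring
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor to enqueue
 * @fw_metadata: metadata passed to FW in the TCL descriptor
 * @tx_exc_metadata: exception-path metadata (may be NULL)
 * @msdu_info: MSDU info carrying the TID and TX queue selection
 *
 * Fill a cached TCL descriptor from the SW descriptor and vdev state,
 * then copy it into the next free entry of the selected TCL ring.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES on an
 * invalid descriptor ID, ring access failure or ring full
 */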
QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
	uint8_t tid;

	/*
	 * Initialize the cached descriptor statically here to avoid the
	 * call overhead of a qdf_mem_set()/memset.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id_li(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_li(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       vdev->search_type);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		dp_sawf_config_li(soc, hal_tx_desc_cached, &fw_metadata,
				  vdev->vdev_id, tx_desc->nbuf, msdu_info);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* Verify the checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	      qdf_nbuf_is_tso(tx_desc->nbuf))  {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	tid = msdu_info->tid;
	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d, type:%d, dma_addr:%llx, offset:%d, desc id:%u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	dp_tx_clear_consumed_hw_descs(soc, hal_ring_hdl);

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
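	/*
	 * Decide whether the head-pointer update for this enqueue can be
	 * coalesced with subsequent ones; the result controls how the
	 * ring access is ended below.
	 */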
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);
	DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
			 tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc, ring_id);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

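/**
 * dp_tx_desc_pool_init_li() - Initialize a TX descriptor pool for Li targets
 * @soc: DP soc handle
 * @num_elem: number of descriptor elements in the pool
 * @pool_id: pool number
 * @spcl_tx_desc: true when initializing a special TX descriptor pool
 *
 * Walk the pool freelist and stamp each descriptor with its unique ID
 * (special bit | pool | page | offset) and its default fields.
 *
 * Return: QDF_STATUS_SUCCESS
 */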
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;

	if (spcl_tx_desc)
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
	else
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((!!spcl_tx_desc) << DP_TX_DESC_ID_SPCL_OS |
			(pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		tx_desc->vdev_id = DP_INVALID_VDEV_ID;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}

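/* No Li-specific de-initialization is needed; the common TX descriptor
 * pool teardown does the cleanup. The stub keeps the arch-ops interface
 * complete.
 */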
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
}

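/* Arch op: on Li targets the TX completion delay is just the HW
 * (TSF-based) delay computed by dp_tx_compute_hw_delay_li() above.
 */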
QDF_STATUS dp_tx_compute_tx_delay_li(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_tx_compute_hw_delay_li(soc, vdev, ts, delay_us);
}

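/* Descriptor pool memory for Li targets is allocated and freed by the
 * common pool code; these arch ops are intentionally empty.
 */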
QDF_STATUS dp_tx_desc_pool_alloc_li(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free_li(struct dp_soc *soc, uint8_t pool_id)
{
}