/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If EIT header length is less than the MSDU download length, then
	 * adjust the download length to just hold EIT header.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */

void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
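	/*
	 * No-op for this target: TX completions arrive as HTT soft-UMAC
	 * messages and are handled in dp_tx_compl_handler_rh(), so there is
	 * no HAL completion ring descriptor to parse here.
	 */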
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;

	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS);
	/* Error: pool id from the cookie does not match the descriptor */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);

		qdf_assert_always(0);
	}

	return tx_desc;
}

void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
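	/*
	 * Stub for this target; HTT TX completions are processed from the
	 * message path in dp_tx_compl_handler_rh() instead.
	 */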
}

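/**
 * dp_tx_adjust_download_len_rh() - Clamp the packet download length
 * @nbuf: socket buffer
 * @download_len: configured download length
 *
 * frag 0 of the nbuf holds the TCL_DATA_CMD descriptor and is always
 * downloaded in full; the payload portion is clamped to the actual nbuf
 * length when the frame is shorter than the configured download length.
 *
 * Return: adjusted download length
 */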
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}

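/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill CE data attributes for an nbuf
 * @nbuf: socket buffer
 *
 * Encode the tx_classify bit and the packet offset (length of frag 0,
 * i.e. the prepended TCL_DATA_CMD descriptor) into the nbuf data
 * attributes so they can be programmed into the CE source descriptor.
 */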
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}

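/**
 * dp_tx_record_hw_desc_rh() - Record the TCL descriptor in the TX HW
 *			       descriptor history
 * @hal_tx_desc_cached: CPU copy of the descriptor that was just programmed
 * @soc: Handle to DP SoC structure
 *
 * Debug helper; compiled out when DP_TX_HW_DESC_HISTORY is not defined.
 */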
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif

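/**
 * dp_tx_update_write_index() - Update the CE TX ring write index
 * @soc: Handle to DP SoC structure
 * @tx_ep_info: TX endpoint info holding the CE TX ring handle
 * @coalesce: coalescing decision from dp_tx_attempt_coalescing()
 *
 * With FEATURE_RUNTIME_PM or DP_POWER_SAVE the index is only written to
 * HW when the bus is awake; otherwise a flush event is recorded on the
 * source ring so the update can be replayed later.
 */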
#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	int ret;

	/* Avoid runtime get and put APIs under high throughput scenarios */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
					  CE_RING_FLUSH_EVENT);
			ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		} else {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
							    coalesce);
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	} else {
		dp_runtime_get(soc);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		dp_runtime_put(soc);
	}
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
	} else {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
	}
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
					    coalesce);
}
#endif

/**
 * dp_flush_tx_ring_rh() - flush tx ring write index
 * @pdev: dp pdev handle
 * @ring_id: Tx ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	int ret;

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		return ret;
	}

	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}

QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	int ret;
	/*
	 * Initialize the cached descriptor statically here to avoid the
	 * function call overhead of a separate qdf_mem_set()/memset().
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

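	/*
	 * Prepend the synced TCL descriptor as frag 0 of the nbuf so that
	 * it is downloaded to the target ahead of the frame contents.
	 */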
	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
			      tx_ep_info->tx_endpoint, download_len);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, 0);

	dp_tx_update_write_index(soc, tx_ep_info, coalesce);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[0], 1);
	dp_tx_update_stats(soc, tx_desc, 0);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *				    based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc Pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *				   based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc Pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *				     based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
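	/*
	 * Nothing to do: the freelist set up in dp_tx_tcl_desc_pool_init_rh()
	 * points into the descriptor pages, which are released in
	 * dp_tx_tcl_desc_pool_free_rh().
	 */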
}

/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;

	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}

QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
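	/* Walk the SW TX descriptor freelist, compose each descriptor id
	 * from pool/page/offset and attach a TCL descriptor to every entry.
	 */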
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}

QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
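	/* Delay computation is not done for this target; delay_us is left
	 * unchanged.
	 */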
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}

void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

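	/*
	 * Each completion record is HTT_TX_MSDU_INFO_SIZE bytes: word 1
	 * carries the SW buffer cookie and release source, word 2 the SW
	 * peer id and word 3 the release reason.
	 */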
	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));

		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First descriptor of this completion batch */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;
next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) as WCN6450 only
	 * supports one TX ring.
	 */
	ring_id = 0;

	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}
790