/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 4, HAL_LI_WBM_SW4_BM_ID, 1}, /* For IPA */
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 1} /* For IPA */};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#endif

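/**
 * dp_soc_cfg_attach_li() - Attach Lithium-specific soc cfg defaults
 * @soc: DP soc handle
 *
 * Selects WBM2SW_REL_ERR_RING_NUM as the Rx release ring and points the
 * soc cfg context at the Lithium TCL-to-WBM ring map defined above.
 */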
static void dp_soc_cfg_attach_li(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);

	soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
}

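/**
 * dp_get_context_size_li() - Get Lithium-specific DP context size
 * @context_type: DP context type (soc/pdev/vdev/peer)
 *
 * Return: size in bytes of the requested context, 0 for unknown types
 */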
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_li);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_li);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_li);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_li);
	default:
		return 0;
	}
}

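/**
 * dp_mon_get_context_size_li() - Get Lithium-specific monitor context size
 * @context_type: monitor context type (pdev/soc)
 *
 * Return: size in bytes of the requested monitor context, 0 for unknown types
 */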
qdf_size_t dp_mon_get_context_size_li(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_MON_PDEV:
		return sizeof(struct dp_mon_pdev_li);
	case DP_CONTEXT_TYPE_MON_SOC:
		return sizeof(struct dp_mon_soc_li);
	default:
		return 0;
	}
}

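/**
 * dp_soc_attach_li() - Lithium-specific soc attach
 * @soc: DP soc handle
 * @params: soc attach params
 *
 * Caches the WBM SW0 buffer manager id used for Tx completions.
 *
 * Return: QDF_STATUS_SUCCESS
 */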
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

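/*
 * Peer map attach/detach: when AST_OFFLOAD_ENABLE is defined, the host
 * sets up and tears down the AST table, AST/MEC hash tables and WDS
 * state; otherwise only the maximum peer id is recorded.
 */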
#ifdef AST_OFFLOAD_ENABLE
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif

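/**
 * dp_get_soc_context_size_li() - Get DP soc context size for Lithium
 *
 * Return: size of struct dp_soc in bytes
 */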
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}

#ifdef NO_RX_PKT_HDR_TLV
/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * The rx_pkt_header TLV is not subscribed in this configuration.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 0;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	/* Not subscribing rx_pkt_header */
	htt_tlv_filter.rx_header_offset = 0;
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}
#else

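/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * The rx_pkt_header TLV is subscribed in this configuration.
 *
 * Return: QDF_STATUS
 */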
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}
#endif

#ifdef QCA_DP_ENABLE_TX_COMP_RING4
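/*
 * Helpers for the optional fourth Tx completion ring (tx_comp_ring[3],
 * ring number WBM2SW_TXCOMP_RING4_NUM). Without QCA_DP_ENABLE_TX_COMP_RING4
 * these collapse to no-ops that report success.
 */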
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
	if (soc) {
		wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
				     soc->tx_comp_ring[3].alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
				     "Transmit_completion_ring");
		dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
	}
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
	if (soc) {
		if (dp_srng_init(soc, &soc->tx_comp_ring[3],
				 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
			dp_err("%pK: dp_srng_init failed for tx_comp_ring",
			       soc);
			return QDF_STATUS_E_FAILURE;
		}
		wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
				  soc->tx_comp_ring[3].alloc_size,
				  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
				  "Transmit_completion_ring");
	}
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
	if (soc)
		dp_srng_free(soc, &soc->tx_comp_ring[3]);
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
				 uint32_t cached)
{
	if (soc) {
		if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
				  tx_comp_ring_size, cached)) {
			dp_err("dp_srng_alloc failed for tx_comp_ring");
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}
#else
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
				 uint32_t cached)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx completion ring 4 */
	dp_deinit_txcomp_ring4(soc);
}

static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}

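/**
 * dp_soc_srng_alloc_li() - Allocate Lithium-specific soc srng rings
 * @soc: DP soc handle
 *
 * Allocates the optional fourth Tx completion ring; cached descriptors
 * are disabled when NSS offload is configured.
 *
 * Return: QDF_STATUS
 */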
static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
{
	uint32_t tx_comp_ring_size;
	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	/* Disable cached desc if NSS offload is enabled */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = 0;

	if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
		goto fail1;
	return QDF_STATUS_SUCCESS;
fail1:
	dp_soc_srng_free_li(soc);
	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
{
	/* Tx completion ring 4 (tx_comp_ring[3]) */
	if (dp_init_txcomp_ring4(soc))
		goto fail1;

	return QDF_STATUS_SUCCESS;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	dp_soc_srng_deinit_li(soc);
	return QDF_STATUS_E_FAILURE;
}

static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}

static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}

static bool dp_reo_remap_config_li(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}

static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}

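/**
 * dp_initialize_arch_ops_li() - Register the Lithium-specific arch ops
 * @arch_ops: arch ops table to populate with the handlers defined above
 */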
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
#else
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
	arch_ops->reo_remap_config = dp_reo_remap_config_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
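/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Locate the SW Tx
 *	descriptor for a HAL Tx completion descriptor and prefetch it
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL Tx completion descriptor
 * @r_tx_desc: filled with the SW Tx descriptor found from the desc id
 */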
void dp_tx_comp_get_prefetched_params_from_hal_desc(
					struct dp_soc *soc,
					void *tx_comp_hal_desc,
					struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;

	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
		DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
			(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS,
			(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS);
	qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
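/**
 * dp_pkt_add_timestamp() - Stamp an nbuf with a TSF-converted timestamp
 * @vdev: DP vdev handle providing the get_tsf_time callback
 * @index: packet timestamp index to populate
 * @time: host timestamp to convert to TSF time
 * @nbuf: network buffer to stamp
 */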
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		if (vdev->get_tsf_time) {
			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
		}
	}
}

void dp_pkt_get_timestamp(uint64_t *time)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
		*time = qdf_get_log_timestamp();
}
#endif
599