xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "dp_types.h"
21 #include <dp_internal.h>
22 #include <dp_htt.h>
23 #include "dp_li.h"
24 #include "dp_li_tx.h"
25 #include "dp_tx_desc.h"
26 #include "dp_li_rx.h"
27 #include "dp_peer.h"
28 #include <wlan_utility.h>
29 #include "dp_ipa.h"
30 #ifdef WIFI_MONITOR_SUPPORT
31 #include <dp_mon_1.0.h>
32 #endif
33 
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* TCL data ring -> WBM completion ring mapping for single-pdev builds */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
/* TCL data ring -> WBM completion ring mapping for multi-pdev builds */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although using wbm_ring 4, wbm_ring 3 is mentioned in order to match
	 * with the tx_mask in dp_service_srngs. Please be careful while using
	 * this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
57 
#ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - patch the TCL->WBM map for the
 *					     IPA alternate tx ring
 * @cfg_ctx: soc cfg context holding the tcl_wbm_map_array pointer
 *
 * When IPA is enabled, the IPA_TX_ALT_RING_IDX entry is redirected to
 * WBM ring 4 / HAL_LI_WBM_SW4_BM_ID. No-op when IPA is disabled.
 */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
	struct wlan_cfg_tcl_wbm_ring_num_map *alt_entry;

	if (!cfg_ctx->ipa_enabled)
		return;

	alt_entry = &cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX];
	alt_entry->wbm_ring_num = 4;
	alt_entry->wbm_rbm_id = HAL_LI_WBM_SW4_BM_ID;
}
#else
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
75 
76 static void dp_soc_cfg_attach_li(struct dp_soc *soc)
77 {
78 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
79 
80 	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
81 
82 	soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
83 	dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
84 }
85 
86 qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
87 {
88 	switch (context_type) {
89 	case DP_CONTEXT_TYPE_SOC:
90 		return sizeof(struct dp_soc_li);
91 	case DP_CONTEXT_TYPE_PDEV:
92 		return sizeof(struct dp_pdev_li);
93 	case DP_CONTEXT_TYPE_VDEV:
94 		return sizeof(struct dp_vdev_li);
95 	case DP_CONTEXT_TYPE_PEER:
96 		return sizeof(struct dp_peer_li);
97 	default:
98 		return 0;
99 	}
100 }
101 
/**
 * dp_soc_attach_li() - LI-specific soc attach
 * @soc: DP soc handle
 * @params: soc attach params (unused by the LI arch)
 *
 * Caches the WBM SW0 buffer-manager id on the soc for later tx-path use.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}
109 
/* dp_soc_detach_li() - LI arch hook; nothing soc-specific to tear down */
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_soc_init_li() - LI arch hook; no soc-level init needed */
static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_soc_deinit_li() - LI arch hook; no soc-level deinit needed */
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_pdev_attach_li() - LI arch hook; no pdev-specific attach work */
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_pdev_detach_li() - LI arch hook; no pdev-specific detach work */
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_vdev_attach_li() - LI arch hook; no vdev-specific attach work */
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_vdev_detach_li() - LI arch hook; no vdev-specific detach work */
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
145 
#ifdef AST_OFFLOAD_ENABLE
/**
 * dp_peer_map_detach_li() - free AST/MEC state set up at peer-map attach
 * @soc: DP soc handle
 *
 * Tears down WDS, the AST table, the AST hash and the MEC hash.
 */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

/**
 * dp_peer_map_attach_li() - set up AST table/hash, MEC hash and WDS
 * @soc: DP soc handle
 *
 * On any intermediate failure, previously attached state is unwound
 * via the goto labels before the error status is returned.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
/* AST offload disabled: nothing was attached, so detach is a no-op */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

/* AST offload disabled: only publish the peer-id space size */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif
196 
/**
 * dp_get_soc_context_size_li() - size of the common DP soc context for LI
 *
 * Return: sizeof(struct dp_soc)
 */
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}
201 
202 #ifdef NO_RX_PKT_HDR_TLV
203 /**
204  * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
205  * @soc: Common DP soc handle
206  *
207  * Return: QDF_STATUS
208  */
209 static QDF_STATUS
210 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
211 {
212 	int i;
213 	int mac_id;
214 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
215 	struct dp_srng *rx_mac_srng;
216 	QDF_STATUS status = QDF_STATUS_SUCCESS;
217 
218 	htt_tlv_filter.mpdu_start = 1;
219 	htt_tlv_filter.msdu_start = 1;
220 	htt_tlv_filter.mpdu_end = 1;
221 	htt_tlv_filter.msdu_end = 1;
222 	htt_tlv_filter.attention = 1;
223 	htt_tlv_filter.packet = 1;
224 	htt_tlv_filter.packet_header = 0;
225 
226 	htt_tlv_filter.ppdu_start = 0;
227 	htt_tlv_filter.ppdu_end = 0;
228 	htt_tlv_filter.ppdu_end_user_stats = 0;
229 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
230 	htt_tlv_filter.ppdu_end_status_done = 0;
231 	htt_tlv_filter.enable_fp = 1;
232 	htt_tlv_filter.enable_md = 0;
233 	htt_tlv_filter.enable_md = 0;
234 	htt_tlv_filter.enable_mo = 0;
235 
236 	htt_tlv_filter.fp_mgmt_filter = 0;
237 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
238 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
239 					 FILTER_DATA_MCAST |
240 					 FILTER_DATA_DATA);
241 	htt_tlv_filter.mo_mgmt_filter = 0;
242 	htt_tlv_filter.mo_ctrl_filter = 0;
243 	htt_tlv_filter.mo_data_filter = 0;
244 	htt_tlv_filter.md_data_filter = 0;
245 
246 	htt_tlv_filter.offset_valid = true;
247 
248 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
249 	/*Not subscribing rx_pkt_header*/
250 	htt_tlv_filter.rx_header_offset = 0;
251 	htt_tlv_filter.rx_mpdu_start_offset =
252 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
253 	htt_tlv_filter.rx_mpdu_end_offset =
254 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
255 	htt_tlv_filter.rx_msdu_start_offset =
256 				hal_rx_msdu_start_offset_get(soc->hal_soc);
257 	htt_tlv_filter.rx_msdu_end_offset =
258 				hal_rx_msdu_end_offset_get(soc->hal_soc);
259 	htt_tlv_filter.rx_attn_offset =
260 				hal_rx_attn_offset_get(soc->hal_soc);
261 
262 	for (i = 0; i < MAX_PDEV_CNT; i++) {
263 		struct dp_pdev *pdev = soc->pdev_list[i];
264 
265 		if (!pdev)
266 			continue;
267 
268 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
269 			int mac_for_pdev =
270 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
271 			/*
272 			 * Obtain lmac id from pdev to access the LMAC ring
273 			 * in soc context
274 			 */
275 			int lmac_id =
276 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
277 							   pdev->pdev_id);
278 
279 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
280 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
281 					    rx_mac_srng->hal_srng,
282 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
283 					    &htt_tlv_filter);
284 		}
285 	}
286 	return status;
287 }
288 #else
289 
290 static QDF_STATUS
291 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
292 {
293 	int i;
294 	int mac_id;
295 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
296 	struct dp_srng *rx_mac_srng;
297 	QDF_STATUS status = QDF_STATUS_SUCCESS;
298 
299 	htt_tlv_filter.mpdu_start = 1;
300 	htt_tlv_filter.msdu_start = 1;
301 	htt_tlv_filter.mpdu_end = 1;
302 	htt_tlv_filter.msdu_end = 1;
303 	htt_tlv_filter.attention = 1;
304 	htt_tlv_filter.packet = 1;
305 	htt_tlv_filter.packet_header = 1;
306 
307 	htt_tlv_filter.ppdu_start = 0;
308 	htt_tlv_filter.ppdu_end = 0;
309 	htt_tlv_filter.ppdu_end_user_stats = 0;
310 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
311 	htt_tlv_filter.ppdu_end_status_done = 0;
312 	htt_tlv_filter.enable_fp = 1;
313 	htt_tlv_filter.enable_md = 0;
314 	htt_tlv_filter.enable_md = 0;
315 	htt_tlv_filter.enable_mo = 0;
316 
317 	htt_tlv_filter.fp_mgmt_filter = 0;
318 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
319 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
320 					 FILTER_DATA_MCAST |
321 					 FILTER_DATA_DATA);
322 	htt_tlv_filter.mo_mgmt_filter = 0;
323 	htt_tlv_filter.mo_ctrl_filter = 0;
324 	htt_tlv_filter.mo_data_filter = 0;
325 	htt_tlv_filter.md_data_filter = 0;
326 
327 	htt_tlv_filter.offset_valid = true;
328 
329 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
330 	htt_tlv_filter.rx_header_offset =
331 				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
332 	htt_tlv_filter.rx_mpdu_start_offset =
333 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
334 	htt_tlv_filter.rx_mpdu_end_offset =
335 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
336 	htt_tlv_filter.rx_msdu_start_offset =
337 				hal_rx_msdu_start_offset_get(soc->hal_soc);
338 	htt_tlv_filter.rx_msdu_end_offset =
339 				hal_rx_msdu_end_offset_get(soc->hal_soc);
340 	htt_tlv_filter.rx_attn_offset =
341 				hal_rx_attn_offset_get(soc->hal_soc);
342 
343 	for (i = 0; i < MAX_PDEV_CNT; i++) {
344 		struct dp_pdev *pdev = soc->pdev_list[i];
345 
346 		if (!pdev)
347 			continue;
348 
349 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
350 			int mac_for_pdev =
351 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
352 			/*
353 			 * Obtain lmac id from pdev to access the LMAC ring
354 			 * in soc context
355 			 */
356 			int lmac_id =
357 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
358 							   pdev->pdev_id);
359 
360 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
361 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
362 					    rx_mac_srng->hal_srng,
363 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
364 					    &htt_tlv_filter);
365 		}
366 	}
367 	return status;
368 
369 }
370 #endif
371 
372 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
373 static inline
374 void dp_deinit_txcomp_ring4(struct dp_soc *soc)
375 {
376 	if (soc) {
377 		wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
378 				     soc->tx_comp_ring[3].alloc_size,
379 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
380 				     "Transmit_completion_ring");
381 		dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
382 	}
383 }
384 
385 static inline
386 QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
387 {
388 	if (soc) {
389 		if (dp_srng_init(soc, &soc->tx_comp_ring[3],
390 				 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
391 			dp_err("%pK: dp_srng_init failed for rx_rel_ring",
392 			       soc);
393 			return QDF_STATUS_E_FAILURE;
394 		}
395 		wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
396 				  soc->tx_comp_ring[3].alloc_size,
397 				  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
398 				  "Transmit_completion_ring");
399 	}
400 	return QDF_STATUS_SUCCESS;
401 }
402 
403 static inline
404 void dp_free_txcomp_ring4(struct dp_soc *soc)
405 {
406 	if (soc)
407 		dp_srng_free(soc, &soc->tx_comp_ring[3]);
408 }
409 
410 static inline
411 QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
412 				 uint32_t cached)
413 {
414 	if (soc) {
415 		if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
416 				  tx_comp_ring_size, cached)) {
417 			dp_err("dp_srng_alloc failed for tx_comp_ring");
418 			return QDF_STATUS_E_FAILURE;
419 		}
420 	}
421 	return QDF_STATUS_SUCCESS;
422 }
423 #else
/* Tx comp ring 4 disabled: nothing to de-init */
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
}

/* Tx comp ring 4 disabled: init is a successful no-op */
static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* Tx comp ring 4 disabled: nothing to free */
static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
}

/* Tx comp ring 4 disabled: alloc is a successful no-op */
static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
				 uint32_t cached)
{
	return QDF_STATUS_SUCCESS;
}
446 #endif
447 
/* dp_soc_srng_deinit_li() - de-init LI-specific SRNGs (tx comp ring 4) */
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx Complete ring */
	dp_deinit_txcomp_ring4(soc);
}

/* dp_soc_srng_free_li() - free LI-specific SRNG memory (tx comp ring 4) */
static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}
458 
459 static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
460 {
461 	uint32_t tx_comp_ring_size;
462 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
463 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
464 
465 	soc_cfg_ctx = soc->wlan_cfg_ctx;
466 
467 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
468 	/* Disable cached desc if NSS offload is enabled */
469 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
470 		cached = 0;
471 
472 	if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
473 		goto fail1;
474 	return QDF_STATUS_SUCCESS;
475 fail1:
476 	dp_soc_srng_free_li(soc);
477 	return QDF_STATUS_E_NOMEM;
478 }
479 
480 static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
481 {
482 	/* Tx comp ring 3 */
483 	if (dp_init_txcomp_ring4(soc))
484 		goto fail1;
485 
486 	return QDF_STATUS_SUCCESS;
487 fail1:
488 	/*
489 	 * Cleanup will be done as part of soc_detach, which will
490 	 * be called on pdev attach failure
491 	 */
492 	dp_soc_srng_deinit_li(soc);
493 	return QDF_STATUS_E_FAILURE;
494 }
495 
/* dp_tx_implicit_rbm_set_li() - implicit RBM programming is not needed on LI */
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}

/* dp_txrx_set_vdev_param_li() - no LI-specific vdev params; always succeeds */
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_intrabss_handle_nawds_li() - NAWDS intra-bss handling is not
 * supported on LI; always reports the frame as not consumed.
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	return false;
}
517 
518 static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
519 						  uint8_t *dest_mac,
520 						  uint8_t vdev_id)
521 {
522 	struct dp_peer *peer = NULL;
523 	struct dp_ast_entry *ast_entry = NULL;
524 	uint16_t peer_id;
525 
526 	qdf_spin_lock_bh(&soc->ast_lock);
527 	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);
528 
529 	if (!ast_entry) {
530 		qdf_spin_unlock_bh(&soc->ast_lock);
531 		dp_err("NULL ast entry");
532 		return NULL;
533 	}
534 
535 	peer_id = ast_entry->peer_id;
536 	qdf_spin_unlock_bh(&soc->ast_lock);
537 
538 	if (peer_id == HTT_INVALID_PEER)
539 		return NULL;
540 
541 	peer = dp_peer_get_ref_by_id(soc, peer_id,
542 				     DP_MOD_ID_SAWF);
543 	return peer;
544 }
545 
/* dp_get_rx_hash_key_li() - fill the LRO/RSS hash key via the common helper */
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}

/*
 * dp_peer_get_reo_hash_li() - resolve REO dest ring / hash mode for a peer.
 * LI ignores setup_info and lmac_peer_id_msb and uses the vdev defaults.
 */
static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}

/* dp_reo_remap_config_li() - LI uses the common REO remap configuration */
static bool dp_reo_remap_config_li(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}
568 
/**
 * dp_initialize_arch_ops_li() - populate the arch ops table with the
 *				 LI (legacy/Lithium) implementations
 * @arch_ops: arch ops table to fill
 */
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	/* Host tx/rx data path ops */
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->dp_tx_send_fast = dp_tx_send;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
#else
	/* Host mode disabled: generic rx desc pool handling only */
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
	/* Soc/pdev/vdev lifecycle ops */
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	/* Peer / AST and misc ops */
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
	arch_ops->reo_remap_config = dp_reo_remap_config_li;
}
625 
626 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
627 void dp_tx_comp_get_prefetched_params_from_hal_desc(
628 					struct dp_soc *soc,
629 					void *tx_comp_hal_desc,
630 					struct dp_tx_desc_s **r_tx_desc)
631 {
632 	uint8_t pool_id;
633 	uint32_t tx_desc_id;
634 
635 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
636 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
637 		DP_TX_DESC_ID_POOL_OS;
638 
639 	/* Find Tx descriptor */
640 	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
641 			(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
642 			DP_TX_DESC_ID_PAGE_OS,
643 			(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
644 			DP_TX_DESC_ID_OFFSET_OS);
645 	qdf_prefetch((uint8_t *)*r_tx_desc);
646 }
647 #endif
648