/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon_1.0.h>
#endif

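/*
 * Per-target map of TCL data rings to their WBM2SW completion ring
 * number and return-buffer-manager (RBM) ID. The single-pdev build
 * re-uses WBM2SW0 for TCL ring 1 (see INVALID_WBM_RING_NUM below),
 * while the multi-pdev build gives each TCL ring its own WBM ring.
 */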
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although WBM ring 4 is used (HAL_LI_WBM_SW4_BM_ID), wbm_ring_num
	 * is set to 3 so that it matches the tx_mask in dp_service_srngs.
	 * Be careful when using this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif

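/*
 * With IPA's second TX pipe enabled, the alternate TX ring completes
 * on WBM2SW ring 4, so patch that entry of the TCL/WBM map at soc cfg
 * attach time; without the feature this is a no-op.
 */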
#ifdef IPA_WDI3_TX_TWO_PIPES
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
	if (!cfg_ctx->ipa_enabled)
		return;

	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
							   HAL_LI_WBM_SW4_BM_ID;
}
#else
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif

static void dp_soc_cfg_attach_li(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);

	soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
	dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
}

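/**
 * dp_get_context_size_li() - Get Li-specific context size by type
 * @context_type: DP context type (soc/pdev/vdev/peer)
 *
 * Return: size of the matching Li context struct, 0 for unknown types
 */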
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_li);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_li);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_li);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_li);
	default:
		return 0;
	}
}

static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

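/*
 * When AST_OFFLOAD_ENABLE is set, the host maintains the AST/MEC hash
 * tables: they are allocated at peer-map attach and torn down in the
 * reverse order, with the attach path unwinding on failure.
 */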
#ifdef AST_OFFLOAD_ENABLE
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif

static QDF_STATUS dp_peer_setup_li(struct dp_soc *soc, struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}

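/*
 * Two variants of the RXDMA ring config follow: with NO_RX_PKT_HDR_TLV
 * the rx_pkt_header TLV is not subscribed (packet_header = 0), otherwise
 * it is subscribed and its offset is programmed from the HAL.
 */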
#ifdef NO_RX_PKT_HDR_TLV
/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config, without
 *				subscribing to the rx_pkt_header TLV
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 0;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	/* Not subscribing to rx_pkt_header */
	htt_tlv_filter.rx_header_offset = 0;
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}
#else

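/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config, subscribing to
 *				the rx_pkt_header TLV as well
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */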
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}
#endif

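/*
 * Optional fourth TX completion ring: tx_comp_ring[3] completes on
 * WBM2SW ring WBM2SW_TXCOMP_RING4_NUM when QCA_DP_ENABLE_TX_COMP_RING4
 * is set; otherwise the helpers below are no-ops.
 */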
#ifdef QCA_DP_ENABLE_TX_COMP_RING4
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
	if (soc) {
		wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
				     soc->tx_comp_ring[3].alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
				     "Transmit_completion_ring");
		dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
	}
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
	if (soc) {
		if (dp_srng_init(soc, &soc->tx_comp_ring[3],
				 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
			dp_err("%pK: dp_srng_init failed for tx_comp_ring",
			       soc);
			return QDF_STATUS_E_FAILURE;
		}
		wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
				  soc->tx_comp_ring[3].alloc_size,
				  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
				  "Transmit_completion_ring");
	}
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
	if (soc)
		dp_srng_free(soc, &soc->tx_comp_ring[3]);
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
				 uint32_t cached)
{
	if (soc) {
		if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
				  tx_comp_ring_size, cached)) {
			dp_err("dp_srng_alloc failed for tx_comp_ring");
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}
#else
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
				 uint32_t cached)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx completion ring 4 */
	dp_deinit_txcomp_ring4(soc);
}

static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}

static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
{
	uint32_t tx_comp_ring_size;
	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	/* Disable cached desc if NSS offload is enabled */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = 0;

	if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
		goto fail1;
	return QDF_STATUS_SUCCESS;
fail1:
	dp_soc_srng_free_li(soc);
	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
{
	/* Tx completion ring 4 (tx_comp_ring[3]) */
	if (dp_init_txcomp_ring4(soc))
		goto fail1;

	return QDF_STATUS_SUCCESS;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	dp_soc_srng_deinit_li(soc);
	return QDF_STATUS_E_FAILURE;
}

static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}

static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}

bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	return false;
}

static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
					 uint32_t *msg_word,
					 void *rx_filter)
{
}

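/*
 * Resolve a destination MAC to a peer reference via the AST hash:
 * look up the AST entry under ast_lock, then take a peer reference
 * by the recorded peer_id (DP_MOD_ID_SAWF); the caller must release it.
 */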
static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
						  uint8_t *dest_mac,
						  uint8_t vdev_id)
{
	struct dp_peer *peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	uint16_t peer_id;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_err("NULL ast entry");
		return NULL;
	}

	peer_id = ast_entry->peer_id;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (peer_id == HTT_INVALID_PEER)
		return NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id,
				     DP_MOD_ID_SAWF);
	return peer;
}

static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}

static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}

static bool dp_reo_remap_config_li(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}

static struct dp_soc *dp_rx_replenish_soc_get_li(struct dp_soc *soc,
						 uint8_t chip_id)
{
	return soc;
}

static uint8_t dp_soc_get_num_soc_li(struct dp_soc *soc)
{
	return 1;
}

static QDF_STATUS dp_txrx_get_vdev_mcast_param_li(struct dp_soc *soc,
						  struct dp_vdev *vdev,
						  cdp_config_param_type *val)
{
	return QDF_STATUS_SUCCESS;
}

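/*
 * Wire the Li-specific implementations into the common DP arch ops
 * table; the TX/RX datapath hooks are registered only when host mode
 * Wi-Fi is enabled (QCA_HOST_MODE_WIFI_DISABLED not set).
 */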
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->dp_tx_send_fast = dp_tx_send;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
#else
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	arch_ops->dp_set_rx_fst = NULL;
	arch_ops->dp_get_rx_fst = NULL;
	arch_ops->dp_rx_fst_ref = NULL;
	arch_ops->dp_rx_fst_deref = NULL;
	arch_ops->txrx_peer_setup = dp_peer_setup_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_mcast_handler =
					dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
	arch_ops->reo_remap_config = dp_reo_remap_config_li;
	arch_ops->dp_rx_replenish_soc_get = dp_rx_replenish_soc_get_li;
	arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_li;
	arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
	arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
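/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Find the SW TX
 *		descriptor for a HAL completion descriptor and prefetch it
 * @soc: Handle to DP soc structure
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: Double pointer to return the found SW TX descriptor
 *
 * The cookie in the completion is decoded into pool id, page id and
 * offset to locate the descriptor, which is then prefetched so it is
 * warm in cache when completion processing touches it.
 */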
void dp_tx_comp_get_prefetched_params_from_hal_desc(
					struct dp_soc *soc,
					void *tx_comp_hal_desc,
					struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;

	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
		DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
			(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS,
			(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS);
	qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif