xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li.c (revision 6f8e9cffb06eb9abf06cf8c491361bfcadaccff5)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "dp_types.h"
21 #include "dp_rings.h"
22 #include <dp_internal.h>
23 #include <dp_htt.h>
24 #include "dp_li.h"
25 #include "dp_li_tx.h"
26 #include "dp_tx_desc.h"
27 #include "dp_li_rx.h"
28 #include "dp_peer.h"
29 #include <wlan_utility.h>
30 #include "dp_ipa.h"
31 #ifdef WIFI_MONITOR_SUPPORT
32 #include <dp_mon_1.0.h>
33 #endif
34 
/*
 * g_tcl_wbm_map_array - TCL data ring to WBM completion ring mapping
 *
 * Each entry ties a TCL data ring number to the WBM2SW completion ring
 * and return-buffer-manager (RBM) id used for its tx completions;
 * for_ipa flags entries reserved for the IPA datapath.
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although using wbm_ring 4, wbm_ring 3 is mentioned in order to match
	 * with the tx_mask in dp_service_srngs. Please be careful while using
	 * this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
58 
#ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - Repoint the IPA alternate
 *	tx ring entry of the TCL->WBM map at WBM2SW ring 4
 * @cfg_ctx: soc configuration context
 *
 * No-op when IPA is not enabled in the configuration.
 */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
	struct wlan_cfg_tcl_wbm_ring_num_map *alt_entry;

	if (!cfg_ctx->ipa_enabled)
		return;

	alt_entry = &cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX];
	alt_entry->wbm_ring_num = 4;
	alt_entry->wbm_rbm_id = HAL_LI_WBM_SW4_BM_ID;
}
#else
/* Single IPA tx pipe: the default TCL->WBM map needs no fixup */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
76 
77 static void dp_soc_cfg_attach_li(struct dp_soc *soc)
78 {
79 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
80 
81 	dp_soc_cfg_attach(soc);
82 
83 	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
84 
85 	soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
86 	dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
87 }
88 
89 qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
90 {
91 	switch (context_type) {
92 	case DP_CONTEXT_TYPE_SOC:
93 		return sizeof(struct dp_soc_li);
94 	case DP_CONTEXT_TYPE_PDEV:
95 		return sizeof(struct dp_pdev_li);
96 	case DP_CONTEXT_TYPE_VDEV:
97 		return sizeof(struct dp_vdev_li);
98 	case DP_CONTEXT_TYPE_PEER:
99 		return sizeof(struct dp_peer_li);
100 	default:
101 		return 0;
102 	}
103 }
104 
/**
 * dp_soc_attach_li() - Li specific soc attach
 * @soc: DP soc handle
 * @params: soc attach params (unused here)
 *
 * Caches the WBM SW0 buffer manager id from HAL for later use.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}
112 
/**
 * dp_soc_detach_li() - Li specific soc detach
 * @soc: DP soc handle
 *
 * No Li specific soc state to release.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
117 
/**
 * dp_soc_init_li() - Li specific soc init
 * @soc: DP soc handle
 * @htc_handle: HTC handle for host-target communication
 * @hif_handle: opaque HIF handle
 *
 * Registers the soc with minidump, caches the HIF/HAL handles and then
 * runs the common soc init sequence.
 *
 * Return: opaque soc handle from dp_soc_init(), or NULL if no HAL soc
 *	   handle could be obtained from HIF.
 */
static void *dp_soc_init_li(struct dp_soc *soc, HTC_HANDLE htc_handle,
			    struct hif_opaque_softc *hif_handle)
{
	/* Make the soc structure visible in minidump collection */
	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
			  WLAN_MD_DP_SOC, "dp_soc");

	soc->hif_handle = hif_handle;

	/* Common init cannot proceed without a valid HAL soc handle */
	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	return dp_soc_init(soc, htc_handle, hif_handle);
}
132 
/**
 * dp_soc_deinit_li() - Li specific soc deinit
 * @soc: DP soc handle
 *
 * Clears the common-init-done flag before running the generic soc
 * deinit sequence.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	qdf_atomic_set(&soc->cmn_init_done, 0);

	dp_soc_deinit(soc);

	return QDF_STATUS_SUCCESS;
}
141 
/**
 * dp_pdev_attach_li() - Li specific pdev attach
 * @pdev: DP pdev handle
 * @params: pdev attach params (unused)
 *
 * No Li specific pdev state; nothing to do.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}
147 
/**
 * dp_pdev_detach_li() - Li specific pdev detach
 * @pdev: DP pdev handle
 *
 * No Li specific pdev state; nothing to do.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
152 
/**
 * dp_vdev_attach_li() - Li specific vdev attach
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 *
 * No Li specific vdev state; nothing to do.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
157 
/**
 * dp_vdev_detach_li() - Li specific vdev detach
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 *
 * No Li specific vdev state; nothing to do.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
162 
#ifdef AST_OFFLOAD_ENABLE
/**
 * dp_peer_map_detach_li() - Tear down host AST/MEC tracking state
 * @soc: DP soc handle
 *
 * Releases the WDS, AST table, AST hash and MEC hash resources set up
 * by dp_peer_map_attach_li().
 *
 * NOTE(review): teardown order here (AST table before AST hash) does
 * not mirror the attach error path below (hash detached before table)
 * - confirm the two structures have no ordering dependency.
 */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

/**
 * dp_peer_map_attach_li() - Set up host AST/MEC tracking state
 * @soc: DP soc handle
 *
 * Attaches, in order: AST table, AST hash, MEC hash, then WDS. On
 * failure of any step, previously attached resources are unwound via
 * the goto cleanup labels.
 *
 * Return: QDF_STATUS_SUCCESS, or the status of the step that failed
 */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	/* Li uses the full peer range as peer ids */
	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
/* AST offload disabled: no host side AST/MEC state to manage */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif
213 
/**
 * dp_peer_setup_li() - Li specific peer setup
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the owning vdev
 * @peer_mac: MAC address of the peer
 * @setup_info: peer setup parameters
 *
 * Thin wrapper; forwards directly to the common dp_peer_setup_wifi3().
 *
 * Return: QDF_STATUS from dp_peer_setup_wifi3()
 */
static QDF_STATUS dp_peer_setup_li(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				   uint8_t *peer_mac,
				   struct cdp_peer_setup_info *setup_info)
{
	return dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac, setup_info);
}
220 
/**
 * dp_get_soc_context_size_li() - Allocation size of the DP soc context
 *
 * Return: sizeof(struct dp_soc) - the common soc structure size is
 *	   used for this allocation on Li targets.
 */
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}
225 
226 #ifdef NO_RX_PKT_HDR_TLV
227 /**
228  * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
229  * @soc: Common DP soc handle
230  *
231  * Return: QDF_STATUS
232  */
233 static QDF_STATUS
234 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
235 {
236 	int i;
237 	int mac_id;
238 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
239 	struct dp_srng *rx_mac_srng;
240 	QDF_STATUS status = QDF_STATUS_SUCCESS;
241 	uint32_t target_type = hal_get_target_type(soc->hal_soc);
242 
243 	if (target_type == TARGET_TYPE_QCN9160)
244 		return status;
245 
246 	htt_tlv_filter.mpdu_start = 1;
247 	htt_tlv_filter.msdu_start = 1;
248 	htt_tlv_filter.mpdu_end = 1;
249 	htt_tlv_filter.msdu_end = 1;
250 	htt_tlv_filter.attention = 1;
251 	htt_tlv_filter.packet = 1;
252 	htt_tlv_filter.packet_header = 0;
253 
254 	htt_tlv_filter.ppdu_start = 0;
255 	htt_tlv_filter.ppdu_end = 0;
256 	htt_tlv_filter.ppdu_end_user_stats = 0;
257 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
258 	htt_tlv_filter.ppdu_end_status_done = 0;
259 	htt_tlv_filter.enable_fp = 1;
260 	htt_tlv_filter.enable_md = 0;
261 	htt_tlv_filter.enable_md = 0;
262 	htt_tlv_filter.enable_mo = 0;
263 
264 	htt_tlv_filter.fp_mgmt_filter = 0;
265 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
266 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
267 					 FILTER_DATA_MCAST |
268 					 FILTER_DATA_DATA);
269 	htt_tlv_filter.mo_mgmt_filter = 0;
270 	htt_tlv_filter.mo_ctrl_filter = 0;
271 	htt_tlv_filter.mo_data_filter = 0;
272 	htt_tlv_filter.md_data_filter = 0;
273 
274 	htt_tlv_filter.offset_valid = true;
275 
276 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
277 	/*Not subscribing rx_pkt_header*/
278 	htt_tlv_filter.rx_header_offset = 0;
279 	htt_tlv_filter.rx_mpdu_start_offset =
280 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
281 	htt_tlv_filter.rx_mpdu_end_offset =
282 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
283 	htt_tlv_filter.rx_msdu_start_offset =
284 				hal_rx_msdu_start_offset_get(soc->hal_soc);
285 	htt_tlv_filter.rx_msdu_end_offset =
286 				hal_rx_msdu_end_offset_get(soc->hal_soc);
287 	htt_tlv_filter.rx_attn_offset =
288 				hal_rx_attn_offset_get(soc->hal_soc);
289 
290 	for (i = 0; i < MAX_PDEV_CNT; i++) {
291 		struct dp_pdev *pdev = soc->pdev_list[i];
292 
293 		if (!pdev)
294 			continue;
295 
296 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
297 			int mac_for_pdev =
298 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
299 			/*
300 			 * Obtain lmac id from pdev to access the LMAC ring
301 			 * in soc context
302 			 */
303 			int lmac_id =
304 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
305 							   pdev->pdev_id);
306 
307 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
308 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
309 					    rx_mac_srng->hal_srng,
310 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
311 					    &htt_tlv_filter);
312 		}
313 	}
314 	return status;
315 }
316 #else
317 
318 static QDF_STATUS
319 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
320 {
321 	int i;
322 	int mac_id;
323 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
324 	struct dp_srng *rx_mac_srng;
325 	QDF_STATUS status = QDF_STATUS_SUCCESS;
326 	uint32_t target_type = hal_get_target_type(soc->hal_soc);
327 
328 	if (target_type == TARGET_TYPE_QCN9160)
329 		return status;
330 
331 	htt_tlv_filter.mpdu_start = 1;
332 	htt_tlv_filter.msdu_start = 1;
333 	htt_tlv_filter.mpdu_end = 1;
334 	htt_tlv_filter.msdu_end = 1;
335 	htt_tlv_filter.attention = 1;
336 	htt_tlv_filter.packet = 1;
337 	htt_tlv_filter.packet_header = 1;
338 
339 	htt_tlv_filter.ppdu_start = 0;
340 	htt_tlv_filter.ppdu_end = 0;
341 	htt_tlv_filter.ppdu_end_user_stats = 0;
342 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
343 	htt_tlv_filter.ppdu_end_status_done = 0;
344 	htt_tlv_filter.enable_fp = 1;
345 	htt_tlv_filter.enable_md = 0;
346 	htt_tlv_filter.enable_md = 0;
347 	htt_tlv_filter.enable_mo = 0;
348 
349 	htt_tlv_filter.fp_mgmt_filter = 0;
350 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
351 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
352 					 FILTER_DATA_MCAST |
353 					 FILTER_DATA_DATA);
354 	htt_tlv_filter.mo_mgmt_filter = 0;
355 	htt_tlv_filter.mo_ctrl_filter = 0;
356 	htt_tlv_filter.mo_data_filter = 0;
357 	htt_tlv_filter.md_data_filter = 0;
358 
359 	htt_tlv_filter.offset_valid = true;
360 
361 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
362 	htt_tlv_filter.rx_header_offset =
363 				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
364 	htt_tlv_filter.rx_mpdu_start_offset =
365 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
366 	htt_tlv_filter.rx_mpdu_end_offset =
367 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
368 	htt_tlv_filter.rx_msdu_start_offset =
369 				hal_rx_msdu_start_offset_get(soc->hal_soc);
370 	htt_tlv_filter.rx_msdu_end_offset =
371 				hal_rx_msdu_end_offset_get(soc->hal_soc);
372 	htt_tlv_filter.rx_attn_offset =
373 				hal_rx_attn_offset_get(soc->hal_soc);
374 
375 	for (i = 0; i < MAX_PDEV_CNT; i++) {
376 		struct dp_pdev *pdev = soc->pdev_list[i];
377 
378 		if (!pdev)
379 			continue;
380 
381 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
382 			int mac_for_pdev =
383 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
384 			/*
385 			 * Obtain lmac id from pdev to access the LMAC ring
386 			 * in soc context
387 			 */
388 			int lmac_id =
389 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
390 							   pdev->pdev_id);
391 
392 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
393 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
394 					    rx_mac_srng->hal_srng,
395 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
396 					    &htt_tlv_filter);
397 		}
398 	}
399 	return status;
400 
401 }
402 #endif
403 
/**
 * dp_srng_init_li() - Init a DP srng ring on Li targets
 * @soc: DP soc handle
 * @srng: srng to initialize
 * @ring_type: ring type (TCL/WBM/REO etc.)
 * @ring_num: ring instance number within the type
 * @mac_id: mac id the ring belongs to
 *
 * Forwards to dp_srng_init_idx() with index 0.
 *
 * Return: QDF_STATUS from dp_srng_init_idx()
 */
static inline
QDF_STATUS dp_srng_init_li(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num, int mac_id)
{
	return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
}
410 
411 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
412 static inline
413 void dp_deinit_txcomp_ring4(struct dp_soc *soc)
414 {
415 	if (soc) {
416 		wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
417 				     soc->tx_comp_ring[3].alloc_size,
418 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
419 				     "Transmit_completion_ring");
420 		dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
421 	}
422 }
423 
424 static inline
425 QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
426 {
427 	if (soc) {
428 		if (dp_srng_init(soc, &soc->tx_comp_ring[3],
429 				 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
430 			dp_err("%pK: dp_srng_init failed for rx_rel_ring",
431 			       soc);
432 			return QDF_STATUS_E_FAILURE;
433 		}
434 		wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
435 				  soc->tx_comp_ring[3].alloc_size,
436 				  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
437 				  "Transmit_completion_ring");
438 	}
439 	return QDF_STATUS_SUCCESS;
440 }
441 
442 static inline
443 void dp_free_txcomp_ring4(struct dp_soc *soc)
444 {
445 	if (soc)
446 		dp_srng_free(soc, &soc->tx_comp_ring[3]);
447 }
448 
449 static inline
450 QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
451 				 uint32_t cached)
452 {
453 	if (soc) {
454 		if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
455 				  tx_comp_ring_size, cached)) {
456 			dp_err("dp_srng_alloc failed for tx_comp_ring");
457 			return QDF_STATUS_E_FAILURE;
458 		}
459 	}
460 	return QDF_STATUS_SUCCESS;
461 }
462 #else
463 static inline
464 void dp_deinit_txcomp_ring4(struct dp_soc *soc)
465 {
466 }
467 
468 static inline
469 QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
470 {
471 	return QDF_STATUS_SUCCESS;
472 }
473 
474 static inline
475 void dp_free_txcomp_ring4(struct dp_soc *soc)
476 {
477 }
478 
479 static inline
480 QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
481 				 uint32_t cached)
482 {
483 	return QDF_STATUS_SUCCESS;
484 }
485 #endif
486 
/**
 * dp_soc_srng_deinit_li() - Deinit Li specific soc rings
 * @soc: DP soc handle
 *
 * Only tx completion ring 4 is Li specific.
 */
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx Complete ring */
	dp_deinit_txcomp_ring4(soc);
}
492 
/**
 * dp_soc_srng_free_li() - Free Li specific soc rings
 * @soc: DP soc handle
 *
 * Only tx completion ring 4 is Li specific.
 */
static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}
497 
498 static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
499 {
500 	uint32_t tx_comp_ring_size;
501 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
502 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
503 
504 	soc_cfg_ctx = soc->wlan_cfg_ctx;
505 
506 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
507 	/* Disable cached desc if NSS offload is enabled */
508 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
509 		cached = 0;
510 
511 	if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
512 		goto fail1;
513 	return QDF_STATUS_SUCCESS;
514 fail1:
515 	dp_soc_srng_free_li(soc);
516 	return QDF_STATUS_E_NOMEM;
517 }
518 
519 static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
520 {
521 	/* Tx comp ring 3 */
522 	if (dp_init_txcomp_ring4(soc))
523 		goto fail1;
524 
525 	return QDF_STATUS_SUCCESS;
526 fail1:
527 	/*
528 	 * Cleanup will be done as part of soc_detach, which will
529 	 * be called on pdev attach failure
530 	 */
531 	dp_soc_srng_deinit_li(soc);
532 	return QDF_STATUS_E_FAILURE;
533 }
534 
/**
 * dp_tx_implicit_rbm_set_li() - Set implicit RBM for a tx ring
 * @soc: DP soc handle
 * @tx_ring_id: tx ring id
 * @bm_id: buffer manager id
 *
 * No-op on Li targets.
 */
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}
540 
/**
 * dp_txrx_set_vdev_param_li() - Li specific vdev param handling
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @param: parameter being set
 * @val: value to set
 *
 * No Li specific vdev parameters; always succeeds.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}
548 
/**
 * dp_rx_intrabss_handle_nawds_li() - Intra-bss NAWDS multicast handler
 * @soc: DP soc handle
 * @ta_peer: transmitting peer
 * @nbuf_copy: copy of the rx buffer
 * @tid_stats: per-TID rx stats
 * @link_id: link id
 *
 * No Li specific handling; always reports the frame as not consumed.
 *
 * Return: false
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats,
			       uint8_t link_id)
{
	return false;
}
557 
/**
 * dp_rx_word_mask_subscribe_li() - Subscribe rx word mask in HTT msg
 * @soc: DP soc handle
 * @msg_word: HTT message word to update
 * @rx_filter: opaque rx filter
 *
 * No-op: Li does not support rx TLV word mask subscription.
 */
static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
					 uint32_t *msg_word,
					 void *rx_filter)
{
}
563 
/**
 * dp_get_rx_hash_key_li() - Fill the LRO/RX hash key
 * @soc: DP soc handle
 * @lro_hash: hash config to populate
 *
 * Forwards to the common random-bytes hash key generator.
 */
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}
569 
/**
 * dp_peer_get_reo_hash_li() - Get REO destination and hash for a peer
 * @vdev: DP vdev of the peer
 * @setup_info: peer setup info (unused on Li)
 * @reo_dest: out; selected REO destination ring
 * @hash_based: out; whether hash based steering is used
 * @lmac_peer_id_msb: unused on Li
 *
 * Forwards to the vdev default REO hash selection.
 */
static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
578 
/**
 * dp_reo_remap_config_li() - Compute REO remap register values
 * @soc: DP soc handle
 * @remap0: out; remap register 0 value
 * @remap1: out; remap register 1 value
 * @remap2: out; remap register 2 value
 *
 * Thin wrapper over the common dp_reo_remap_config().
 *
 * Return: result of dp_reo_remap_config()
 */
static bool dp_reo_remap_config_li(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}
586 
/**
 * dp_soc_get_num_soc_li() - Number of socs in the system
 * @soc: DP soc handle
 *
 * Li has no multi-chip grouping; always a single soc.
 *
 * Return: 1
 */
static uint8_t dp_soc_get_num_soc_li(struct dp_soc *soc)
{
	return 1;
}
591 
/**
 * dp_txrx_get_vdev_mcast_param_li() - Get vdev multicast parameter
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @val: out param; left untouched on Li
 *
 * No Li specific multicast parameter; always succeeds without
 * writing @val.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_txrx_get_vdev_mcast_param_li(struct dp_soc *soc,
						  struct dp_vdev *vdev,
						  cdp_config_param_type *val)
{
	return QDF_STATUS_SUCCESS;
}
598 
/**
 * dp_get_hw_link_id_li() - Get hardware link id for a pdev
 * @pdev: DP pdev handle
 *
 * Li has no multi-link hardware; link id is always 0.
 *
 * Return: 0
 */
static uint8_t dp_get_hw_link_id_li(struct dp_pdev *pdev)
{
	return 0;
}
603 
/**
 * dp_get_vdev_stats_for_unmap_peer_li() - Collect vdev stats at peer unmap
 * @vdev: DP vdev handle
 * @peer: peer being unmapped
 * @vdev_stats: out param; left untouched on Li
 *
 * No-op on Li targets.
 */
static void dp_get_vdev_stats_for_unmap_peer_li(
					struct dp_vdev *vdev,
					struct dp_peer *peer,
					struct cdp_vdev_stats **vdev_stats)
{
}
610 
/**
 * dp_get_soc_by_chip_id_li() - Resolve a soc handle from a chip id
 * @soc: DP soc handle
 * @chip_id: chip id (ignored on Li)
 *
 * Li is single-chip; the given soc is always the answer.
 *
 * Return: @soc
 */
static struct
dp_soc *dp_get_soc_by_chip_id_li(struct dp_soc *soc,
				 uint8_t chip_id)
{
	return soc;
}
617 
/**
 * dp_initialize_arch_ops_li() - Populate the arch ops table with the
 *	Li (Lithium) implementations
 * @arch_ops: arch ops table to fill
 *
 * Ops left NULL (the rx FST hooks) have no Li specific implementation.
 */
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
	/* Tx/rx fast-path ops are only needed in host data path mode */
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->dp_tx_send_fast = dp_tx_send;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_alloc = dp_tx_desc_pool_alloc_li;
	arch_ops->dp_tx_desc_pool_free = dp_tx_desc_pool_free_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
#else
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
	/* soc/pdev/vdev/peer lifecycle ops */
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	/* No rx FST support on Li */
	arch_ops->dp_set_rx_fst = NULL;
	arch_ops->dp_get_rx_fst = NULL;
	arch_ops->dp_rx_fst_ref = NULL;
	arch_ops->dp_rx_fst_deref = NULL;
	arch_ops->txrx_peer_setup = dp_peer_setup_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_mcast_handler =
					dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
	arch_ops->reo_remap_config = dp_reo_remap_config_li;
	arch_ops->dp_get_soc_by_chip_id = dp_get_soc_by_chip_id_li;
	arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_li;
	arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
	arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
	arch_ops->get_hw_link_id = dp_get_hw_link_id_li;
	arch_ops->txrx_srng_init = dp_srng_init_li;
	arch_ops->dp_get_vdev_stats_for_unmap_peer =
					dp_get_vdev_stats_for_unmap_peer_li;
	arch_ops->dp_get_interface_stats = dp_txrx_get_vdev_stats;
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
#endif
	arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
}
698 
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Resolve and
 *	prefetch the SW tx descriptor for a HAL tx completion entry
 * @soc: DP soc handle
 * @tx_comp_hal_desc: hardware completion ring descriptor
 * @r_tx_desc: out param; resolved SW tx descriptor
 *
 * Decodes pool/page/offset from the completion's descriptor id, looks
 * up the SW descriptor and issues a cache prefetch on it.
 */
void dp_tx_comp_get_prefetched_params_from_hal_desc(
					struct dp_soc *soc,
					void *tx_comp_hal_desc,
					struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	uint8_t pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint32_t page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint32_t offset = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	/* Find Tx descriptor */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);

	/* Warm the cache line before the completion handler touches it */
	qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif
721