xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "dp_types.h"
21 #include <dp_internal.h>
22 #include <dp_htt.h>
23 #include "dp_li.h"
24 #include "dp_li_tx.h"
25 #include "dp_tx_desc.h"
26 #include "dp_li_rx.h"
27 #include "dp_peer.h"
28 #include <wlan_utility.h>
29 #include "dp_ipa.h"
30 
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/*
 * TCL data ring -> WBM completion ring / RBM id mapping for single-pdev
 * targets. Indexed by TCL ring number.
 */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
/*
 * TCL data ring -> WBM completion ring / RBM id mapping for multi-pdev
 * targets. Indexed by TCL ring number.
 */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although using wbm_ring 4, wbm_ring 3 is mentioned in order to match
	 * with the tx_mask in dp_service_srngs. Please be careful while using
	 * this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
54 
#ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - Patch the TCL->WBM map so the
 *	IPA alternate Tx ring completes on WBM2SW ring 4
 * @cfg_ctx: soc cfg context holding tcl_wbm_map_array
 *
 * No-op unless IPA is enabled in the soc cfg.
 */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
	if (!cfg_ctx->ipa_enabled)
		return;

	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
							   HAL_LI_WBM_SW4_BM_ID;
}
#else
/* Without the IPA two-pipe feature there is nothing to patch */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
72 
73 static void dp_soc_cfg_attach_li(struct dp_soc *soc)
74 {
75 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
76 
77 	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
78 
79 	soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
80 	dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
81 }
82 
83 qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
84 {
85 	switch (context_type) {
86 	case DP_CONTEXT_TYPE_SOC:
87 		return sizeof(struct dp_soc_li);
88 	case DP_CONTEXT_TYPE_PDEV:
89 		return sizeof(struct dp_pdev_li);
90 	case DP_CONTEXT_TYPE_VDEV:
91 		return sizeof(struct dp_vdev_li);
92 	case DP_CONTEXT_TYPE_PEER:
93 		return sizeof(struct dp_peer_li);
94 	default:
95 		return 0;
96 	}
97 }
98 
99 qdf_size_t dp_mon_get_context_size_li(enum dp_context_type context_type)
100 {
101 	switch (context_type) {
102 	case DP_CONTEXT_TYPE_MON_PDEV:
103 		return sizeof(struct dp_mon_pdev_li);
104 	case DP_CONTEXT_TYPE_MON_SOC:
105 		return sizeof(struct dp_mon_soc_li);
106 	default:
107 		return 0;
108 	}
109 }
110 
/**
 * dp_soc_attach_li() - Lithium-specific soc attach
 * @soc: DP soc handle
 * @params: attach params (unused here)
 *
 * Caches the HAL WBM SW0 buffer-manager id for later Tx use.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}
118 
/* Lithium soc detach: no arch-specific teardown needed */
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
123 
/* Lithium soc init: no arch-specific initialization needed */
static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
128 
/* Lithium soc deinit: no arch-specific de-initialization needed */
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
133 
/* Lithium pdev attach: no arch-specific work, always succeeds */
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}
139 
/* Lithium pdev detach: no arch-specific work, always succeeds */
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
144 
/* Lithium vdev attach: no arch-specific work, always succeeds */
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
149 
/* Lithium vdev detach: no arch-specific work, always succeeds */
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
154 
#ifdef AST_OFFLOAD_ENABLE
/**
 * dp_peer_map_detach_li() - Tear down the AST/MEC structures created by
 *	dp_peer_map_attach_li()
 * @soc: DP soc handle
 */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

/**
 * dp_peer_map_attach_li() - Set up AST table, AST hash, MEC hash and WDS
 * @soc: DP soc handle
 *
 * On partial failure the already-attached structures are unwound via the
 * goto labels below, in reverse order of attach.
 *
 * Return: QDF_STATUS_SUCCESS, or the status of the first failing sub-attach
 */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	/* On Li the peer id space is 1:1 with the configured peer count */
	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	/* dp_soc_wds_attach() has no failure path to handle here */
	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
/* AST offload disabled: host keeps no AST/MEC state, nothing to detach */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

/* AST offload disabled: only the peer id range needs setting up */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif
205 
/**
 * dp_get_soc_context_size_li() - Size of the common DP soc context used by
 *	Lithium targets
 *
 * Return: sizeof(struct dp_soc)
 */
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}
210 
211 #ifdef NO_RX_PKT_HDR_TLV
212 /**
213  * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
214  * @soc: Common DP soc handle
215  *
216  * Return: QDF_STATUS
217  */
218 static QDF_STATUS
219 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
220 {
221 	int i;
222 	int mac_id;
223 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
224 	struct dp_srng *rx_mac_srng;
225 	QDF_STATUS status = QDF_STATUS_SUCCESS;
226 
227 	htt_tlv_filter.mpdu_start = 1;
228 	htt_tlv_filter.msdu_start = 1;
229 	htt_tlv_filter.mpdu_end = 1;
230 	htt_tlv_filter.msdu_end = 1;
231 	htt_tlv_filter.attention = 1;
232 	htt_tlv_filter.packet = 1;
233 	htt_tlv_filter.packet_header = 0;
234 
235 	htt_tlv_filter.ppdu_start = 0;
236 	htt_tlv_filter.ppdu_end = 0;
237 	htt_tlv_filter.ppdu_end_user_stats = 0;
238 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
239 	htt_tlv_filter.ppdu_end_status_done = 0;
240 	htt_tlv_filter.enable_fp = 1;
241 	htt_tlv_filter.enable_md = 0;
242 	htt_tlv_filter.enable_md = 0;
243 	htt_tlv_filter.enable_mo = 0;
244 
245 	htt_tlv_filter.fp_mgmt_filter = 0;
246 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
247 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
248 					 FILTER_DATA_MCAST |
249 					 FILTER_DATA_DATA);
250 	htt_tlv_filter.mo_mgmt_filter = 0;
251 	htt_tlv_filter.mo_ctrl_filter = 0;
252 	htt_tlv_filter.mo_data_filter = 0;
253 	htt_tlv_filter.md_data_filter = 0;
254 
255 	htt_tlv_filter.offset_valid = true;
256 
257 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
258 	/*Not subscribing rx_pkt_header*/
259 	htt_tlv_filter.rx_header_offset = 0;
260 	htt_tlv_filter.rx_mpdu_start_offset =
261 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
262 	htt_tlv_filter.rx_mpdu_end_offset =
263 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
264 	htt_tlv_filter.rx_msdu_start_offset =
265 				hal_rx_msdu_start_offset_get(soc->hal_soc);
266 	htt_tlv_filter.rx_msdu_end_offset =
267 				hal_rx_msdu_end_offset_get(soc->hal_soc);
268 	htt_tlv_filter.rx_attn_offset =
269 				hal_rx_attn_offset_get(soc->hal_soc);
270 
271 	for (i = 0; i < MAX_PDEV_CNT; i++) {
272 		struct dp_pdev *pdev = soc->pdev_list[i];
273 
274 		if (!pdev)
275 			continue;
276 
277 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
278 			int mac_for_pdev =
279 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
280 			/*
281 			 * Obtain lmac id from pdev to access the LMAC ring
282 			 * in soc context
283 			 */
284 			int lmac_id =
285 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
286 							   pdev->pdev_id);
287 
288 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
289 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
290 					    rx_mac_srng->hal_srng,
291 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
292 					    &htt_tlv_filter);
293 		}
294 	}
295 	return status;
296 }
297 #else
298 
299 static QDF_STATUS
300 dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
301 {
302 	int i;
303 	int mac_id;
304 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
305 	struct dp_srng *rx_mac_srng;
306 	QDF_STATUS status = QDF_STATUS_SUCCESS;
307 
308 	htt_tlv_filter.mpdu_start = 1;
309 	htt_tlv_filter.msdu_start = 1;
310 	htt_tlv_filter.mpdu_end = 1;
311 	htt_tlv_filter.msdu_end = 1;
312 	htt_tlv_filter.attention = 1;
313 	htt_tlv_filter.packet = 1;
314 	htt_tlv_filter.packet_header = 1;
315 
316 	htt_tlv_filter.ppdu_start = 0;
317 	htt_tlv_filter.ppdu_end = 0;
318 	htt_tlv_filter.ppdu_end_user_stats = 0;
319 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
320 	htt_tlv_filter.ppdu_end_status_done = 0;
321 	htt_tlv_filter.enable_fp = 1;
322 	htt_tlv_filter.enable_md = 0;
323 	htt_tlv_filter.enable_md = 0;
324 	htt_tlv_filter.enable_mo = 0;
325 
326 	htt_tlv_filter.fp_mgmt_filter = 0;
327 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
328 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
329 					 FILTER_DATA_MCAST |
330 					 FILTER_DATA_DATA);
331 	htt_tlv_filter.mo_mgmt_filter = 0;
332 	htt_tlv_filter.mo_ctrl_filter = 0;
333 	htt_tlv_filter.mo_data_filter = 0;
334 	htt_tlv_filter.md_data_filter = 0;
335 
336 	htt_tlv_filter.offset_valid = true;
337 
338 	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
339 	htt_tlv_filter.rx_header_offset =
340 				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
341 	htt_tlv_filter.rx_mpdu_start_offset =
342 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
343 	htt_tlv_filter.rx_mpdu_end_offset =
344 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
345 	htt_tlv_filter.rx_msdu_start_offset =
346 				hal_rx_msdu_start_offset_get(soc->hal_soc);
347 	htt_tlv_filter.rx_msdu_end_offset =
348 				hal_rx_msdu_end_offset_get(soc->hal_soc);
349 	htt_tlv_filter.rx_attn_offset =
350 				hal_rx_attn_offset_get(soc->hal_soc);
351 
352 	for (i = 0; i < MAX_PDEV_CNT; i++) {
353 		struct dp_pdev *pdev = soc->pdev_list[i];
354 
355 		if (!pdev)
356 			continue;
357 
358 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
359 			int mac_for_pdev =
360 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
361 			/*
362 			 * Obtain lmac id from pdev to access the LMAC ring
363 			 * in soc context
364 			 */
365 			int lmac_id =
366 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
367 							   pdev->pdev_id);
368 
369 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
370 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
371 					    rx_mac_srng->hal_srng,
372 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
373 					    &htt_tlv_filter);
374 		}
375 	}
376 	return status;
377 
378 }
379 #endif
380 
381 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
382 static inline
383 void dp_deinit_txcomp_ring4(struct dp_soc *soc)
384 {
385 	if (soc) {
386 		wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
387 				     soc->tx_comp_ring[3].alloc_size,
388 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
389 				     "Transmit_completion_ring");
390 		dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
391 	}
392 }
393 
394 static inline
395 QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
396 {
397 	if (soc) {
398 		if (dp_srng_init(soc, &soc->tx_comp_ring[3],
399 				 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
400 			dp_err("%pK: dp_srng_init failed for rx_rel_ring",
401 			       soc);
402 			return QDF_STATUS_E_FAILURE;
403 		}
404 		wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
405 				  soc->tx_comp_ring[3].alloc_size,
406 				  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
407 				  "Transmit_completion_ring");
408 	}
409 	return QDF_STATUS_SUCCESS;
410 }
411 
412 static inline
413 void dp_free_txcomp_ring4(struct dp_soc *soc)
414 {
415 	if (soc)
416 		dp_srng_free(soc, &soc->tx_comp_ring[3]);
417 }
418 
419 static inline
420 QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
421 				 uint32_t cached)
422 {
423 	if (soc) {
424 		if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
425 				  tx_comp_ring_size, cached)) {
426 			dp_err("dp_srng_alloc failed for tx_comp_ring");
427 			return QDF_STATUS_E_FAILURE;
428 		}
429 	}
430 	return QDF_STATUS_SUCCESS;
431 }
432 #else
433 static inline
434 void dp_deinit_txcomp_ring4(struct dp_soc *soc)
435 {
436 }
437 
438 static inline
439 QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
440 {
441 	return QDF_STATUS_SUCCESS;
442 }
443 
444 static inline
445 void dp_free_txcomp_ring4(struct dp_soc *soc)
446 {
447 }
448 
449 static inline
450 QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
451 				 uint32_t cached)
452 {
453 	return QDF_STATUS_SUCCESS;
454 }
455 #endif
456 
/* Lithium-specific srng deinit: only the optional 4th Tx completion ring */
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx Complete ring */
	dp_deinit_txcomp_ring4(soc);
}
462 
/* Lithium-specific srng free: only the optional 4th Tx completion ring */
static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}
467 
468 static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
469 {
470 	uint32_t tx_comp_ring_size;
471 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
472 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
473 
474 	soc_cfg_ctx = soc->wlan_cfg_ctx;
475 
476 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
477 	/* Disable cached desc if NSS offload is enabled */
478 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
479 		cached = 0;
480 
481 	if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
482 		goto fail1;
483 	return QDF_STATUS_SUCCESS;
484 fail1:
485 	dp_soc_srng_free_li(soc);
486 	return QDF_STATUS_E_NOMEM;
487 }
488 
489 static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
490 {
491 	/* Tx comp ring 3 */
492 	if (dp_init_txcomp_ring4(soc))
493 		goto fail1;
494 
495 	return QDF_STATUS_SUCCESS;
496 fail1:
497 	/*
498 	 * Cleanup will be done as part of soc_detach, which will
499 	 * be called on pdev attach failure
500 	 */
501 	dp_soc_srng_deinit_li(soc);
502 	return QDF_STATUS_E_FAILURE;
503 }
504 
/* No implicit RBM programming is required on Lithium targets */
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}
510 
/* No Lithium-specific vdev parameter handling; always reports success */
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}
518 
/*
 * NAWDS intra-BSS handling is not applicable on Lithium; returning false
 * tells the caller the frame was not consumed here.
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	return false;
}
526 
/**
 * dp_find_peer_by_destmac_li() - Look up a peer by destination MAC through
 *	the per-vdev AST hash
 * @soc: DP soc handle
 * @dest_mac: destination MAC address to search for
 * @vdev_id: vdev the AST entry must belong to
 *
 * Takes a DP_MOD_ID_SAWF reference on the returned peer; the caller is
 * responsible for releasing that reference.
 *
 * Return: referenced peer, or NULL if no AST entry / invalid peer id /
 *	   peer ref could not be taken
 */
static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
						  uint8_t *dest_mac,
						  uint8_t vdev_id)
{
	struct dp_peer *peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	uint16_t peer_id;

	/* The AST hash must only be walked under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_err("NULL ast entry");
		return NULL;
	}

	/* Snapshot the peer id before dropping the lock; the entry itself
	 * must not be dereferenced after unlock.
	 */
	peer_id = ast_entry->peer_id;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (peer_id == HTT_INVALID_PEER)
		return NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id,
				     DP_MOD_ID_SAWF);
	return peer;
}
554 
/* Fill the LRO/RSS hash key; Lithium uses the common key bytes (soc unused) */
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}
560 
/**
 * dp_initialize_arch_ops_li() - Populate the arch ops table with the
 *	Lithium (Li) implementations
 * @arch_ops: ops table to fill; every assigned member is overwritten
 */
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	/* Host-mode Tx/Rx fast-path handlers */
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
#else
	/* Host mode disabled: only generic Rx desc pool handling is needed */
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	/* Lifecycle (attach/detach/init/deinit) and SRNG management */
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	/* Misc helpers and configuration hooks */
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
}
612 
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Resolve the SW Tx
 *	descriptor from a HAL Tx completion descriptor and prefetch it
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL Tx completion descriptor
 * @r_tx_desc: out param; receives the matching SW Tx descriptor pointer
 */
void dp_tx_comp_get_prefetched_params_from_hal_desc(
					struct dp_soc *soc,
					void *tx_comp_hal_desc,
					struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	/* Decode pool / page / offset fields packed inside the desc id */
	uint8_t pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint32_t page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint32_t offset = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	/* Find Tx descriptor and start warming its cache line */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
	qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif
635