xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.c (revision c96d5d25926d2a81a5d1800dffa4ef543a4a54fb)
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_be.h"
#include "dp_be_tx.h"
#include "dp_be_rx.h"
#include <hal_be_api.h>

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 4, HAL_BE_WBM_SW4_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
	{3, 6, HAL_BE_WBM_SW5_BM_ID, 0},
	{4, 7, HAL_BE_WBM_SW6_BM_ID, 0}
};

#else

static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_BE_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
	{3, 3, HAL_BE_WBM_SW3_BM_ID, 0},
	{4, 4, HAL_BE_WBM_SW4_BM_ID, 0}
};
#endif
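
/*
 * Note: each entry above maps a TCL data ring to the WBM2SW ring and RBM id
 * used for its TX completions (e.g. in the single-pdev profile, TCL ring 1
 * completes on wbm2sw ring 4 with HAL_BE_WBM_SW4_BM_ID). This table is
 * consumed through soc->wlan_cfg_ctx->tcl_wbm_map_array, which
 * dp_soc_cfg_attach_be() below points at g_tcl_wbm_map_array.
 */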

static void dp_soc_cfg_attach_be(struct dp_soc *soc)
{
	soc->wlan_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
}

qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_be);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_be);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_be);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_be);
	default:
		return 0;
	}
}
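
/*
 * Illustrative use (sketch only; the actual caller is outside this file):
 * common DP code is expected to size its BE-specific allocations through
 * the arch op rather than a hard-coded sizeof, e.g.
 *
 *	qdf_size_t soc_ctx_size =
 *		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_SOC);
 *
 * which resolves to sizeof(struct dp_soc_be) once dp_get_context_size_be()
 * has been registered in dp_initialize_arch_ops_be().
 */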

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/**
 * dp_cc_wbm_sw_en_cfg() - configure HW cookie conversion enablement
 *			   per wbm2sw ring
 * @cc_cfg: HAL HW cookie conversion configuration structure pointer
 *
 * Return: None
 */
static inline
void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
{
	cc_cfg->wbm2sw6_cc_en = 1;
	cc_cfg->wbm2sw5_cc_en = 1;
	cc_cfg->wbm2sw4_cc_en = 1;
	cc_cfg->wbm2sw3_cc_en = 1;
	cc_cfg->wbm2sw2_cc_en = 1;
	/* disable wbm2sw1 hw cc as it's for FW */
	cc_cfg->wbm2sw1_cc_en = 0;
	cc_cfg->wbm2sw0_cc_en = 1;
	cc_cfg->wbm2fw_cc_en = 0;
}
#else
static inline
void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
{
	cc_cfg->wbm2sw6_cc_en = 1;
	cc_cfg->wbm2sw5_cc_en = 1;
	cc_cfg->wbm2sw4_cc_en = 1;
	cc_cfg->wbm2sw3_cc_en = 1;
	cc_cfg->wbm2sw2_cc_en = 1;
	cc_cfg->wbm2sw1_cc_en = 1;
	cc_cfg->wbm2sw0_cc_en = 1;
	cc_cfg->wbm2fw_cc_en = 0;
}
#endif

/**
 * dp_cc_reg_cfg_init() - initialize and configure HW cookie
 *			  conversion register
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 * @is_4k_align: flag indicating whether the page address is 4K aligned
 *
 * Return: None
 */
static void dp_cc_reg_cfg_init(struct dp_soc *soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       bool is_4k_align)
{
	struct hal_hw_cc_config cc_cfg = { 0 };

	if (!soc->wlan_cfg_ctx->hw_cc_enabled) {
		dp_info("INI skip HW CC register setting");
		return;
	}

	cc_cfg.lut_base_addr_31_0 = cc_ctx->cmem_base;
	cc_cfg.cc_global_en = true;
	cc_cfg.page_4k_align = is_4k_align;
	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
	cc_cfg.cookie_page_msb = DP_CC_DESC_ID_PPT_PAGE_OS_MSB;
	/* bit 36 should be set to 1 so the HW knows this is a CMEM address */
	cc_cfg.lut_base_addr_39_32 = 0x10;

	dp_cc_wbm_sw_en_cfg(&cc_cfg);

	hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
}
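
/*
 * Worked example (illustrative value only): if cc_ctx->cmem_base were
 * 0x00100020, lut_base_addr_31_0 would carry the low 32 bits (0x00100020)
 * while lut_base_addr_39_32 = 0x10 supplies bit 36, giving the 40-bit
 * address 0x10_0010_0020 that the HW decodes as a CMEM-resident lookup
 * table.
 */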

/**
 * dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
 * @hal_soc_hdl: HAL SOC handle
 * @offset: CMEM address
 * @value: value to write
 *
 * Return: None.
 */
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{
	hal_cmem_write(hal_soc_hdl, offset, value);
}

/**
 * dp_hw_cc_cmem_addr_init() - Check and initialize CMEM base address for
 *			       HW cookie conversion
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 *
 * Return: 0 in case of success, else error value
 */
static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	dp_info("cmem base 0x%llx, size 0x%llx",
		soc->cmem_base, soc->cmem_size);
	/* get CMEM for cookie conversion */
	if (soc->cmem_size < DP_CC_PPT_MEM_SIZE) {
		dp_err("cmem_size %llu bytes < 4K", soc->cmem_size);
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->cmem_base = (uint32_t)(soc->cmem_base +
					DP_CC_MEM_OFFSET_IN_CMEM);

	return QDF_STATUS_SUCCESS;
}

#else

static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
				      struct dp_hw_cookie_conversion_t *cc_ctx,
				      bool is_4k_align) {}

static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{ }

static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t max_tx_rx_desc_num, num_spt_pages, i = 0;
	struct dp_spt_page_desc *spt_desc;
	struct qdf_mem_dma_page_t *dma_page;
	QDF_STATUS qdf_status;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cc_cmem_addr_init(soc, cc_ctx);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* estimate how many SPT DDR pages are needed */
	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
			WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
	dp_info("num_spt_pages needed %d", num_spt_pages);

	dp_desc_multi_pages_mem_alloc(soc, DP_HW_CC_SPT_PAGE_TYPE,
				      &cc_ctx->page_pool, qdf_page_size,
				      num_spt_pages, 0, false);
	if (!cc_ctx->page_pool.dma_pages) {
		dp_err("spt ddr pages allocation failed");
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->page_desc_base = qdf_mem_malloc(
			num_spt_pages * sizeof(struct dp_spt_page_desc));
	if (!cc_ctx->page_desc_base) {
		dp_err("spt page descs allocation failed");
		goto fail_0;
	}

	/* initialize the page descriptors */
	spt_desc = cc_ctx->page_desc_base;
	dma_page = cc_ctx->page_pool.dma_pages;
	while (i < num_spt_pages) {
		/* check if the page address is 4K aligned */
		if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
			dp_err("non-4k aligned pages addr %pK",
			       (void *)dma_page[i].page_p_addr);
			goto fail_1;
		}

		spt_desc[i].page_v_addr =
					dma_page[i].page_v_addr_start;
		spt_desc[i].page_p_addr =
					dma_page[i].page_p_addr;
		i++;
	}

	cc_ctx->total_page_num = num_spt_pages;
	qdf_spinlock_create(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
fail_1:
	qdf_mem_free(cc_ctx->page_desc_base);
fail_0:
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);

	return QDF_STATUS_E_FAILURE;
}
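
/*
 * Sizing sketch (illustrative numbers, not the actual config values): if
 * WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS plus
 * WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS came to 65536
 * descriptors and DP_CC_SPT_PAGE_MAX_ENTRIES were 512, the attach above
 * would request 65536 / 512 = 128 SPT pages (clamped to
 * DP_CC_PPT_MAX_ENTRIES), each backed by one DDR page in cc_ctx->page_pool
 * and tracked by one dp_spt_page_desc.
 */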

static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_mem_free(cc_ctx->page_desc_base);
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);
	qdf_spinlock_destroy(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t i = 0;
	struct dp_spt_page_desc *spt_desc;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	if (!cc_ctx->total_page_num) {
		dp_err("total page num is 0");
		return QDF_STATUS_E_INVAL;
	}

	spt_desc = cc_ctx->page_desc_base;
	while (i < cc_ctx->total_page_num) {
		/* write page PA to CMEM */
		dp_hw_cc_cmem_write(soc->hal_soc,
				    (cc_ctx->cmem_base +
				     i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED),
				    (spt_desc[i].page_p_addr >>
				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));

		spt_desc[i].ppt_index = i;
		spt_desc[i].avail_entry_index = 0;
		/* link page desc */
		if ((i + 1) != cc_ctx->total_page_num)
			spt_desc[i].next = &spt_desc[i + 1];
		else
			spt_desc[i].next = NULL;
		i++;
	}

	cc_ctx->page_desc_freelist = cc_ctx->page_desc_base;
	cc_ctx->free_page_num = cc_ctx->total_page_num;

	/* write WBM/REO cookie conversion CFG register */
	dp_cc_reg_cfg_init(soc, cc_ctx, true);

	return QDF_STATUS_SUCCESS;
}
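
/*
 * Example of one PPT programming step (illustrative address only): for page
 * i whose 4K-aligned physical address is 0x8_1234_5000, the init loop above
 * writes that PA, right-shifted by DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED,
 * to CMEM offset cc_ctx->cmem_base + i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED;
 * the HW re-appends the shifted-out low-order bits when it walks the PPT
 * during cookie conversion.
 */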

static QDF_STATUS dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	cc_ctx->page_desc_freelist = NULL;
	cc_ctx->free_page_num = 0;

	return QDF_STATUS_SUCCESS;
}

uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc)
{
	uint16_t num_pages, count;
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	num_pages = (num_desc / DP_CC_SPT_PAGE_MAX_ENTRIES) +
			(num_desc % DP_CC_SPT_PAGE_MAX_ENTRIES ? 1 : 0);

	if (num_pages > cc_ctx->free_page_num) {
		dp_err("fail: num_pages required %d > free_page_num %d",
		       num_pages,
		       cc_ctx->free_page_num);
		return 0;
	}

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	*list_head = *list_tail = cc_ctx->page_desc_freelist;
	for (count = 0; count < num_pages; count++) {
		if (qdf_unlikely(!cc_ctx->page_desc_freelist)) {
			cc_ctx->page_desc_freelist = *list_head;
			*list_head = *list_tail = NULL;
			qdf_spin_unlock_bh(&cc_ctx->cc_lock);
			return 0;
		}
		*list_tail = cc_ctx->page_desc_freelist;
		cc_ctx->page_desc_freelist = cc_ctx->page_desc_freelist->next;
	}
	(*list_tail)->next = NULL;
	cc_ctx->free_page_num -= count;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);

	return count;
}

void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums)
{
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	struct dp_spt_page_desc *temp_list = NULL;

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	temp_list = cc_ctx->page_desc_freelist;
	cc_ctx->page_desc_freelist = *list_head;
	(*list_tail)->next = temp_list;
	cc_ctx->free_page_num += page_nums;
	*list_tail = NULL;
	*list_head = NULL;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);
}
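
/*
 * Usage sketch (illustrative only; the real callers, e.g. the BE TX/RX
 * descriptor pool init paths, live outside this file):
 *
 *	struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, num_desc);
 *	if (!got)
 *		return QDF_STATUS_E_RESOURCES;
 *	... populate the SPT entries of the pages on the head..tail list ...
 *	dp_cc_spt_page_desc_free(be_soc, &head, &tail, got);
 */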

static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	qdf_status = dp_tx_init_bank_profiles(be_soc);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* cookie conversion */
	qdf_status = dp_hw_cookie_conversion_attach(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_tx_deinit_bank_profiles(be_soc);

	dp_hw_cookie_conversion_detach(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cookie_conversion_init(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_hw_cookie_conversion_deinit(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);

	/* Needs to be enabled after bring-up */
	be_vdev->vdev_id_check_en = false;

	if (be_vdev->bank_id == DP_BE_INVALID_BANK_ID) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAULT;
	}
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	dp_tx_put_bank_profile(be_soc, be_vdev);
	return QDF_STATUS_SUCCESS;
}

qdf_size_t dp_get_soc_context_size_be(void)
{
	return sizeof(struct dp_soc_be);
}

/**
 * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/*
	 * In the Beryllium chipset, the msdu_start, mpdu_end
	 * and rx_attn TLVs are part of msdu_end/mpdu_start.
	 */
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
	htt_tlv_filter.rx_mpdu_end_offset = 0;
	htt_tlv_filter.rx_msdu_start_offset = 0;
	htt_tlv_filter.rx_attn_offset = 0;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);

	dp_info("TLV subscription\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
		"TLV offsets\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
		htt_tlv_filter.msdu_start,
		htt_tlv_filter.mpdu_end,
		htt_tlv_filter.attention,
		htt_tlv_filter.mpdu_start,
		htt_tlv_filter.msdu_end,
		htt_tlv_filter.packet_header,
		htt_tlv_filter.packet,
		htt_tlv_filter.rx_msdu_start_offset,
		htt_tlv_filter.rx_mpdu_end_offset,
		htt_tlv_filter.rx_attn_offset,
		htt_tlv_filter.rx_mpdu_start_offset,
		htt_tlv_filter.rx_msdu_end_offset,
		htt_tlv_filter.rx_header_offset,
		htt_tlv_filter.rx_packet_offset);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_service_near_full_srngs_be() - Main bottom half callback for the
 *				near-full IRQs.
 * @soc: Datapath SoC handle
 * @int_ctx: Interrupt context
 * @dp_budget: Budget of the work that can be done in the bottom half
 *
 * Return: work done in the handler
 */
static uint32_t
dp_service_near_full_srngs_be(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t dp_budget)
{
	int ring = 0;
	int budget = dp_budget;
	uint32_t work_done = 0;
	uint32_t remaining_quota = dp_budget;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	int tx_ring_near_full_mask = int_ctx->tx_ring_near_full_mask;
	int rx_near_full_grp_1_mask = int_ctx->rx_near_full_grp_1_mask;
	int rx_near_full_grp_2_mask = int_ctx->rx_near_full_grp_2_mask;
	int rx_near_full_mask = rx_near_full_grp_1_mask |
				rx_near_full_grp_2_mask;

	dp_verbose_debug("rx_ring_near_full 0x%x tx_ring_near_full 0x%x",
			 rx_near_full_mask,
			 tx_ring_near_full_mask);

	if (rx_near_full_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_rx_nf_process(int_ctx,
					soc->reo_dest_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_near_full_masks[ring]++;
				dp_verbose_debug("rx NF mask 0x%x ring %d, work_done %d budget %d",
						 rx_near_full_mask, ring,
						 work_done,
						 budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (tx_ring_near_full_mask) {
		for (ring = 0; ring < MAX_TCL_DATA_RINGS; ring++) {
			if (!(tx_ring_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_tx_comp_nf_handler(int_ctx, soc,
					soc->tx_comp_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_tx_comp_ring_near_full_masks[ring]++;
				dp_verbose_debug("tx NF mask 0x%x ring %d, work_done %d budget %d",
						 tx_ring_near_full_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					break;
				remaining_quota = budget;
			}
		}
	}

	intr_stats->num_near_full_masks++;

budget_done:
	return dp_budget - budget;
}
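
/*
 * Budget walk-through (illustrative numbers): with dp_budget = 64, if REO
 * ring 0 reaps 40 entries, budget drops to 24 and remaining_quota is
 * refreshed to 24 before the next near-full ring is serviced; once budget
 * reaches 0 the handler jumps to budget_done and reports
 * dp_budget - budget, i.e. the total work done across all rings.
 */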

/**
 * dp_srng_test_and_update_nf_params_be() - Check if the srng is in near full
 *				state and set the reap_limit appropriately
 *				as per the near full state
 * @soc: Datapath soc handle
 * @dp_srng: Datapath handle for SRNG
 * @max_reap_limit: [Output Buffer] Buffer to set the max reap limit as per
 *			the srng near-full state
 *
 * Return: 1, if the srng is in near-full state
 *	   0, if the srng is not in near-full state
 */
static int
dp_srng_test_and_update_nf_params_be(struct dp_soc *soc,
				     struct dp_srng *dp_srng,
				     int *max_reap_limit)
{
	return _dp_srng_test_and_update_nf_params(soc, dp_srng, max_reap_limit);
}

/**
 * dp_init_near_full_arch_ops_be() - Initialize the arch ops handler for the
 *			near full IRQ handling operations.
 * @arch_ops: arch ops handle
 *
 * Return: none
 */
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_service_near_full_srngs = dp_service_near_full_srngs_be;
	arch_ops->dp_srng_test_and_update_nf_params =
					dp_srng_test_and_update_nf_params_be;
}

#else
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
}
#endif

void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
	arch_ops->dp_rx_process = dp_rx_process_be;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_be;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
				dp_wbm_get_rx_desc_from_hal_desc_be;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_be;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_be;

	arch_ops->txrx_soc_attach = dp_soc_attach_be;
	arch_ops->txrx_soc_detach = dp_soc_detach_be;
	arch_ops->txrx_soc_init = dp_soc_init_be;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_be;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_be;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be;

	dp_init_near_full_arch_ops_be(arch_ops);
}
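
/*
 * Registration sketch (hypothetical call site, not in this file): the
 * common soc attach path is expected to select this initializer for
 * Beryllium targets, roughly:
 *
 *	if (the target is a BE/Beryllium device)
 *		dp_initialize_arch_ops_be(&soc->arch_ops);
 *
 * after which generic DP code dispatches through soc->arch_ops without
 * knowing the underlying HW family.
 */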
748