xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_be.h"
#include "dp_be_tx.h"
#include "dp_be_rx.h"
#include <hal_be_api.h>

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 4, HAL_BE_WBM_SW4_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
	{3, 6, HAL_BE_WBM_SW5_BM_ID, 0},
	{4, 7, HAL_BE_WBM_SW6_BM_ID, 0}
};

#else

static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_BE_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
	{3, 3, HAL_BE_WBM_SW3_BM_ID, 0},
	{4, 4, HAL_BE_WBM_SW4_BM_ID, 0}
};
#endif

static void dp_soc_cfg_attach_be(struct dp_soc *soc)
{
	soc->wlan_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
}

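/*
 * Illustrative sketch only (not part of the driver): once
 * dp_soc_cfg_attach_be() has attached g_tcl_wbm_map_array, a caller can
 * resolve the WBM completion ring and RBM id for a given TCL ring from
 * the cfg context. The guard macro and helper name below are
 * hypothetical.
 */
#ifdef DP_BE_EXAMPLE_SNIPPETS
static inline uint8_t
dp_example_rbm_id_for_tcl_ring(struct dp_soc *soc, uint8_t tcl_ring_num)
{
	struct wlan_cfg_tcl_wbm_ring_num_map *map =
		&soc->wlan_cfg_ctx->tcl_wbm_map_array[tcl_ring_num];

	/*
	 * e.g. with WLAN_MAX_PDEVS == 1, TCL ring 1 completes on WBM
	 * ring 4 and releases buffers via HAL_BE_WBM_SW4_BM_ID
	 */
	return map->wbm_rbm_id;
}
#endif
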
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_be);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_be);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_be);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_be);
	default:
		return 0;
	}
}

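/*
 * Usage sketch (illustrative): the arch-neutral layer can size a
 * BE-specific context without knowing the concrete type, e.g.
 * dp_get_context_size_be(DP_CONTEXT_TYPE_SOC) returns
 * sizeof(struct dp_soc_be), while 0 flags an unknown context type.
 */
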
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/**
 * dp_cc_wbm_sw_en_cfg() - configure HW cookie conversion enablement
 *			   per wbm2sw ring
 * @cc_cfg: HAL HW cookie conversion configuration structure pointer
 *
 * Return: None
 */
static inline
void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
{
	cc_cfg->wbm2sw6_cc_en = 1;
	cc_cfg->wbm2sw5_cc_en = 1;
	cc_cfg->wbm2sw4_cc_en = 1;
	cc_cfg->wbm2sw3_cc_en = 1;
	cc_cfg->wbm2sw2_cc_en = 1;
	/* disable HW CC for wbm2sw1, as that ring is used by FW */
	cc_cfg->wbm2sw1_cc_en = 0;
	cc_cfg->wbm2sw0_cc_en = 1;
	cc_cfg->wbm2fw_cc_en = 0;
}
#else
static inline
void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
{
	cc_cfg->wbm2sw6_cc_en = 1;
	cc_cfg->wbm2sw5_cc_en = 1;
	cc_cfg->wbm2sw4_cc_en = 1;
	cc_cfg->wbm2sw3_cc_en = 1;
	cc_cfg->wbm2sw2_cc_en = 1;
	cc_cfg->wbm2sw1_cc_en = 1;
	cc_cfg->wbm2sw0_cc_en = 1;
	cc_cfg->wbm2fw_cc_en = 0;
}
#endif

/**
 * dp_cc_reg_cfg_init() - initialize and configure HW cookie
 *			  conversion register
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 * @is_4k_align: page address is 4K aligned
 *
 * Return: None
 */
static void dp_cc_reg_cfg_init(struct dp_soc *soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       bool is_4k_align)
{
	struct hal_hw_cc_config cc_cfg = { 0 };

	if (!soc->wlan_cfg_ctx->hw_cc_enabled) {
		dp_info("INI skip HW CC register setting");
		return;
	}

	cc_cfg.lut_base_addr_31_0 = cc_ctx->cmem_base;
	cc_cfg.cc_global_en = true;
	cc_cfg.page_4k_align = is_4k_align;
	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
	cc_cfg.cookie_page_msb = DP_CC_DESC_ID_PPT_PAGE_OS_MSB;
	/* bit 36 must be set so that HW knows this is a CMEM address */
	cc_cfg.lut_base_addr_39_32 = 0x10;

	cc_cfg.error_path_cookie_conv_en = true;
	cc_cfg.release_path_cookie_conv_en = true;
	dp_cc_wbm_sw_en_cfg(&cc_cfg);

	hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
}

/**
 * dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
 * @hal_soc_hdl: HAL SOC handle
 * @offset: CMEM address
 * @value: value to write
 *
 * Return: None.
 */
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{
	hal_cmem_write(hal_soc_hdl, offset, value);
}

/**
 * dp_hw_cc_cmem_addr_init() - Check and initialize CMEM base address for
 *			       HW cookie conversion
 * @soc: SOC handle
 * @cc_ctx: cookie conversion context pointer
 *
 * Return: 0 in case of success, else error value
 */
static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	dp_info("cmem base 0x%llx, size 0x%llx",
		soc->cmem_base, soc->cmem_size);
	/* get CMEM for cookie conversion */
	if (soc->cmem_size < DP_CC_PPT_MEM_SIZE) {
		dp_err("cmem_size %llu bytes < 4K", soc->cmem_size);
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->cmem_base = (uint32_t)(soc->cmem_base +
					DP_CC_MEM_OFFSET_IN_CMEM);

	return QDF_STATUS_SUCCESS;
}

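/*
 * Layout note (illustrative, inferred from the code above and from
 * dp_hw_cookie_conversion_init() below): the PPT occupies CMEM starting
 * at soc->cmem_base + DP_CC_MEM_OFFSET_IN_CMEM and must fit within
 * DP_CC_PPT_MEM_SIZE; PPT entry i is then written at
 * cc_ctx->cmem_base + i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED.
 */
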
#else

static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
				      struct dp_hw_cookie_conversion_t *cc_ctx,
				      bool is_4k_align) {}

static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{ }

static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
				struct dp_soc *soc,
				struct dp_hw_cookie_conversion_t *cc_ctx)
{
	return QDF_STATUS_SUCCESS;
}
#endif

static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t max_tx_rx_desc_num, num_spt_pages, i = 0;
	struct dp_spt_page_desc *spt_desc;
	struct qdf_mem_dma_page_t *dma_page;
	QDF_STATUS qdf_status;

	/* HW cookie conversion is not used in FTM mode */
	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cc_cmem_addr_init(soc, cc_ctx);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* estimate how many SPT DDR pages are needed */
	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
			WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
	dp_info("num_spt_pages needed %d", num_spt_pages);

	dp_desc_multi_pages_mem_alloc(soc, DP_HW_CC_SPT_PAGE_TYPE,
				      &cc_ctx->page_pool, qdf_page_size,
				      num_spt_pages, 0, false);
	if (!cc_ctx->page_pool.dma_pages) {
		dp_err("spt ddr pages allocation failed");
		return QDF_STATUS_E_RESOURCES;
	}
	cc_ctx->page_desc_base = qdf_mem_malloc(
			num_spt_pages * sizeof(struct dp_spt_page_desc));
	if (!cc_ctx->page_desc_base) {
		dp_err("spt page descs allocation failed");
		goto fail_0;
	}

	/* initialize page descriptors */
	spt_desc = cc_ctx->page_desc_base;
	dma_page = cc_ctx->page_pool.dma_pages;
	while (i < num_spt_pages) {
		/* check if the page address is 4K aligned */
		if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
			dp_err("non-4K aligned page addr %pK",
			       (void *)dma_page[i].page_p_addr);
			goto fail_1;
		}

		spt_desc[i].page_v_addr =
					dma_page[i].page_v_addr_start;
		spt_desc[i].page_p_addr =
					dma_page[i].page_p_addr;
		i++;
	}

	cc_ctx->total_page_num = num_spt_pages;
	qdf_spinlock_create(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
fail_1:
	qdf_mem_free(cc_ctx->page_desc_base);
fail_0:
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);

	return QDF_STATUS_E_FAILURE;
}

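/*
 * Illustrative sketch only (an assumption mirroring the page math used
 * elsewhere in this file, not the exact HW bit layout): a descriptor
 * cookie splits into a PPT page index and an SPT entry offset, so a SW
 * equivalent of the HW lookup could be written as below. The guard
 * macro and helper name are hypothetical.
 */
#ifdef DP_BE_EXAMPLE_SNIPPETS
static inline void *
dp_example_cc_cookie_to_va(struct dp_hw_cookie_conversion_t *cc_ctx,
			   uint32_t cookie)
{
	uint32_t page = cookie / DP_CC_SPT_PAGE_MAX_ENTRIES; /* PPT index */
	uint32_t slot = cookie % DP_CC_SPT_PAGE_MAX_ENTRIES; /* SPT entry */
	uint64_t *spt = (uint64_t *)cc_ctx->page_desc_base[page].page_v_addr;

	/* each SPT entry is assumed to hold the 64-bit VA of one SW desc */
	return (void *)(uintptr_t)spt[slot];
}
#endif
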
static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	qdf_mem_free(cc_ctx->page_desc_base);
	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
				     &cc_ctx->page_pool, 0, false);
	qdf_spinlock_destroy(&cc_ctx->cc_lock);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	uint32_t i = 0;
	struct dp_spt_page_desc *spt_desc;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	if (!cc_ctx->total_page_num) {
		dp_err("total page num is 0");
		return QDF_STATUS_E_INVAL;
	}

	spt_desc = cc_ctx->page_desc_base;
	while (i < cc_ctx->total_page_num) {
		/*
		 * write the page PA to CMEM; for a 4K-aligned page the
		 * low bits are dropped here and appended back by HW
		 */
		dp_hw_cc_cmem_write(soc->hal_soc,
				    (cc_ctx->cmem_base +
				     i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED),
				    (spt_desc[i].page_p_addr >>
				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));

		spt_desc[i].ppt_index = i;
		spt_desc[i].avail_entry_index = 0;
		/* link page descs */
		if ((i + 1) != cc_ctx->total_page_num)
			spt_desc[i].next = &spt_desc[i + 1];
		else
			spt_desc[i].next = NULL;
		i++;
	}

	cc_ctx->page_desc_freelist = cc_ctx->page_desc_base;
	cc_ctx->free_page_num = cc_ctx->total_page_num;

	/* write WBM/REO cookie conversion CFG register */
	dp_cc_reg_cfg_init(soc, cc_ctx, true);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
		return QDF_STATUS_SUCCESS;

	cc_ctx->page_desc_freelist = NULL;
	cc_ctx->free_page_num = 0;

	return QDF_STATUS_SUCCESS;
}

uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc)
{
	uint16_t num_pages, count;
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;

	num_pages = (num_desc / DP_CC_SPT_PAGE_MAX_ENTRIES) +
			(num_desc % DP_CC_SPT_PAGE_MAX_ENTRIES ? 1 : 0);

	if (num_pages > cc_ctx->free_page_num) {
		dp_err("fail: num_pages required %d > free_page_num %d",
		       num_pages,
		       cc_ctx->free_page_num);
		return 0;
	}

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	*list_head = *list_tail = cc_ctx->page_desc_freelist;
	for (count = 0; count < num_pages; count++) {
		if (qdf_unlikely(!cc_ctx->page_desc_freelist)) {
			cc_ctx->page_desc_freelist = *list_head;
			*list_head = *list_tail = NULL;
			qdf_spin_unlock_bh(&cc_ctx->cc_lock);
			return 0;
		}
		*list_tail = cc_ctx->page_desc_freelist;
		cc_ctx->page_desc_freelist = cc_ctx->page_desc_freelist->next;
	}
	(*list_tail)->next = NULL;
	cc_ctx->free_page_num -= count;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);

	return count;
}

void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums)
{
	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
	struct dp_spt_page_desc *temp_list = NULL;

	qdf_spin_lock_bh(&cc_ctx->cc_lock);

	temp_list = cc_ctx->page_desc_freelist;
	cc_ctx->page_desc_freelist = *list_head;
	(*list_tail)->next = temp_list;
	cc_ctx->free_page_num += page_nums;
	*list_tail = NULL;
	*list_head = NULL;

	qdf_spin_unlock_bh(&cc_ctx->cc_lock);
}

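/*
 * Usage sketch only (the guard macro and function name are
 * hypothetical): a descriptor pool reserves SPT pages up front and
 * returns the same list on teardown; a zero return means the freelist
 * could not cover the request and nothing was taken.
 */
#ifdef DP_BE_EXAMPLE_SNIPPETS
static void dp_example_spt_page_usage(struct dp_soc_be *be_soc)
{
	struct dp_spt_page_desc *head = NULL, *tail = NULL;
	uint16_t num_pages;

	/* reserve enough SPT pages to track 1024 descriptors */
	num_pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, 1024);
	if (!num_pages)
		return;

	/* ... fill SPT entries and hand out cookies ... */

	dp_cc_spt_page_desc_free(be_soc, &head, &tail, num_pages);
}
#endif
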
static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	qdf_status = dp_tx_init_bank_profiles(be_soc);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return qdf_status;

	/* cookie conversion */
	qdf_status = dp_hw_cookie_conversion_attach(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_tx_deinit_bank_profiles(be_soc);

	dp_hw_cookie_conversion_detach(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	qdf_status = dp_hw_cookie_conversion_init(be_soc);

	return qdf_status;
}

static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	dp_hw_cookie_conversion_deinit(be_soc);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_be(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);

	/* Needs to be enabled after bring-up */
	be_vdev->vdev_id_check_en = false;

	if (be_vdev->bank_id == DP_BE_INVALID_BANK_ID) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAULT;
	}

	if (vdev->opmode == wlan_op_mode_sta)
		hal_tx_vdev_mcast_ctrl_set(soc->hal_soc, vdev->vdev_id,
					   HAL_TX_MCAST_CTRL_MEC_NOTIFY);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	dp_tx_put_bank_profile(be_soc, be_vdev);
	return QDF_STATUS_SUCCESS;
}

qdf_size_t dp_get_soc_context_size_be(void)
{
	return sizeof(struct dp_soc_be);
}

/**
 * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/*
	 * In the Beryllium chipset, the msdu_start, mpdu_end
	 * and rx_attn TLVs are part of msdu_end/mpdu_start
	 */
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
	htt_tlv_filter.rx_mpdu_end_offset = 0;
	htt_tlv_filter.rx_msdu_start_offset = 0;
	htt_tlv_filter.rx_attn_offset = 0;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);

	dp_info("TLV subscription\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
		"TLV offsets\n"
		"msdu_start %d, mpdu_end %d, attention %d\n"
		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
		htt_tlv_filter.msdu_start,
		htt_tlv_filter.mpdu_end,
		htt_tlv_filter.attention,
		htt_tlv_filter.mpdu_start,
		htt_tlv_filter.msdu_end,
		htt_tlv_filter.packet_header,
		htt_tlv_filter.packet,
		htt_tlv_filter.rx_msdu_start_offset,
		htt_tlv_filter.rx_mpdu_end_offset,
		htt_tlv_filter.rx_attn_offset,
		htt_tlv_filter.rx_mpdu_start_offset,
		htt_tlv_filter.rx_msdu_end_offset,
		htt_tlv_filter.rx_header_offset,
		htt_tlv_filter.rx_packet_offset);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					    &htt_tlv_filter);
		}
	}
	return status;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_service_near_full_srngs_be() - Main bottom half callback for the
 *				near-full IRQs.
 * @soc: Datapath SoC handle
 * @int_ctx: Interrupt context
 * @dp_budget: Budget of the work that can be done in the bottom half
 *
 * Return: work done in the handler
 */
static uint32_t
dp_service_near_full_srngs_be(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t dp_budget)
{
	int ring = 0;
	int budget = dp_budget;
	uint32_t work_done = 0;
	uint32_t remaining_quota = dp_budget;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	int tx_ring_near_full_mask = int_ctx->tx_ring_near_full_mask;
	int rx_near_full_grp_1_mask = int_ctx->rx_near_full_grp_1_mask;
	int rx_near_full_grp_2_mask = int_ctx->rx_near_full_grp_2_mask;
	int rx_near_full_mask = rx_near_full_grp_1_mask |
				rx_near_full_grp_2_mask;

	dp_verbose_debug("rx_ring_near_full 0x%x tx_ring_near_full 0x%x",
			 rx_near_full_mask,
			 tx_ring_near_full_mask);

	if (rx_near_full_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_rx_nf_process(int_ctx,
					soc->reo_dest_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_near_full_masks[ring]++;
				dp_verbose_debug("rx NF mask 0x%x ring %d, work_done %d budget %d",
						 rx_near_full_mask, ring,
						 work_done,
						 budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (tx_ring_near_full_mask) {
		for (ring = 0; ring < MAX_TCL_DATA_RINGS; ring++) {
			if (!(tx_ring_near_full_mask & (1 << ring)))
				continue;

			work_done = dp_tx_comp_nf_handler(int_ctx, soc,
					soc->tx_comp_ring[ring].hal_srng,
					ring, remaining_quota);
			if (work_done) {
				intr_stats->num_tx_comp_ring_near_full_masks[ring]++;
				dp_verbose_debug("tx NF mask 0x%x ring %d, work_done %d budget %d",
						 tx_ring_near_full_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					break;
				remaining_quota = budget;
			}
		}
	}

	intr_stats->num_near_full_masks++;

budget_done:
	return dp_budget - budget;
}

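/*
 * Budget walk-through (illustrative): with dp_budget = 64, if the RX
 * near-full pass reaps 40 entries, budget drops to 24 and
 * remaining_quota is refreshed to 24 for the TX completion pass; if
 * that pass reaps 10 more, dp_budget - budget = 64 - 14 = 50 is
 * returned as the total work done.
 */
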
/**
 * dp_srng_test_and_update_nf_params_be() - Check if the srng is in near full
 *				state and set the reap_limit appropriately
 *				as per the near full state
 * @soc: Datapath soc handle
 * @dp_srng: Datapath handle for SRNG
 * @max_reap_limit: [Output Buffer] Buffer to set the max reap limit as per
 *			the srng near-full state
 *
 * Return: 1, if the srng is in near-full state
 *	   0, if the srng is not in near-full state
 */
static int
dp_srng_test_and_update_nf_params_be(struct dp_soc *soc,
				     struct dp_srng *dp_srng,
				     int *max_reap_limit)
{
	return _dp_srng_test_and_update_nf_params(soc, dp_srng, max_reap_limit);
}

/**
 * dp_init_near_full_arch_ops_be() - Initialize the arch ops handler for the
 *			near full IRQ handling operations.
 * @arch_ops: arch ops handle
 *
 * Return: none
 */
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_service_near_full_srngs = dp_service_near_full_srngs_be;
	arch_ops->dp_srng_test_and_update_nf_params =
					dp_srng_test_and_update_nf_params_be;
}

#else
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
}
#endif

#ifdef WLAN_SUPPORT_PPEDS
static void dp_soc_ppe_srng_deinit(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
		return;

	dp_srng_deinit(soc, &be_soc->ppe_release_ring, PPE_RELEASE, 0);
	wlan_minidump_remove(be_soc->ppe_release_ring.base_vaddr_unaligned,
			     be_soc->ppe_release_ring.alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_PPE_RELEASE,
			     "ppe_release_ring");

	dp_srng_deinit(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0);
	wlan_minidump_remove(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
			     be_soc->ppe2tcl_ring.alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_PPE2TCL,
			     "ppe2tcl_ring");

	dp_srng_deinit(soc, &be_soc->reo2ppe_ring, REO2PPE, 0);
	wlan_minidump_remove(be_soc->reo2ppe_ring.base_vaddr_unaligned,
			     be_soc->reo2ppe_ring.alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_REO2PPE,
			     "reo2ppe_ring");
}

static void dp_soc_ppe_srng_free(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
		return;

	dp_srng_free(soc, &be_soc->ppe_release_ring);

	dp_srng_free(soc, &be_soc->ppe2tcl_ring);

	dp_srng_free(soc, &be_soc->reo2ppe_ring);
}

static QDF_STATUS dp_soc_ppe_srng_alloc(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint32_t entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	entries = wlan_cfg_get_dp_soc_reo2ppe_ring_size(soc_cfg_ctx);

	if (dp_srng_alloc(soc, &be_soc->reo2ppe_ring, REO2PPE,
			  entries, 0)) {
		dp_err("%pK: dp_srng_alloc failed for reo2ppe", soc);
		goto fail;
	}

	entries = wlan_cfg_get_dp_soc_ppe2tcl_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &be_soc->ppe2tcl_ring, PPE2TCL,
			  entries, 0)) {
		dp_err("%pK: dp_srng_alloc failed for ppe2tcl_ring", soc);
		goto fail;
	}

	entries = wlan_cfg_get_dp_soc_ppe_release_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &be_soc->ppe_release_ring, PPE_RELEASE,
			  entries, 0)) {
		dp_err("%pK: dp_srng_alloc failed for ppe_release_ring", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	dp_soc_ppe_srng_free(soc);
	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_soc_ppe_srng_init(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_srng_init(soc, &be_soc->reo2ppe_ring, REO2PPE, 0, 0)) {
		dp_err("%pK: dp_srng_init failed for reo2ppe", soc);
		goto fail;
	}

	wlan_minidump_log(be_soc->reo2ppe_ring.base_vaddr_unaligned,
			  be_soc->reo2ppe_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO2PPE,
			  "reo2ppe_ring");

	if (dp_srng_init(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0, 0)) {
		dp_err("%pK: dp_srng_init failed for ppe2tcl_ring", soc);
		goto fail;
	}

	wlan_minidump_log(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
			  be_soc->ppe2tcl_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_PPE2TCL,
			  "ppe2tcl_ring");

	if (dp_srng_init(soc, &be_soc->ppe_release_ring, PPE_RELEASE, 0, 0)) {
		dp_err("%pK: dp_srng_init failed for ppe_release_ring", soc);
		goto fail;
	}

	wlan_minidump_log(be_soc->ppe_release_ring.base_vaddr_unaligned,
			  be_soc->ppe_release_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_PPE_RELEASE,
			  "ppe_release_ring");

	return QDF_STATUS_SUCCESS;
fail:
	dp_soc_ppe_srng_deinit(soc);
	return QDF_STATUS_E_NOMEM;
}
#else
static void dp_soc_ppe_srng_deinit(struct dp_soc *soc)
{
}

static void dp_soc_ppe_srng_free(struct dp_soc *soc)
{
}

static QDF_STATUS dp_soc_ppe_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_ppe_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

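/*
 * Lifecycle note (illustrative): the PPE rings follow the usual DP SRNG
 * sequence: dp_srng_alloc() at soc attach, dp_srng_init() plus minidump
 * registration at soc init, and deinit/free in reverse order on
 * teardown, all gated on wlan_cfg_get_dp_soc_is_ppe_enabled(). The
 * dp_soc_srng_*_be() wrappers below drive this from the arch ops.
 */
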
static void dp_soc_srng_deinit_be(struct dp_soc *soc)
{
	dp_soc_ppe_srng_deinit(soc);
}

static void dp_soc_srng_free_be(struct dp_soc *soc)
{
	dp_soc_ppe_srng_free(soc);
}

static QDF_STATUS dp_soc_srng_alloc_be(struct dp_soc *soc)
{
	return dp_soc_ppe_srng_alloc(soc);
}

static QDF_STATUS dp_soc_srng_init_be(struct dp_soc *soc)
{
	return dp_soc_ppe_srng_init(soc);
}

#ifdef DP_TX_IMPLICIT_RBM_MAPPING
static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
	hal_tx_config_rbm_mapping_be(soc->hal_soc,
				     soc->tcl_data_ring[tx_ring_id].hal_srng,
				     bm_id);
}
#else
static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}
#endif

void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
	arch_ops->dp_rx_process = dp_rx_process_be;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_be;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
				dp_wbm_get_rx_desc_from_hal_desc_be;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_be;
	arch_ops->dp_rx_desc_cookie_2_va = dp_rx_desc_cookie_2_va_be;

	arch_ops->txrx_soc_attach = dp_soc_attach_be;
	arch_ops->txrx_soc_detach = dp_soc_detach_be;
	arch_ops->txrx_soc_init = dp_soc_init_be;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_be;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_be;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_be;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_be;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_be;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_be;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_be;

	dp_init_near_full_arch_ops_be(arch_ops);
}
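
/*
 * Wiring sketch (illustrative): the arch-neutral core is expected to
 * pick this initializer for Beryllium targets, roughly:
 *
 *	dp_initialize_arch_ops_be(&soc->arch_ops);
 *	soc->arch_ops.soc_cfg_attach(soc);
 *
 * The soc->arch_ops member name is an assumption here; only the ops
 * populated above are taken from this file.
 */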
955