/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_rh.h"
#include "dp_rh_tx.h"
#include "dp_rh_htt.h"
#include "dp_tx_desc.h"
#include "dp_rh_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>
#include <dp_rings.h>
#include <ce_api.h>
#include <ce_internal.h>

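/**
 * dp_srng_init_rh() - Initialize an SRNG ring
 * @soc: dp soc handle
 * @srng: srng to be initialized
 * @ring_type: ring type
 * @ring_num: ring number within the ring type
 * @mac_id: mac id to which the ring belongs
 *
 * MSI setup and interrupt thresholds are configured only in monitor
 * mode; otherwise the ring is set up with default parameters.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */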
static QDF_STATUS
dp_srng_init_rh(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
		int ring_num, int mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() ==
	    QDF_GLOBAL_MONITOR_MODE) {
		if (soc->intr_mode == DP_INTR_MSI &&
		    !dp_skip_msi_cfg(soc, ring_type)) {
			dp_srng_msi_setup(soc, srng, &ring_params,
					  ring_type, ring_num);
			dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
					 ring_type, ring_num);
		} else {
			ring_params.msi_data = 0;
			ring_params.msi_addr = 0;
			dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
			dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
					 ring_type, ring_num);
		}

		dp_srng_configure_interrupt_thresholds(soc, &ring_params,
						       ring_type, ring_num,
						       srng->num_entries);
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params, 0);

	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

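/**
 * dp_peer_setup_rh() - Peer setup routine for the RH target
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @setup_info: peer setup info (NULL triggers legacy setup)
 *
 * Return: QDF_STATUS
 */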
static QDF_STATUS
dp_peer_setup_rh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		 uint8_t *peer_mac,
		 struct cdp_peer_setup_info *setup_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);
	enum wlan_op_mode vdev_opmode;

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	vdev = peer->vdev;
	if (!vdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	/* save vdev-related members in case the vdev is freed */
	vdev_opmode = vdev->opmode;
	pdev = vdev->pdev;

	dp_info("pdev: %d vdev: %d opmode: %u",
		pdev->pdev_id, vdev->vdev_id, vdev->opmode);

	/*
	 * There are corner cases where AD1 = AD2 = the VAP's address,
	 * i.e. both devices have the same MAC address. In these cases
	 * we want such packets to be processed in the NULL queue
	 * handler, which is the REO2TCL ring. For this reason we should
	 * not set up REO queues and the default route for the bss_peer.
	 */
	dp_monitor_peer_tx_init(pdev, peer);

	if (!setup_info)
		if (dp_peer_legacy_setup(soc, peer) !=
				QDF_STATUS_SUCCESS) {
			status = QDF_STATUS_E_RESOURCES;
			goto fail;
		}

	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	if (vdev_opmode != wlan_op_mode_monitor)
		dp_peer_rx_init(pdev, peer);

	dp_peer_ppdu_delayed_ba_init(peer);

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}

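/**
 * dp_peer_map_detach_rh() - Release peer map resources
 * @soc: dp soc handle
 *
 * Detaches the WDS handling and the AST/MEC tables set up by
 * dp_peer_map_attach_rh(). A no-op when AST offload is disabled.
 *
 * Return: none
 */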
#ifdef AST_OFFLOAD_ENABLE
static void dp_peer_map_detach_rh(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

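/**
 * dp_peer_map_attach_rh() - Allocate peer map resources
 * @soc: dp soc handle
 *
 * Sets max_peer_id and attaches the AST table, AST hash, MEC hash
 * and WDS handling, unwinding the earlier steps on failure.
 *
 * Return: QDF_STATUS
 */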
static QDF_STATUS dp_peer_map_attach_rh(struct dp_soc *soc)
{
	QDF_STATUS status;

	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
static void dp_peer_map_detach_rh(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_rh(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_soc_cfg_init_rh() - initialize target specific configuration
 *			  during dp_soc_init
 * @soc: dp soc handle
 */
static void dp_soc_cfg_init_rh(struct dp_soc *soc)
{
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_WCN6450:
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}

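/**
 * dp_soc_cfg_attach_rh() - initialize target specific configuration
 *			    during dp_soc_attach
 * @soc: dp soc handle
 */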
static void dp_soc_cfg_attach_rh(struct dp_soc *soc)
{
	int target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_WCN6450:
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	/*
	 * Keep the number of TCL and TX completion rings; this is
	 * equivalent to the number of TX interface rings.
	 */
	soc->num_tx_comp_rings =
		wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
	soc->num_tcl_data_rings =
		wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
}

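/**
 * dp_get_context_size_rh() - Size of the RH variant of a DP context
 * @context_type: context to be allocated (soc/pdev/vdev/peer)
 *
 * Return: size in bytes, or 0 for an unknown context type
 */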
qdf_size_t dp_get_context_size_rh(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_SOC:
		return sizeof(struct dp_soc_rh);
	case DP_CONTEXT_TYPE_PDEV:
		return sizeof(struct dp_pdev_rh);
	case DP_CONTEXT_TYPE_VDEV:
		return sizeof(struct dp_vdev_rh);
	case DP_CONTEXT_TYPE_PEER:
		return sizeof(struct dp_peer_rh);
	default:
		return 0;
	}
}

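/**
 * dp_mon_get_context_size_rh() - Size of the RH variant of a DP
 *				  monitor context
 * @context_type: monitor context to be allocated (soc/pdev)
 *
 * Return: size in bytes, or 0 for an unknown context type
 */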
qdf_size_t dp_mon_get_context_size_rh(enum dp_context_type context_type)
{
	switch (context_type) {
	case DP_CONTEXT_TYPE_MON_PDEV:
		return sizeof(struct dp_mon_pdev_rh);
	case DP_CONTEXT_TYPE_MON_SOC:
		return sizeof(struct dp_mon_soc_rh);
	default:
		return 0;
	}
}

static QDF_STATUS dp_soc_attach_rh(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_detach_rh(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

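/**
 * dp_soc_deinit_rh() - De-initialize the RH DP soc
 * @soc: dp soc handle
 *
 * Undoes dp_soc_init_rh(): deregisters the RX offload flush callback,
 * releases peer map and AST state, stats work queue and locks, SRNGs
 * and the HTT handle.
 *
 * Return: QDF_STATUS_SUCCESS
 */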
static QDF_STATUS dp_soc_deinit_rh(struct dp_soc *soc)
{
	struct htt_soc *htt_soc = soc->htt_handle;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* Deregister RX offload flush handlers */
	hif_offld_flush_cb_deregister(soc->hif_handle);

	dp_monitor_soc_deinit(soc);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		dp_peer_map_detach_rh(soc);
		soc->peer_map_attach_success = FALSE;
	}

	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_peer_mec_spinlock_destroy(soc);

	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	qdf_nbuf_queue_free(&soc->invalid_buf_queue);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	qdf_spinlock_destroy(&soc->vdev_map_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	dp_soc_print_inactive_objects(soc);
	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
			     WLAN_MD_DP_SOC, "dp_soc");

	return QDF_STATUS_SUCCESS;
}

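/**
 * dp_soc_init_rh() - Initialize the RH DP soc
 * @soc: dp soc handle
 * @htc_handle: HTC handle for host-target interface
 * @hif_handle: opaque HIF handle
 *
 * Attaches HTT, sets the interrupt mode, initializes SRNGs, TX
 * descriptor pools, RX defrag state and the HTT stats work queue,
 * and registers the RX offload flush callback.
 *
 * Return: opaque soc handle on success, NULL on failure
 */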
static void *dp_soc_init_rh(struct dp_soc *soc, HTC_HANDLE htc_handle,
			    struct hif_opaque_softc *hif_handle)
{
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
	bool is_monitor_mode = false;
	uint8_t i;

	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
			  WLAN_MD_DP_SOC, "dp_soc");

	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		goto fail1;

	htt_soc = htt_soc_attach(soc, htc_handle);
	if (!htt_soc)
		goto fail1;

	soc->htt_handle = htt_soc;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	htt_set_htc_handle(htt_soc, htc_handle);

	dp_soc_cfg_init_rh(soc);

	dp_monitor_soc_cfg_init(soc);

	/* Note: Any SRNG ring initialization should happen only after
	 * Interrupt mode is set and followed by filling up the
	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
	 */
	dp_soc_set_interrupt_mode(soc);
	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() ==
	    QDF_GLOBAL_MONITOR_MODE) {
		is_monitor_mode = true;
		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
	} else {
		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
	}

	if (is_monitor_mode)
		wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, 0,
					     soc->intr_mode, is_monitor_mode,
					     false, soc->umac_reset_supported);
	if (dp_soc_srng_init(soc)) {
		dp_init_err("%pK: dp_soc_srng_init failed", soc);
		goto fail3;
	}

	if (dp_htt_soc_initialize_rh(soc->htt_handle, soc->ctrl_psoc,
				     htt_get_htc_handle(htt_soc),
				     soc->hal_soc, soc->osdev) == NULL)
		goto fail4;

	/* Initialize descriptors in TCL Rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[i].hal_srng);
	}

	if (dp_soc_tx_desc_sw_pools_init(soc)) {
		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
		goto fail5;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;
	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;

	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
	qdf_spinlock_create(&soc->vdev_map_lock);
	qdf_atomic_init(&soc->num_tx_outstanding);
	qdf_atomic_init(&soc->num_tx_exception);
	soc->num_tx_allowed =
		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);

	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
	soc->rx.defrag.next_flush_ms = 0;
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

	dp_monitor_soc_init(soc);

	qdf_atomic_set(&soc->cmn_init_done, 1);

	qdf_nbuf_queue_init(&soc->htt_stats.msg);

	qdf_spinlock_create(&soc->ast_lock);
	dp_peer_mec_spinlock_create(soc);

	qdf_nbuf_queue_init(&soc->invalid_buf_queue);

	TAILQ_INIT(&soc->inactive_peer_list);
	qdf_spinlock_create(&soc->inactive_peer_list_lock);
	TAILQ_INIT(&soc->inactive_vdev_list);
	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	/* Register RX offload flush handlers */
	hif_offld_flush_cb_register(soc->hif_handle, dp_rx_data_flush);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	soc->vdev_stats_id_map = 0;

	return soc;
fail5:
	htt_soc_htc_dealloc(soc->htt_handle);
fail4:
	dp_soc_srng_deinit(soc);
fail3:
	htt_htc_pkt_pool_free(htt_soc);
fail2:
	htt_soc_detach(htt_soc);
fail1:
	return NULL;
}

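/**
 * dp_soc_interrupt_detach_rh() - Deregister interrupt handlers
 * @txrx_soc: CDP soc handle
 *
 * Frees the poll timer or the HIF ext group interrupts, clears the
 * per-context ring masks and releases LRO and event history state.
 *
 * Return: none
 */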
static void dp_soc_interrupt_detach_rh(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_free(&soc->int_timer);
	} else {
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}

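/**
 * dp_soc_interrupt_attach_rh() - Register interrupt handlers
 * @txrx_soc: CDP soc handle
 *
 * Registers one HIF ext group (NAPI) per interrupt context. Interrupt
 * registration is done only in monitor mode; in other modes this
 * function returns success without registering anything.
 *
 * Return: QDF_STATUS
 */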
static QDF_STATUS dp_soc_interrupt_attach_rh(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i = 0;
	int num_irq = 0;
	int lmac_id = 0;
	int napi_scale;

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);

	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() !=
	    QDF_GLOBAL_MONITOR_MODE)
		return QDF_STATUS_SUCCESS;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);

		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		napi_scale = wlan_cfg_get_napi_scale_factor(soc->wlan_cfg_ctx);
		if (!napi_scale)
			napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;

		ret = hif_register_ext_group(soc->hif_handle,
					     num_irq, irq_id_map, dp_service_srngs_wrapper,
					     &soc->intr_ctx[i], "dp_intr",
					     HIF_EXEC_NAPI_TYPE, napi_scale);

		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
			 i, num_irq, irq_id_map[0], irq_id_map[1]);

		if (ret) {
			dp_init_err("%pK: failed, ret = %d", soc, ret);
			dp_soc_interrupt_detach_rh(txrx_soc);
			return QDF_STATUS_E_FAILURE;
		}

		hif_event_history_init(soc->hif_handle, i);
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}

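/**
 * dp_soc_attach_poll_rh() - Attach DP in poll mode
 * @txrx_soc: CDP soc handle
 *
 * Configures the per-context RX monitor ring masks and initializes a
 * poll timer instead of registering interrupts.
 *
 * Return: QDF_STATUS_SUCCESS
 */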
static QDF_STATUS dp_soc_attach_poll_rh(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint32_t lmac_id = 0;
	int i;

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_mon_ring_mask =
				wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);

		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

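/**
 * dp_service_srngs_rh() - Top level handler for DP ring servicing
 * @dp_ctx: the dp interrupt context
 * @dp_budget: number of frames/descriptors that can be processed
 * @cpu: CPU on which this handler is running
 *
 * Services the LMAC rings when the monitor vdev timer is not running,
 * then clears the per-CPU service bit and notifies the FW callback.
 *
 * Return: number of frames/descriptors processed
 */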
static uint32_t dp_service_srngs_rh(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;

	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}

/**
 * dp_pdev_fill_tx_endpoint_info_rh() - Prefill fixed TX endpoint information
 *					that is used during packet transmit
 * @pdev: Handle to DP pdev struct
 *
 * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_NOENT
 */
static QDF_STATUS dp_pdev_fill_tx_endpoint_info_rh(struct dp_pdev *pdev)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(pdev->soc);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	struct hif_opaque_softc *hif_handle = pdev->soc->hif_handle;
	int ul_is_polled, dl_is_polled;
	uint8_t ul_pipe, dl_pipe;
	int status;

	status = hif_map_service_to_pipe(hif_handle, HTT_DATA2_MSG_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("Failed to map tx pipe: %d", status);
		return QDF_STATUS_E_NOENT;
	}

	tx_ep_info->ce_tx_hdl = hif_get_ce_handle(hif_handle, ul_pipe);

	tx_ep_info->download_len = HAL_TX_DESC_LEN_BYTES +
				   sizeof(struct tlv_32_hdr) +
				   DP_RH_TX_HDR_SIZE_OUTER_HDR_MAX +
				   DP_RH_TX_HDR_SIZE_802_1Q +
				   DP_RH_TX_HDR_SIZE_LLC_SNAP +
				   DP_RH_TX_HDR_SIZE_IP;

	tx_ep_info->tx_endpoint = rh_soc->tx_endpoint;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_rh(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return dp_pdev_fill_tx_endpoint_info_rh(pdev);
}

static QDF_STATUS dp_pdev_detach_rh(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_rh(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_rh(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

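/**
 * dp_get_soc_context_size_rh() - Size of the RH DP soc context
 *
 * Return: size of struct dp_soc_rh in bytes
 */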
qdf_size_t dp_get_soc_context_size_rh(void)
{
	return sizeof(struct dp_soc_rh);
}

#ifdef NO_RX_PKT_HDR_TLV
/**
 * dp_rxdma_ring_sel_cfg_rh() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_rh(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 0;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	/* Not subscribing to rx_pkt_header */
	htt_tlv_filter.rx_header_offset = 0;
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, buf_size,
					    &htt_tlv_filter);
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status))
		status = dp_htt_h2t_rx_ring_rfs_cfg(soc->htt_handle);

	return status;
}
#else

static QDF_STATUS
dp_rxdma_ring_sel_cfg_rh(struct dp_soc *soc)
{
	int i;
	int mac_id;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_srng *rx_mac_srng;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.packet_header = 1;

	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 0;

	htt_tlv_filter.fp_mgmt_filter = 0;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
					 FILTER_DATA_MCAST |
					 FILTER_DATA_DATA);
	htt_tlv_filter.mo_mgmt_filter = 0;
	htt_tlv_filter.mo_ctrl_filter = 0;
	htt_tlv_filter.mo_data_filter = 0;
	htt_tlv_filter.md_data_filter = 0;

	htt_tlv_filter.offset_valid = true;

	htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
	htt_tlv_filter.rx_header_offset =
				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_start_offset =
				hal_rx_mpdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_mpdu_end_offset =
				hal_rx_mpdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_start_offset =
				hal_rx_msdu_start_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_msdu_end_offset =
				hal_rx_msdu_end_offset_get(soc->hal_soc);
	htt_tlv_filter.rx_attn_offset =
				hal_rx_attn_offset_get(soc->hal_soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev =
				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
			/*
			 * Obtain lmac id from pdev to access the LMAC ring
			 * in soc context
			 */
			int lmac_id =
				dp_get_lmac_id_for_pdev_id(soc, mac_id,
							   pdev->pdev_id);

			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					    rx_mac_srng->hal_srng,
					    RXDMA_BUF, buf_size,
					    &htt_tlv_filter);
		}
	}

	if (QDF_IS_STATUS_SUCCESS(status))
		status = dp_htt_h2t_rx_ring_rfs_cfg(soc->htt_handle);

	return status;
}
#endif

static void dp_soc_srng_deinit_rh(struct dp_soc *soc)
{
}

static void dp_soc_srng_free_rh(struct dp_soc *soc)
{
}

static QDF_STATUS dp_soc_srng_alloc_rh(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_srng_init_rh(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_implicit_rbm_set_rh(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}

static QDF_STATUS dp_txrx_set_vdev_param_rh(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_get_rx_hash_key_rh(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}

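/**
 * dp_update_ring_hptp_rh() - Flush the CE TX ring write index
 * @soc: dp soc handle
 * @force_flush: flush the write index unconditionally when true
 *
 * Return: none
 */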
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
static void dp_update_ring_hptp_rh(struct dp_soc *soc, bool force_flush)
{
	struct dp_pdev_rh *rh_pdev =
			dp_get_rh_pdev_from_dp_pdev(soc->pdev_list[0]);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;

	ce_flush_tx_ring_write_idx(tx_ep_info->ce_tx_hdl, force_flush);
}
#endif

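/**
 * dp_initialize_arch_ops_rh() - Populate the DP arch ops table with
 *				 the RH implementations
 * @arch_ops: arch ops table to be filled in
 */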
void dp_initialize_arch_ops_rh(struct dp_arch_ops *arch_ops)
{
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_rh;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_rh;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_rh;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_rh;
	arch_ops->dp_tx_desc_pool_alloc = dp_tx_desc_pool_alloc_rh;
	arch_ops->dp_tx_desc_pool_free = dp_tx_desc_pool_free_rh;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_rh;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_rh;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_rh;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_rh;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_rh;
	arch_ops->txrx_get_context_size = dp_get_context_size_rh;
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_rh;
	arch_ops->txrx_soc_attach = dp_soc_attach_rh;
	arch_ops->txrx_soc_detach = dp_soc_detach_rh;
	arch_ops->txrx_soc_init = dp_soc_init_rh;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_rh;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_rh;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_rh;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_rh;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_rh;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_rh;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_rh;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_rh;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_rh;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_rh;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_rh;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_rh;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_rh;
	arch_ops->dp_rx_intrabss_mcast_handler =
					dp_rx_intrabss_handle_nawds_rh;
	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_rh;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_rh;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_rh;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_rh;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_rh;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_rh;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_rh;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_rh;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_rh;
	arch_ops->reo_remap_config = dp_reo_remap_config_rh;
	arch_ops->txrx_peer_setup = dp_peer_setup_rh;
	arch_ops->txrx_srng_init = dp_srng_init_rh;
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp_rh;
#endif
	arch_ops->dp_flush_tx_ring = dp_flush_tx_ring_rh;
	arch_ops->dp_soc_interrupt_attach = dp_soc_interrupt_attach_rh;
	arch_ops->dp_soc_attach_poll = dp_soc_attach_poll_rh;
	arch_ops->dp_soc_interrupt_detach = dp_soc_interrupt_detach_rh;
	arch_ops->dp_service_srngs = dp_service_srngs_rh;
}