/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RH_RX_H_
#define _DP_RH_RX_H_

#include <dp_types.h>
#include <dp_rx.h>
#include "dp_rh.h"

/**
 * dp_rx_desc_pool_init_rh() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_pool_init_rh(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_rh() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_rx_desc_cookie_2_va_rh() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
static inline
struct dp_rx_desc *dp_rx_desc_cookie_2_va_rh(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
}

#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16
#define DP_PEER_METADATA_OFFLOAD_MASK	0x01000000
#define DP_PEER_METADATA_OFFLOAD_SHIFT	24

#define DP_PEER_METADATA_VDEV_ID_GET_RH(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

#define DP_PEER_METADATA_OFFLOAD_GET_RH(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK)	\
			>> DP_PEER_METADATA_OFFLOAD_SHIFT)

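/*
 * Example (illustrative values): for a peer metadata word of 0x01230005,
 * the accessors above yield
 *	DP_PEER_METADATA_VDEV_ID_GET_RH(0x01230005) == 0x23
 *	DP_PEER_METADATA_OFFLOAD_GET_RH(0x01230005) == 0x1
 */

/**
 * dp_rx_peer_metadata_peer_id_get_rh() - Get peer id from peer metadata
 * @soc: Handle to DP Soc structure (unused here)
 * @peer_metadata: peer metadata word to be parsed
 *
 * Return: peer_id field of the htt_rx_peer_metadata_v0 layout
 */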
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_rh(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v0 *metadata =
			(struct htt_rx_peer_metadata_v0 *)&peer_metadata;

	return metadata->peer_id;
}

/**
 * dp_rx_data_flush() - Flush RX data after reaping from RX rings
 *
 * @data: reference to the RX data to be flushed
 *
 * Return: None
 */
void
dp_rx_data_flush(void *data);

/**
 * dp_rx_data_indication_handler() - Handle RX data indication from rings
 *
 * @soc: DP soc reference
 * @data_ind: Data indication message info
 * @vdev_id: Vdev id
 * @peer_id: Peer id
 * @msdu_count: Number of MSDUs available in message
 *
 * Return: None
 */
void
dp_rx_data_indication_handler(struct dp_soc *soc, qdf_nbuf_t data_ind,
			      uint16_t vdev_id, uint16_t peer_id,
			      uint16_t msdu_count);

/**
 * dp_rx_frag_indication_handler() - Handle RX fragment indication from ring
 *
 * @soc: DP soc reference
 * @data_ind: Data indication message info
 * @vdev_id: Vdev id
 * @peer_id: Peer id
 *
 * Return: None
 */
void
dp_rx_frag_indication_handler(struct dp_soc *soc, qdf_nbuf_t data_ind,
			      uint16_t vdev_id, uint16_t peer_id);

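/**
 * dp_rx_intrabss_handle_nawds_rh() - NOP for RH arch implementation
 * @soc: Handle to DP Soc structure
 * @ta_peer: transmitting (source) txrx peer
 * @nbuf_copy: copy of the received nbuf
 * @tid_stats: per-TID Rx statistics
 * @link_id: link id
 *
 * Return: false always, since no intra-BSS NAWDS handling is done here
 */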
static inline bool
dp_rx_intrabss_handle_nawds_rh(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats,
			       uint8_t link_id)
{
	return false;
}

/**
 * dp_wbm_get_rx_desc_from_hal_desc_rh() - NOP in RH arch implementation
 *
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static inline
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_rh(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	return QDF_STATUS_SUCCESS;
}

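/**
 * dp_rx_word_mask_subscribe_rh() - NOP in RH arch implementation
 * @soc: Handle to DP Soc structure
 * @msg_word: message word to be updated (unused here)
 * @rx_filter: Rx filter configuration (unused here)
 *
 * Return: None
 */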
static inline
void dp_rx_word_mask_subscribe_rh(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
}

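/**
 * dp_peer_get_reo_hash_rh() - NOP for RH arch implementation
 * @vdev: Handle to DP vdev structure
 * @setup_info: peer setup info
 * @reo_dest: REO destination ring selection (not filled here)
 * @hash_based: hash-based steering flag (not filled here)
 * @lmac_peer_id_msb: lmac peer id msb (not filled here)
 *
 * Return: None
 */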
static inline
void dp_peer_get_reo_hash_rh(struct dp_vdev *vdev,
			     struct cdp_peer_setup_info *setup_info,
			     enum cdp_host_reo_dest_ring *reo_dest,
			     bool *hash_based,
			     uint8_t *lmac_peer_id_msb)
{
}

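/**
 * dp_reo_remap_config_rh() - NOP for RH arch implementation
 * @soc: Handle to DP Soc structure
 * @remap0: REO remap value 0 (not filled here)
 * @remap1: REO remap value 1 (not filled here)
 * @remap2: REO remap value 2 (not filled here)
 *
 * Return: false, as REO remap configuration is not applicable to RH
 */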
static inline
bool dp_reo_remap_config_rh(struct dp_soc *soc,
			    uint32_t *remap0,
			    uint32_t *remap1,
			    uint32_t *remap2)
{
	return false;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: DP soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @quota: quota to process
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input/output param of last prefetched SW desc
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t quota,
				    hal_ring_handle_t hal_ring_hdl,
				    hal_ring_desc_t *last_prefetched_hw_desc,
				    struct dp_rx_desc **last_prefetched_sw_desc)
{
}


/**
 * dp_peer_rx_reorder_queue_setup_rh() - NOP for RH arch implementation
 * @soc: Handle to DP Soc structure
 * @peer: DP peer structure
 * @tid_bitmap: tids to be set up
 * @ba_window_size: BA window size
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_rh(struct dp_soc *soc,
					     struct dp_peer *peer,
					     uint32_t tid_bitmap,
					     uint32_t ba_window_size)
{
	return QDF_STATUS_SUCCESS;
}
#endif