/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_BUFFER_POOL_H_
#define _DP_RX_BUFFER_POOL_H_

#include "dp_types.h"
#include "qdf_nbuf.h"
#include "qdf_module.h"
#include "athdefs.h"
#include "wlan_cfg.h"
#include "dp_internal.h"
#include "dp_rx.h"

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
/**
 * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id);
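
/*
 * Illustrative pairing (sketch only; the attach/detach context and the
 * per-MAC loop are hypothetical, not mandated by this API):
 *
 *	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
 *		dp_rx_buffer_pool_init(soc, mac_id);
 *	...
 *	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
 *		dp_rx_buffer_pool_deinit(soc, mac_id);
 */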

/**
 * dp_rx_buffer_pool_deinit() - De-initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id);

/**
 * dp_rx_buffer_pool_refill() - Process the RX nbuf list and
 * refill the emergency buffer pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: true if the nbuf was consumed into the pool, false otherwise
 */
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id);
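
/*
 * Example caller pattern (sketch; the enclosing RX loop is hypothetical).
 * On a 'true' return the pool owns the nbuf, so the caller must stop
 * referencing it:
 *
 *	if (dp_rx_buffer_pool_refill(soc, nbuf, mac_id))
 *		continue;
 */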

/**
 * dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
 * back into the pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: None
 */
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
				 u8 mac_id);
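
/*
 * Example (sketch; the drop path shown is hypothetical): prefer this
 * over a direct qdf_nbuf_free() in RX drop paths so the buffer can be
 * recycled into the emergency pool:
 *
 *	dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
 */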

/**
 * dp_rx_buffer_pool_nbuf_alloc() - Allocate an nbuf for buffer replenish;
 * fall back to an nbuf from the pool if allocation fails
 * @soc: SoC handle
 * @mac_id: MAC ID
 * @rx_desc_pool: RX descriptor pool
 * @num_available_buffers: number of available buffers in the ring
 *
 * Return: nbuf
 */
qdf_nbuf_t dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t num_available_buffers);
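
/*
 * Example replenish step (sketch; 'num_entries_avail' stands in for a
 * hypothetical ring-state query and is not part of this API):
 *
 *	nbuf = dp_rx_buffer_pool_nbuf_alloc(soc, mac_id, rx_desc_pool,
 *					    num_entries_avail);
 *	if (!nbuf)
 *		break;
 */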

/**
 * dp_rx_buffer_pool_nbuf_map() - Map nbuf for buffer replenish
 * @soc: SoC handle
 * @rx_desc_pool: RX descriptor pool
 * @nbuf_frag_info_t: nbuf frag info
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t);
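
/*
 * Example (sketch; the error handling shown is illustrative only):
 *
 *	status = dp_rx_buffer_pool_nbuf_map(soc, rx_desc_pool,
 *					    &nbuf_frag_info);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_rx_buffer_pool_nbuf_free(soc,
 *					    nbuf_frag_info.virt_addr.nbuf,
 *					    mac_id);
 */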

/**
 * dp_rx_schedule_refill_thread() - Schedule RX refill thread to enqueue
 * buffers in refill pool
 * @soc: SoC handle
 *
 * Return: None
 */
static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	uint16_t head = buff_pool->head;
	uint16_t tail = buff_pool->tail;
	uint16_t num_refill;

	if (!buff_pool->is_initialized)
		return;

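	/*
	 * Free-slot count in the circular refill queue: one slot stays
	 * reserved so a full queue can be distinguished from an empty one.
	 */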
	if (tail > head)
		num_refill = (tail - head - 1);
	else
		num_refill = (buff_pool->max_bufq_len - head + tail - 1);

	if (soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread &&
	    num_refill >= DP_RX_REFILL_THRD_THRESHOLD)
		soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(
						dp_soc_to_cdp_soc_t(soc));
}
#else
/**
 * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	soc->rx_buff_pool[mac_id].is_initialized = false;
}

/**
 * dp_rx_buffer_pool_deinit() - De-initialize emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
}

/**
 * dp_rx_buffer_pool_refill() - Process the RX nbuf list and
 * refill the emergency buffer pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: true if the nbuf was consumed into the pool, false otherwise
 */
static inline
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	return false;
}

/**
 * dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
 * back into the pool
 * @soc: SoC handle
 * @nbuf: RX buffer
 * @mac_id: MAC ID
 *
 * Return: None
 */
static inline
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
				 u8 mac_id)
{
	qdf_nbuf_free(nbuf);
}

/**
 * dp_rx_buffer_pool_nbuf_alloc() - Allocate an nbuf for buffer replenish;
 * fall back to an nbuf from the pool if allocation fails
 * @soc: SoC handle
 * @mac_id: MAC ID
 * @rx_desc_pool: RX descriptor pool
 * @num_available_buffers: number of available buffers in the ring
 *
 * Return: nbuf
 */
static inline qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

/**
 * dp_rx_buffer_pool_nbuf_map() - Map nbuf for buffer replenish
 * @soc: SoC handle
 * @rx_desc_pool: RX descriptor pool
 * @nbuf_frag_info_t: nbuf frag info
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS status;

	status = qdf_nbuf_map_nbytes_single(soc->osdev,
					    (nbuf_frag_info_t->virt_addr).nbuf,
					    QDF_DMA_FROM_DEVICE,
					    rx_desc_pool->buf_size);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

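	/*
	 * Also create the audio-domain SMMU mapping for this buffer so
	 * targets that share RX buffers with the audio subsystem see it
	 * at the same physical address.
	 */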
	dp_audio_smmu_map(soc->osdev,
			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
						     QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
			  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
			  rx_desc_pool->buf_size);

	return status;
}

static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }

#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
#endif /* _DP_RX_BUFFER_POOL_H_ */