/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
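/*
 * dp_rx_buffer_pool_refill() - top up the emergency nbuf queue while
 * processing an MSDU chain, consuming the chain itself on allocation
 * failure.
 *
 * For each buffer in the (possibly scattered) MSDU, one new nbuf is
 * allocated and enqueued to the per-MAC emergency pool, stopping once
 * the pool holds DP_RX_BUFFER_POOL_SIZE entries. If any allocation
 * fails, the original chain is recycled into the pool through
 * dp_rx_buffer_pool_nbuf_free() and reported as consumed, so the
 * caller must not deliver it further up the stack.
 *
 * Return: true if the chain was consumed by the pool, false otherwise.
 */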
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (soc->wlan_cfg_ctx->rxdma1_enable)
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
			       DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);
		/* Failed to allocate a new nbuf; reset the chain and place
		 * it back into the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}

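/*
 * dp_rx_buffer_pool_nbuf_free() - return an RX nbuf to the emergency
 * pool, or release it to the OS when the pool is already full or not
 * initialized. The buffer is reset (reservation and alignment
 * reapplied) before being enqueued, so it can later be handed straight
 * back to the RXDMA ring.
 */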
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}

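/*
 * dp_rx_refill_buff_pool_enqueue() - fill the refill ring with
 * pre-allocated, DMA-mapped nbufs.
 *
 * The pool is a circular buffer: head is advanced only here and tail
 * only by the dequeue path, with one slot always left empty so a full
 * ring is distinguishable from an empty one. Buffers are allocated in
 * bursts of DP_RX_REFILL_BUFF_POOL_BURST; allocation or DMA-map
 * failures are skipped and the loop keeps retrying until the computed
 * free space has been filled.
 */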
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	qdf_device_t dev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	uint16_t num_refill;
	uint16_t total_num_refill;
	uint16_t total_count = 0;
	uint16_t head, tail;

	if (!soc)
		return;

	dev = soc->osdev;
	buff_pool = &soc->rx_refill_buff_pool;
	rx_desc_pool = &soc->rx_desc_buf[0];
	if (!buff_pool->is_initialized)
		return;

	head = buff_pool->head;
	tail = buff_pool->tail;
	if (tail > head)
		total_num_refill = (tail - head - 1);
	else
		total_num_refill = (buff_pool->max_bufq_len - head +
				    tail - 1);

	while (total_num_refill) {
		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
		else
			num_refill = total_num_refill;

		count = 0;
		for (i = 0; i < num_refill; i++) {
			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (qdf_unlikely(!nbuf))
				continue;

			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			dp_audio_smmu_map(dev,
					  qdf_mem_paddr_from_dmaaddr(dev,
						QDF_NBUF_CB_PADDR(nbuf)),
					  QDF_NBUF_CB_PADDR(nbuf),
					  rx_desc_pool->buf_size);

			buff_pool->buf_elem[head++] = nbuf;
			head &= (buff_pool->max_bufq_len - 1);
			count++;
		}

		if (count) {
			buff_pool->head = head;
			total_num_refill -= count;
			total_count += count;
		}
	}

	DP_STATS_INC(buff_pool->dp_pdev,
		     rx_refill_buff_pool.num_bufs_refilled,
		     total_count);
}

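/*
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-allocated nbuf
 * from the refill ring, or return NULL when head == tail (ring empty).
 */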
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	qdf_nbuf_t nbuf = NULL;
	uint16_t head, tail;

	head = buff_pool->head;
	tail = buff_pool->tail;

	if (head == tail)
		return NULL;

	nbuf = buff_pool->buf_elem[tail++];
	tail &= (buff_pool->max_bufq_len - 1);
	buff_pool->tail = tail;

	return nbuf;
}

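/*
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, falling back
 * through three sources in order:
 *
 * 1. the pre-mapped refill ring (fast path, no allocation);
 * 2. a fresh qdf_nbuf_alloc();
 * 3. the emergency queue, once consecutive allocation failures reach
 *    the threshold or num_available_buffers falls below 10% of the
 *    RXDMA ring size.
 *
 * Return: an nbuf on success, NULL if every source is exhausted.
 */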
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}

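/*
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an nbuf for the RX path if it
 * is not already mapped (buffers from the refill ring carry a valid
 * paddr and are skipped), then register the mapping with the audio
 * SMMU domain on success.
 */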
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (QDF_IS_STATUS_SUCCESS(ret))
			dp_audio_smmu_map(soc->osdev,
					  qdf_mem_paddr_from_dmaaddr(soc->osdev,
						QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
					  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size);
	}

	return ret;
}

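/*
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map the refill
 * ring that backs the fast allocation path.
 *
 * max_bufq_len comes from wlan_cfg_get_rx_refill_buf_pool_size() and
 * must be a power of two, since the enqueue/dequeue paths wrap head
 * and tail with a bitwise AND. One slot is left empty so a full ring
 * (head one behind tail) is distinguishable from an empty one
 * (head == tail). Individual allocation failures are tolerated; the
 * pool simply starts with fewer buffers.
 */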
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	uint16_t head = 0;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->max_bufq_len =
		wlan_cfg_get_rx_refill_buf_pool_size(soc->wlan_cfg_ctx);

	buff_pool->buf_elem = qdf_mem_malloc(buff_pool->max_bufq_len *
					     sizeof(qdf_nbuf_t));
	if (!buff_pool->buf_elem) {
		dp_err("Failed to allocate memory for RX refill buf element");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
	buff_pool->tail = 0;

	for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_audio_smmu_map(soc->osdev,
				  qdf_mem_paddr_from_dmaaddr(soc->osdev,
					QDF_NBUF_CB_PADDR(nbuf)),
				  QDF_NBUF_CB_PADDR(nbuf),
				  rx_desc_pool->buf_size);

		buff_pool->buf_elem[head] = nbuf;
		head++;
	}

	buff_pool->head = head;

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->head);

	buff_pool->is_initialized = true;
}

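/*
 * dp_rx_buffer_pool_init() - set up both RX buffer pools for a MAC:
 * the shared refill ring and the per-MAC emergency queue of up to
 * DP_RX_BUFFER_POOL_SIZE unmapped nbufs. Each pool is skipped when its
 * feature flag is disabled in the wlan_cfg context.
 */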
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_info("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}

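/*
 * dp_rx_refill_buff_pool_deinit() - drain the refill ring, undoing the
 * SMMU and DMA mappings for every remaining buffer before freeing it,
 * then release the buf_elem array.
 */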
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_audio_smmu_unmap(soc->osdev,
				    QDF_NBUF_CB_PADDR(nbuf),
				    rx_desc_pool->buf_size);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}

	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
		count, buff_pool->head, buff_pool->tail);

	qdf_mem_free(buff_pool->buf_elem);
	buff_pool->is_initialized = false;
}

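/*
 * dp_rx_buffer_pool_deinit() - tear down both pools for a MAC: drain
 * the refill ring first, then free every nbuf still sitting in the
 * emergency queue. Emergency-pool buffers were never DMA-mapped, so a
 * plain qdf_nbuf_free() is sufficient for them.
 */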
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */