/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_BURST
#define DP_RX_REFILL_BUFF_POOL_BURST 64
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
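/**
 * dp_rx_buffer_pool_refill() - refill the emergency RX buffer pool from a
 *				received nbuf chain
 * @soc: DP SOC handle
 * @nbuf: received MSDU (possibly a scattered-MSDU chain)
 * @mac_id: MAC id of the RXDMA ring the buffers belong to
 *
 * For each buffer of the chain, try to allocate a replacement nbuf and add
 * it to the emergency pool. If a replacement allocation fails, the incoming
 * chain is consumed instead: it is reset and recycled into the pool rather
 * than delivered up the stack.
 *
 * Return: true if the nbuf chain was consumed by the pool, false otherwise
 */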
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
		    DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);
		/* Failed to allocate a new nbuf; mark the chain consumed
		 * so it is reset and placed back into the pool below.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Allocation succeeded: park the new buffer in the pool */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}

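/**
 * dp_rx_buffer_pool_nbuf_free() - recycle an nbuf into the emergency RX
 *				   buffer pool, or free it if the pool is
 *				   already full
 * @soc: DP SOC handle
 * @nbuf: nbuf to be recycled or freed
 * @mac_id: MAC id of the RX ring
 *
 * Return: none
 */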
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE))
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}

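/**
 * dp_rx_refill_buff_pool_enqueue() - top up the RX refill buffer pool
 * @soc: DP SOC handle
 *
 * Allocate and DMA-map nbufs in bursts of DP_RX_REFILL_BUFF_POOL_BURST until
 * the pool holds max_bufq_len buffers. Each burst is linked into a local
 * list first and merged into the pool under bufq_lock, so the lock is not
 * held across allocations.
 *
 * Return: none
 */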
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	struct dp_pdev *dp_pdev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	uint32_t num_req_refill;

	if (!soc)
		return;

	buff_pool = &soc->rx_refill_buff_pool;
	if (!buff_pool->is_initialized)
		return;

	rx_desc_pool = &soc->rx_desc_buf[0];
	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);

	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;

	while (num_req_refill) {
		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;

		count = 0;
		nbuf_head = NULL;
		nbuf_tail = NULL;
		for (i = 0; i < num_req_refill; i++) {
			nbuf = qdf_nbuf_alloc(soc->osdev,
					      rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (!nbuf)
				continue;

			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
							  rx_desc_pool->buf_size,
							  true);
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
			count++;
		}
		if (count) {
			qdf_spin_lock_bh(&buff_pool->bufq_lock);
			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
					     buff_pool->buf_tail,
					     nbuf_head, nbuf_tail);
			buff_pool->bufq_len += count;

			num_req_refill = buff_pool->max_bufq_len -
				buff_pool->bufq_len;
			qdf_spin_unlock_bh(&buff_pool->bufq_lock);

			DP_STATS_INC(dp_pdev,
				     rx_refill_buff_pool.num_bufs_refilled,
				     count);
		}
	}
}

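/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - dequeue one nbuf from the RX
 *					   refill buffer pool
 * @soc: DP SOC handle
 *
 * Return: an nbuf on success, NULL if the pool is uninitialized or empty
 */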
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	qdf_nbuf_t nbuf = NULL;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

	if (!buff_pool->is_initialized || !buff_pool->bufq_len)
		return nbuf;

	qdf_spin_lock_bh(&buff_pool->bufq_lock);
	nbuf = buff_pool->buf_head;
	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
	qdf_nbuf_set_next(nbuf, NULL);
	buff_pool->bufq_len--;
	qdf_spin_unlock_bh(&buff_pool->bufq_lock);

	return nbuf;
}

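/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer nbuf
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 * @rx_desc_pool: RX descriptor pool the buffer is allocated for
 * @num_available_buffers: number of buffers currently available in the ring
 *
 * Try the refill buffer pool first, then a fresh allocation. If fresh
 * allocations have failed DP_RX_BUFF_POOL_ALLOC_THRES times in a row, or the
 * RXDMA ring is running below one tenth of its entries, fall back to the
 * emergency pool.
 *
 * Return: an nbuf on success, NULL on failure
 */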
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (nbuf) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Fall back to the emergency pool when allocations keep failing
	 * or the RXDMA ring is running low on buffers.
	 */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}

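/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if not already mapped
 * @soc: DP SOC handle
 * @rx_desc_pool: RX descriptor pool the buffer belongs to
 * @nbuf_frag_info_t: nbuf frag info holding the buffer to map
 *
 * Buffers dequeued from the refill pool were mapped when they were enqueued,
 * so their physical address is already set and they are skipped here.
 *
 * Return: QDF_STATUS_SUCCESS if mapped or already mapped, error otherwise
 */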
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
			return ret;

		dp_ipa_handle_rx_buf_smmu_mapping(soc,
						  (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
						  rx_desc_pool->buf_size,
						  true);
	}

	return ret;
}

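/**
 * dp_rx_refill_buff_pool_init() - pre-allocate the RX refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 *
 * Allocate and DMA-map DP_RX_REFILL_BUFF_POOL_SIZE nbufs up front.
 * Individual allocation or map failures are tolerated, so the pool may
 * start smaller than requested.
 *
 * Return: none
 */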
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->bufq_len = 0;
	buff_pool->buf_head = NULL;
	buff_pool->buf_tail = NULL;
	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	qdf_spinlock_create(&buff_pool->bufq_lock);

	for (i = 0; i < buff_pool->max_bufq_len; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  true);
		DP_RX_LIST_APPEND(buff_pool->buf_head,
				  buff_pool->buf_tail, nbuf);
		buff_pool->bufq_len++;
	}

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->bufq_len);

	buff_pool->is_initialized = true;
}

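/**
 * dp_rx_buffer_pool_init() - initialize the emergency RX buffer pool and
 *			      the RX refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: none
 */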
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}

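/**
 * dp_rx_refill_buff_pool_deinit() - drain and free the RX refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: none
 */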
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
	}

	buff_pool->is_initialized = false;
}

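/**
 * dp_rx_buffer_pool_deinit() - release the emergency RX buffer pool and the
 *				RX refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: none
 */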
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */