xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_buffer_pool.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#include "dp_rx_mon.h"
#endif

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
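/**
 * dp_rx_buffer_pool_refill() - refill the emergency RX buffer pool
 * @soc: DP SOC handle
 * @nbuf: RX buffer (possibly a scattered-MSDU chain) to replenish from
 * @mac_id: MAC id of the RXDMA ring the buffer came from
 *
 * For each buffer in the chain, allocate a replacement nbuf and add it
 * to the emergency queue. If an allocation fails, the original chain is
 * recycled into the pool (or freed) and reported as consumed.
 *
 * Return: true if @nbuf was consumed by the pool and must not be used
 * by the caller, false otherwise.
 */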
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
		    DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate a new nbuf; reset the original and
		 * place it back into the pool instead.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}

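/**
 * dp_rx_buffer_pool_nbuf_free() - release an RX buffer
 * @soc: DP SOC handle
 * @nbuf: RX buffer to release
 * @mac_id: MAC id of the RXDMA ring the buffer belongs to
 *
 * Resets the buffer and parks it in the emergency queue if the pool is
 * initialized and not yet full; otherwise frees it back to the OS.
 */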
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}

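/**
 * dp_rx_refill_buff_pool_enqueue() - replenish the pre-mapped refill
 * buffer ring
 * @soc: DP SOC handle
 *
 * Allocates and DMA-maps buffers in bursts of
 * DP_RX_REFILL_BUFF_POOL_BURST until the free slots of the circular
 * buffer are filled.
 */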
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	qdf_device_t dev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	uint16_t num_refill;
	uint16_t total_num_refill;
	uint16_t total_count = 0;
	uint16_t head, tail;

	if (!soc)
		return;

	dev = soc->osdev;
	buff_pool = &soc->rx_refill_buff_pool;
	rx_desc_pool = &soc->rx_desc_buf[0];
	if (!buff_pool->is_initialized)
		return;

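	/* Free slots in the circular buffer; one slot always stays
	 * unused so that head == tail unambiguously means empty.
	 */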
	head = buff_pool->head;
	tail = buff_pool->tail;
	if (tail > head)
		total_num_refill = (tail - head - 1);
	else
		total_num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head +
				    tail - 1);

	while (total_num_refill) {
		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
		else
			num_refill = total_num_refill;

		count = 0;
		for (i = 0; i < num_refill; i++) {
			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (qdf_unlikely(!nbuf))
				continue;

			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			buff_pool->buf_elem[head++] = nbuf;
			head &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
			count++;
		}

		if (count) {
			buff_pool->head = head;
			total_num_refill -= count;
			total_count += count;
		} else {
			/* Nothing was allocated or mapped in this burst;
			 * stop rather than busy-loop on allocation failure.
			 */
			break;
		}
	}

	DP_STATS_INC(buff_pool->dp_pdev,
		     rx_refill_buff_pool.num_bufs_refilled,
		     total_count);
}

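/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - take one pre-mapped buffer
 * from the refill buffer ring
 * @soc: DP SOC handle
 *
 * Return: an nbuf on success, NULL if the ring is empty.
 */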
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	qdf_nbuf_t nbuf = NULL;
	uint16_t head, tail;

	head = buff_pool->head;
	tail = buff_pool->tail;

	if (head == tail)
		return NULL;

	nbuf = buff_pool->buf_elem[tail++];
	tail &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
	buff_pool->tail = tail;

	return nbuf;
}

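/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RXDMA ring to replenish
 * @rx_desc_pool: RX descriptor pool the buffer is sized for
 * @num_available_buffers: number of buffers currently available in the
 * RXDMA ring
 *
 * Tries the sources in order: the pre-mapped refill ring, a fresh OS
 * allocation, and finally the emergency pool once the failure count
 * reaches DP_RX_BUFF_POOL_ALLOC_THRES or the ring drops below 10% of
 * its capacity.
 *
 * Return: an nbuf, or NULL if every source is exhausted.
 */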
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Fall back to the emergency buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}

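/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if not already
 * mapped
 * @soc: DP SOC handle
 * @rx_desc_pool: RX descriptor pool that owns the buffer
 * @nbuf_frag_info_t: nbuf frag info carrying the buffer
 *
 * Buffers taken from the refill ring already carry a DMA mapping, so
 * only buffers with a zero physical address are mapped here.
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is (or already was) mapped,
 * error status otherwise.
 */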
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf))
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

	return ret;
}

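/**
 * dp_rx_refill_buff_pool_init() - pre-allocate the refill buffer ring
 * @soc: DP SOC handle
 * @mac_id: MAC id used to size the buffers
 *
 * Allocates and DMA-maps up to DP_RX_REFILL_BUFF_POOL_SIZE - 1 buffers
 * (one slot of the circular buffer always stays unused). Allocation
 * failures are tolerated; the ring simply starts out partially filled.
 */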
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	uint16_t head = 0;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
	buff_pool->tail = 0;

	for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		buff_pool->buf_elem[head] = nbuf;
		head++;
	}

	buff_pool->head = head;

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->head);

	buff_pool->is_initialized = true;
}

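/**
 * dp_rx_buffer_pool_init() - initialize the emergency RX buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RXDMA ring
 *
 * Also initializes the refill buffer ring. Allocation failures are not
 * fatal; the pool just starts out with fewer buffers.
 */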
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}

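/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free all buffers left in
 * the refill buffer ring
 * @soc: DP SOC handle
 * @mac_id: MAC id used to size the buffers
 */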
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		/* Unmap with the same direction the buffers were
		 * originally mapped with (QDF_DMA_FROM_DEVICE).
		 */
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}

	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
		count, buff_pool->head, buff_pool->tail);

	buff_pool->is_initialized = false;
}

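/**
 * dp_rx_buffer_pool_deinit() - drain the emergency RX buffer pool and
 * the refill buffer ring
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RXDMA ring
 */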
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */