xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c (revision eb134979c1cacbd1eb12caa116020b86fad96e1c)
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
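/*
 * The assert above guards the cookie encoding used in
 * dp_rx_desc_pool_init(): the number of descriptor elements that fit in
 * one page must be representable in the offset field of the cookie,
 * i.e. in bits [0, DP_RX_DESC_PAGE_ID_SHIFT).
 */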

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

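/*
 * Typical pool lifecycle as implemented in this file (illustrative
 * summary, not a new API): dp_rx_desc_pool_alloc() reserves the backing
 * memory, dp_rx_desc_pool_init() links it into a freelist and creates the
 * lock, dp_rx_get_free_desc_list()/dp_rx_add_desc_list_to_free_list()
 * hand descriptors out and back at runtime, and
 * dp_rx_desc_pool_deinit()/dp_rx_desc_pool_free() tear everything down,
 * with dp_rx_desc_nbuf_free()/dp_rx_desc_nbuf_and_pool_free() reclaiming
 * any nbufs still attached to in-use descriptors.
 */
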
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create the lock used to
 *			access this list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie below is packed into the 21-bit cookie field
		 * of the REO destination ring
		 * (reo_destination_ring -> buffer_addr_info ->
		 * sw_buffer_cookie). Only 20 of those bits are used:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

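/*
 * dp_rx_desc_find() - Return the rx descriptor element located at
 *		       (page_id, offset) within the multi-page pool.
 *
 * @page_id: page index, i.e. the page-ID field of the cookie
 * @offset: element index within that page, i.e. the offset field of
 *	    the cookie
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * The page_id/offset pair is the same pair that dp_rx_desc_pool_init()
 * packed into rx_desc.cookie above. As an illustration only (the real
 * cookie helpers live outside this file), with the 4/8/8-bit layout
 * documented there a cookie could be unpacked as:
 *
 *	page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & 0xff;
 *	offset  = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
 *
 * Return: pointer to the rx descriptor element
 */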
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

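/*
 * dp_rx_desc_nbuf_collect() - Walk every descriptor in the pool and sort
 *			       the nbufs of in-use descriptors into two
 *			       lists: those that still need to be unmapped
 *			       and those that only need to be freed.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @nbuf_unmap_list: output list of nbufs that must be unmapped and freed
 * @nbuf_free_list: output list of nbufs that only need to be freed
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the pool has no pages
 */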
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

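/*
 * dp_rx_desc_nbuf_cleanup() - Unmap (SMMU and DMA) and free the nbufs on
 *			       the unmap list, then free the nbufs on the
 *			       free list.
 *
 * @soc: core txrx main context
 * @nbuf_unmap_list: nbufs to be unmapped and freed
 * @nbuf_free_list: nbufs to be freed only
 * @buf_size: rx buffer size used as the unmap length
 *
 * Return: None
 */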
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;
		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
}

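/*
 * dp_rx_desc_nbuf_and_pool_free() - Reclaim the nbufs of all in-use
 *				     descriptors and destroy the pool lock.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */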
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

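/*
 * dp_rx_desc_nbuf_free() - Reclaim the nbufs of all in-use descriptors
 *			    without tearing down the pool or its lock.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */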
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
}

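/*
 * dp_rx_desc_pool_free() - Free the multi-page memory backing the rx
 *			    descriptor pool.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */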
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

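/*
 * dp_rx_desc_pool_deinit() - Reset the freelist and pool size and destroy
 *			      the pool lock.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */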
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}


/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create the lock used to
 *			access this list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
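
/*
 * Note on the cookie layout used above (in contrast to the multi-page
 * scheme): the low 18 bits of rx_desc.cookie are simply the array index
 * and the pool ID sits above them, so a descriptor can be recovered
 * directly as rx_desc_pool->array[cookie & 0x3ffff] (illustrative mask
 * derived from the "pool_id << 18" packing; the actual cookie helpers
 * live outside this file).
 */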

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
						soc, nbuf,
						rx_desc_pool->buf_size,
						false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from the freelist
 * @desc_list: head of the list of descriptors taken from the freelist
 *	       (output parameter)
 * @tail: last descriptor taken from the freelist (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}
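
/*
 * Typical interplay of the two freelist helpers during rx buffer
 * replenish (illustrative sketch only; num_req and the surrounding error
 * handling are hypothetical, not part of this file):
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t num_alloc;
 *
 *	num_alloc = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *					     num_req, &desc_list, &tail);
 *	...map nbufs and hand the allocated descriptors to hardware...
 *	if (desc_list)
 *		// give any descriptors left on the local list back
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 */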

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: last descriptor of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
536