xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
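
/*
 * Note: the assert above guards the cookie encoding used in
 * dp_rx_desc_pool_init(). A page holds at most
 * PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) descriptors, and that
 * per-page element count must fit in the offset field of the cookie
 * (the DP_RX_DESC_PAGE_ID_SHIFT low-order bits); otherwise two
 * descriptors on the same page could map to the same cookie.
 */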

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
				  desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}
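
/*
 * Usage sketch (illustrative only, not a call sequence taken from this
 * file): attach paths are expected to pair alloc/init and tear down in
 * reverse order, roughly:
 *
 *	if (!QDF_IS_STATUS_SUCCESS(dp_rx_desc_pool_alloc(soc, num_elem,
 *							 rx_desc_pool)))
 *		return QDF_STATUS_E_NOMEM;
 *	dp_rx_desc_pool_init(soc, pool_id, num_elem, rx_desc_pool);
 *	...
 *	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
 *	dp_rx_desc_pool_free(soc, rx_desc_pool);
 */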

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie size below comes from the REO destination
		 * ring field:
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
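
/*
 * Cookie decode sketch (illustrative only, assuming the pool/page shift
 * macros partition the cookie exactly as laid out above):
 *
 *	pool_id = cookie >> DP_RX_DESC_POOL_ID_SHIFT;
 *	page_id = (cookie & ((1 << DP_RX_DESC_POOL_ID_SHIFT) - 1)) >>
 *			DP_RX_DESC_PAGE_ID_SHIFT;
 *	offset  = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
 *
 * dp_rx_desc_find() below maps a decoded (page_id, offset) pair back to
 * the descriptor's virtual address.
 */

/*
 * dp_rx_desc_find() - Get the virtual address of the rx descriptor at
 *			(page_id, offset) within the multi-page pool
 *
 * @page_id: page index decoded from the rx descriptor cookie
 * @offset: element index within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor list element
 */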
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

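/*
 * __dp_rx_desc_nbuf_free() - Unmap and free the nbuf still attached to
 *			each in-use descriptor; called with the pool lock
 *			held
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_INVAL if the pool has no backing pages
 */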
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!rx_desc_pool->desc_pages.cacheable_pages)) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev,
							rx_desc->nbuf,
							QDF_DMA_BIDIRECTIONAL,
							rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

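/*
 * dp_rx_desc_nbuf_and_pool_free() - Free the nbufs attached to in-use
 *			descriptors and, on success, release the pool
 *			memory itself
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids (unused in this path)
 * @rx_desc_pool: rx descriptor pool pointer
 */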
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

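/*
 * dp_rx_desc_nbuf_free() - Free only the nbufs attached to in-use
 *			descriptors, leaving the descriptor pool memory
 *			in place
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */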
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

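/*
 * dp_rx_desc_pool_free() - Release the multi-page memory backing the
 *			descriptor pool; a no-op if no pages are allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */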
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}

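/*
 * dp_rx_desc_pool_deinit() - Reset the freelist and pool size and
 *			destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */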
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
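
/*
 * Teardown ordering note (summarizing the functions above): the nbufs
 * must be released while the pool lock still exists, so a staged detach
 * would look roughly like:
 *
 *	dp_rx_desc_nbuf_free(soc, rx_desc_pool);   - nbufs only
 *	dp_rx_desc_pool_deinit(soc, rx_desc_pool); - freelist and lock
 *	dp_rx_desc_pool_free(soc, rx_desc_pool);   - backing pages
 *
 * dp_rx_desc_nbuf_and_pool_free() instead combines the nbuf and page
 * release under a single lock hold.
 */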
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *					rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			convert the pool of memory into a list of
 *			rx descriptors and create locks to access this
 *			list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size *
		     sizeof(*rx_desc_pool->array));
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
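
/*
 * Cookie layout note (single-array pool): the low 18 bits of the cookie
 * carry the array index and the pool ID sits above them, mirroring the
 * "i | (pool_id << 18)" encoding in dp_rx_desc_pool_init() above. An
 * illustrative decode, assuming that split:
 *
 *	index   = cookie & ((1 << 18) - 1);
 *	pool_id = cookie >> 18;
 *	rx_desc = &rx_desc_pool->array[index].rx_desc;
 */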

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
						soc, nbuf,
						rx_desc_pool->buf_size,
						false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from the freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the allocated list
 *	  (output parameter)
 *
 * Return: number of descs allocated from the free list; may be fewer
 *	   than num_descs if the freelist is exhausted.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}
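
/*
 * Replenish sketch (illustrative only): a refill path typically pulls a
 * batch from the freelist, consumes what it can pair with nbufs while
 * advancing desc_list, and hands back whatever remains unused:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *				       num_req, &desc_list, &tail);
 *	... pair nbufs with descriptors, advancing desc_list ...
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 */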

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}