/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
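/*
 * Compile-time check: the number of descriptor elements per page must
 * fit in the offset field of the SW cookie (the page-ID field starts at
 * DP_RX_DESC_PAGE_ID_SHIFT); otherwise cookies of different descriptors
 * would alias each other.
 */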
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
				  desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a freelist of
 *			    rx descriptors and create the lock that
 *			    protects access to that list.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

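/*
 * dp_rx_desc_find() - locate the rx descriptor element at the given
 *		       page ID and offset within the multi-page pool
 *
 * @page_id: index of the page that holds the descriptor
 * @offset: index of the descriptor within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor element
 */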
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

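/*
 * __dp_rx_desc_nbuf_free() - walk every descriptor in the pool and, for
 *			      descriptors still in use, unmap (if needed)
 *			      and free the attached nbuf
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_INVAL if the pool has no pages
 */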
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!rx_desc_pool->desc_pages.cacheable_pages)) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

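/*
 * dp_rx_desc_nbuf_and_pool_free() - free the nbufs attached to in-use rx
 *				     descriptors and then release the pool
 *				     memory and its lock
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */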
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

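/*
 * dp_rx_desc_nbuf_free() - free the nbufs attached to in-use rx
 *			    descriptors, keeping the descriptor pool
 *			    memory itself allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */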
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

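/*
 * dp_rx_desc_pool_free() - release the multi-page memory backing the rx
 *			    descriptor pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */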
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}

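/*
 * dp_rx_desc_pool_deinit() - de-initialize the rx descriptor pool: drop
 *			      the freelist, reset the pool size and
 *			      destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */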
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a freelist of
 *			    rx descriptors and create the lock that
 *			    protects access to that list.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->array,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
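		/*
		 * SW cookie for the single-array pool: the descriptor
		 * index sits in the low bits and the pool ID is packed
		 * above it (shift of 18 bits here).
		 */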
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

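/*
 * dp_rx_desc_nbuf_and_pool_free() - free the nbufs attached to in-use rx
 *				     descriptors, then release the
 *				     descriptor array and the pool lock
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */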
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

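/*
 * dp_rx_desc_nbuf_free() - free the nbufs attached to in-use rx
 *			    descriptors, keeping the descriptor array
 *			    allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */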
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

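/*
 * dp_rx_desc_pool_free() - release the array backing the rx descriptor
 *			    pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */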
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

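/*
 * dp_rx_desc_pool_deinit() - de-initialize the rx descriptor pool: drop
 *			      the freelist, reset the pool size and
 *			      destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */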
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from the freelist
 * @desc_list: head of the list of allocated descs (output parameter)
 * @tail: pointer to the last desc of the allocated list (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {

		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

/*
 * dp_rx_add_desc_list_to_free_list() - append an unused desc_list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
453