/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
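
/*
 * DP_TX_DESC_PAGE_DIVIDER precomputes the shift and mask used to turn a
 * descriptor index into a (page, offset) pair; num_desc_per_page must be
 * a power of two. Illustrative example: with 256 descriptors per page
 * the loop yields sig_bit = 9, so page_divider = 8 and
 * offset_filter = 0xFF, i.e. page = index >> 8, offset = index & 0xFF.
 * Note the macro consumes its num_desc_per_page argument (it is shifted
 * down to zero).
 */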
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: DP soc context passed to the iterator
 * @elem: tx descriptor element to be cleaned up
 * @elem_list: nbuf list collecting buffers to be freed by the caller
 *
 * Return: None
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

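		/*
		 * If dp_tx_comp_free_buf() returned an nbuf for deferred
		 * freeing, prepend it to the caller-provided list so the
		 * buffers are released in one pass after all pools have
		 * been walked.
		 */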
		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
			     bool cleanup)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	if (!cleanup)
		return;

	for (i = 0; i < num_pool; i++) {
		/* Check the pool handle before taking its lock */
		tx_desc_pool = dp_get_tx_desc_pool(soc, i);
		if (tx_desc_pool) {
			TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
			TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
		}

		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
		if (tx_desc_pool) {
			TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
			TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
		}
	}
}
#endif

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();

	if (spcl_tx_desc) {
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	} else {
		dp_global->tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	}
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	if (spcl_tx_desc) {
		if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
	} else {
		if (!dp_global->tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
		dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
	}
}
#else
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
}
#endif

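/*
 * Pool life cycle, as a sketch inferred from the code below (not a
 * normative contract): dp_tx_desc_pool_alloc() reserves the backing
 * pages, dp_tx_desc_pool_init() links them into a freelist and sets up
 * counters and the lock; teardown mirrors this via
 * dp_tx_desc_pool_deinit() followed by dp_tx_desc_pool_free().
 */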
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem, bool spcl_tx_desc)
{
	uint32_t desc_size, num_elem_t;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	QDF_STATUS status;
	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);
	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
		num_elem_t = num_elem;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_DESC_TYPE;
		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
						    num_elem);
	}

	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, desc_type,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem_t,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	/* Arch specific TX descriptor allocation */
	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate arch specific descriptors");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
			  bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		desc_type = QDF_DP_TX_DESC_TYPE;
	}

	/* Free with the same descriptor type that was used at alloc time */
	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, desc_type,
					     &tx_desc_pool->desc_pages, 0,
					     true);

	/* Free arch specific TX descriptor */
	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
	dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem, bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t desc_size, num_elem_t;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		num_elem_t = num_elem;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
						    num_elem);
	}
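
	/*
	 * Chain all num_elem_t elements across the cacheable pages into a
	 * single list; the head of that chain becomes the pool freelist.
	 */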
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem_t, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem_t,
						pool_id, spcl_tx_desc)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = desc_size;

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
			    bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	if (spcl_tx_desc)
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
	else
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);

	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
					     pool_id, spcl_tx_desc);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

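/*
 * Tx extension descriptor pools: desc_pages holds the DMA-coherent
 * elements read by the target, while desc_link_pages holds cacheable,
 * host-only dp_tx_ext_desc_elem_s entries, each recording the
 * vaddr/paddr of one coherent element.
 */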
QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
				uint8_t pool_id)
{
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	/* Coherent tx extension descriptor alloc */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
				      &dp_tx_ext_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements take 48 KB. Allocate it as multi-page
	 * cacheable memory as well.
	 */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				      &dp_tx_ext_desc_pool->desc_link_pages,
				      link_elem_size, num_elem, 0, true);

	if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc;
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status;
	uint8_t pool_id, count;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate tx ext desc pool %d",
			       pool_id);
			goto free_ext_desc_pool;
		}
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
	for (count = 0; count < pool_id; count++)
		dp_tx_ext_desc_pool_free_by_id(soc, count);

	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	soc->tx_ext_desc[pool_id].elem_size =
		HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	soc->tx_ext_desc[pool_id].elem_count = num_elem;

	dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
		*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

	if (qdf_mem_multi_page_link(soc->osdev,
				    &dp_tx_ext_desc_pool->desc_link_pages,
				    dp_tx_ext_desc_pool->link_elem_size,
				    dp_tx_ext_desc_pool->elem_count,
				    true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto fail;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &dp_tx_ext_desc_pool->desc_pages;
	page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
	c_elem = dp_tx_ext_desc_pool->freelist;
	p_elem = c_elem;
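
	/*
	 * Walk the linked freelist assigning coherent addresses: the
	 * first element mapped to each DMA page takes that page's start
	 * address, and every following element is the previous element's
	 * address advanced by elem_size.
	 */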
	for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
		if (!(i % pages->num_element_per_page)) {
			/*
			 * First element of a new page: point it at the
			 * start of the next coherent page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link over flow");
				status = QDF_STATUS_E_FAULT;
				goto fail;
			}

			c_elem->vaddr =
				(void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				dp_tx_ext_desc_pool->elem_size);
			c_elem->paddr = (p_elem->paddr +
				dp_tx_ext_desc_pool->elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}
	dp_tx_ext_desc_pool->num_free = num_elem;
	qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint8_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to init ext desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;
fail:
	return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				     &dp_tx_ext_desc_pool->desc_link_pages,
				     0, true);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
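/*
 * TSO pools (semantics assumed from the element types): tx_tso_desc
 * holds qdf_tso_seg_elem_t entries, one per TSO segment, while
 * tx_tso_num_seg holds qdf_tso_num_seg_elem_t entries used for
 * per-packet segment accounting.
 */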
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
				      &tso_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!tso_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso desc");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_desc_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
		*tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;

	TSO_DEBUG("Number of free descriptors: %u\n",
		  tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	QDF_STATUS status;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO desc pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (tso_desc_pool->pool_size) {
		qdf_spin_lock_bh(&tso_desc_pool->lock);
		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				      &tso_num_seg_pool->desc_pages,
				      desc_size,
				      num_elem, 0, true);

	if (!tso_num_seg_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso_num_seg_pool");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
							    pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO num seg pool %d",
			       pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	/* Unwind only the pools that were successfully allocated */
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso num seg allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
		*tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	uint32_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
							   pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO num seg pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (tso_num_seg_pool->num_seg_pool_size) {
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif