/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
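
/*
 * Illustrative sketch (not part of the driver): DP_TX_DESC_PAGE_DIVIDER
 * expects num_desc_per_page to be a power of two, so page_divider ends up
 * as log2(num_desc_per_page) and offset_filter as the matching bit mask.
 * A descriptor index then splits into a page number and an in-page offset
 * roughly as follows (desc_id is a hypothetical name):
 *
 *	page   = desc_id >> soc->tx_desc[pool_id].page_divider;
 *	offset = desc_id & soc->tx_desc[pool_id].offset_filter;
 *
 * For example, with 64 descriptors per page (page_divider 6, offset_filter
 * 63), desc_id 200 maps to page 3, offset 8.
 */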

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 * Return: None
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
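
/*
 * Usage sketch (illustrative, names hypothetical): buffers recovered during
 * UMAC reset are chained head-first onto *nbuf_list, so a caller is expected
 * to walk and release the chain afterwards, roughly:
 *
 *	qdf_nbuf_t nbuf_list = NULL, next;
 *
 *	dp_tx_desc_pool_cleanup(soc, &nbuf_list);
 *	while (nbuf_list) {
 *		next = nbuf_list->next;
 *		qdf_nbuf_free(nbuf_list);
 *		nbuf_list = next;
 *	}
 *
 * Whether the buffers are freed here or handed to another layer is target
 * specific.
 */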
#endif /* DP_UMAC_HW_RESET_SUPPORT */

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size, num_elem_t;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	QDF_STATUS status;

	num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &soc->tx_desc[pool_id];
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem_t,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	/* Arch specific TX descriptor allocation */
	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate arch specific descriptors");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);

	/* Free arch specific TX descriptor */
	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size, num_elem_t;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem_t, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem_t, pool_id)) {
		dp_err("per-target tx desc pool initialization failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = desc_size;

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
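
/*
 * Lifecycle sketch (illustrative, error handling elided): a pool is first
 * allocated, then initialized, and torn down in reverse order, e.g. for
 * pool 0:
 *
 *	if (QDF_IS_STATUS_SUCCESS(dp_tx_desc_pool_alloc(soc, 0, num_elem)) &&
 *	    QDF_IS_STATUS_SUCCESS(dp_tx_desc_pool_init(soc, 0, num_elem))) {
 *		use the pool
 *		dp_tx_desc_pool_deinit(soc, 0);
 *	}
 *	dp_tx_desc_pool_free(soc, 0);
 *
 * Callers in the DP layer typically repeat this for every pool id.
 */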

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
				uint8_t pool_id)
{
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	/* Coherent tx extension descriptor alloc */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
				      &dp_tx_ext_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is already large: a single element is 24 bytes,
	 * so 2K elements take 48 KB. Multi-page cacheable memory therefore
	 * has to be allocated.
	 */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				      &dp_tx_ext_desc_pool->desc_link_pages,
				      link_elem_size, num_elem, 0, true);

	if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc;
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status;
	uint8_t pool_id, count;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate tx ext desc pool %d",
			       pool_id);
			goto free_ext_desc_pool;
		}
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
	for (count = 0; count < pool_id; count++)
		dp_tx_ext_desc_pool_free_by_id(soc, count);

	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
	dp_tx_ext_desc_pool->elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	dp_tx_ext_desc_pool->link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	dp_tx_ext_desc_pool->elem_count = num_elem;

	dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
		*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

	if (qdf_mem_multi_page_link(soc->osdev,
				    &dp_tx_ext_desc_pool->desc_link_pages,
				    dp_tx_ext_desc_pool->link_elem_size,
				    dp_tx_ext_desc_pool->elem_count,
				    true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto fail;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &dp_tx_ext_desc_pool->desc_pages;
	page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
	c_elem = dp_tx_ext_desc_pool->freelist;
	p_elem = c_elem;
	for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
		if (!(i % pages->num_element_per_page)) {
			/*
			 * First element of a new page: point it at the
			 * start of the next coherent page.
			 */
			if (!page_info->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link over flow");
				status = QDF_STATUS_E_FAULT;
				goto fail;
			}

			c_elem->vaddr =
				(void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				dp_tx_ext_desc_pool->elem_size);
			c_elem->paddr = (p_elem->paddr +
				dp_tx_ext_desc_pool->elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}
	dp_tx_ext_desc_pool->num_free = num_elem;
	qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
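
/*
 * Illustrative note: after the pairing loop above, cacheable link element i
 * refers to coherent element i, i.e. (per_page and page names hypothetical)
 *
 *	elem[i].vaddr = page[i / per_page].page_v_addr_start +
 *			(i % per_page) * elem_size;
 *	elem[i].paddr = page[i / per_page].page_p_addr +
 *			(i % per_page) * elem_size;
 *
 * so every HW-visible extension descriptor can be reached from its
 * cacheable bookkeeping element.
 */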

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint8_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to init ext desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;
fail:
	return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				     &dp_tx_ext_desc_pool->desc_link_pages,
				     0, true);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
	qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
				      &tso_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!tso_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso desc");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_desc_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
		*tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;

	TSO_DEBUG("Number of free descriptors: %u\n",
		  tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
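
/*
 * Illustrative sketch (not the driver's actual allocation helper, which
 * lives elsewhere): a TSO segment is typically popped from this freelist
 * under the pool lock, roughly:
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	seg = tso_desc_pool->freelist;
 *	if (seg) {
 *		tso_desc_pool->freelist = seg->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */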

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	QDF_STATUS status;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialize TSO desc pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (tso_desc_pool->pool_size) {
		qdf_spin_lock_bh(&tso_desc_pool->lock);
		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				      &tso_num_seg_pool->desc_pages,
				      desc_size,
				      num_elem, 0, true);

	if (!tso_num_seg_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso_num_seg_pool");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
							    pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO num seg pool %d",
			       pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso num seg allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
		*tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	uint32_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
							   pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialize TSO num seg pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (tso_num_seg_pool->num_seg_pool_size) {
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif /* FEATURE_TSO */