xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision acd794ba1c40ef0b32a0cb1237e1f14b17b4af32)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_tx_desc.h"
23 
#ifndef DESC_PARTITION
/* Round the descriptor element size up to the next power of two so the
 * page divider / offset filter math below works with shift-and-mask.
 */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)

/*
 * DP_TX_DESC_PAGE_DIVIDER() - precompute page-lookup parameters for a pool
 * @soc: DP soc handle
 * @num_desc_per_page: descriptors per page; assumed to be a power of two
 *                     (consumed by the loop below) -- TODO confirm
 * @pool_id: index into soc->tx_desc[]
 *
 * offset_filter becomes the mask that extracts a descriptor's offset
 * within its page; page_divider becomes the position of the highest set
 * bit (log2 of num_desc_per_page when it is a power of two), i.e. the
 * shift that yields the page number from a flat descriptor index.
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
/* Descriptor memory is partitioned: use the element size unmodified and
 * derive no page divider parameters.
 */
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
42 
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* With TX flow control V2 the pool counters are owned by the flow
 * control code, so this build variant intentionally does nothing.
 */
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	/* Every element starts out free; nothing allocated yet */
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
66 
67 #ifdef DP_UMAC_HW_RESET_SUPPORT
68 /**
69  * dp_tx_desc_clean_up() - Clean up the tx descriptors
70  * @ctxt: context passed
71  * @elem: element to be cleaned up
72  * @elem_list: element list
73  *
74  */
75 static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
76 {
77 	struct dp_soc *soc = (struct dp_soc *)ctxt;
78 	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
79 	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
80 	qdf_nbuf_t nbuf = NULL;
81 
82 	if (tx_desc->nbuf) {
83 		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
84 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
85 
86 		if (nbuf) {
87 			if (!nbuf_list) {
88 				dp_err("potential memory leak");
89 				qdf_assert_always(0);
90 			}
91 
92 			nbuf->next = *nbuf_list;
93 			*nbuf_list = nbuf;
94 		}
95 	}
96 }
97 
98 void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
99 {
100 	int i;
101 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
102 	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
103 
104 	for (i = 0; i < num_pool; i++) {
105 		tx_desc_pool = dp_get_tx_desc_pool(soc, i);
106 
107 		if (tx_desc_pool)
108 			qdf_tx_desc_pool_free_bufs(soc,
109 						   &tx_desc_pool->desc_pages,
110 						   tx_desc_pool->elem_size,
111 						   tx_desc_pool->elem_count,
112 						   true, &dp_tx_desc_clean_up,
113 						   nbuf_list);
114 
115 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
116 
117 		if (tx_desc_pool)
118 			qdf_tx_desc_pool_free_bufs(soc,
119 						   &tx_desc_pool->desc_pages,
120 						   tx_desc_pool->elem_size,
121 						   tx_desc_pool->elem_count,
122 						   true, &dp_tx_desc_clean_up,
123 						   nbuf_list);
124 	}
125 }
126 #endif
127 
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_desc_pool_alloc_mem() - allocate the pool bookkeeping structure
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @spcl_tx_desc: true for the special pool slot, false for the regular one
 *
 * Allocates a struct dp_tx_desc_pool_s in the global context slot for
 * this SoC's arch id.
 *
 * NOTE(review): the qdf_mem_malloc() result is not checked and no status
 * is returned; callers appear to re-read the slot via
 * dp_get_*_tx_desc_pool() -- confirm a NULL slot is handled downstream.
 */
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();

	if (spcl_tx_desc) {
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	} else {
		dp_global->tx_desc[soc->arch_id][pool_id] =
			qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
	}
}

/**
 * dp_tx_desc_pool_free_mem() - free the pool bookkeeping structure
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 * @spcl_tx_desc: true for the special pool slot, false for the regular one
 *
 * Frees the global-context slot allocated by dp_tx_desc_pool_alloc_mem()
 * and NULLs the pointer so a later free is a no-op.
 */
static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	if (spcl_tx_desc) {
		if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
		dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
	} else {
		if (!dp_global->tx_desc[soc->arch_id][pool_id])
			return;

		qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
		dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
	}
}
#else
/* Without the global context the pool structures live inside dp_soc
 * itself, so allocation and free are no-ops here.
 */
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
				      bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
				     bool spcl_tx_desc)
{
}
#endif
176 
177 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
178 				 uint32_t num_elem, bool spcl_tx_desc)
179 {
180 	uint32_t desc_size, num_elem_t;
181 	struct dp_tx_desc_pool_s *tx_desc_pool;
182 	QDF_STATUS status;
183 	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;
184 
185 	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
186 
187 	dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);
188 	if (spcl_tx_desc) {
189 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
190 		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
191 		num_elem_t = num_elem;
192 	} else {
193 		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
194 		desc_type = QDF_DP_TX_DESC_TYPE;
195 		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
196 	}
197 
198 	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
199 	dp_desc_multi_pages_mem_alloc(soc, desc_type,
200 				      &tx_desc_pool->desc_pages,
201 				      desc_size, num_elem_t,
202 				      0, true);
203 
204 	if (!tx_desc_pool->desc_pages.num_pages) {
205 		dp_err("Multi page alloc fail, tx desc");
206 		return QDF_STATUS_E_NOMEM;
207 	}
208 
209 	/* Arch specific TX descriptor allocation */
210 	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
211 	if (QDF_IS_STATUS_ERROR(status)) {
212 		dp_err("failed to allocate arch specific descriptors");
213 		return QDF_STATUS_E_NOMEM;
214 	}
215 
216 	return QDF_STATUS_SUCCESS;
217 }
218 
219 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
220 			  bool spcl_tx_desc)
221 {
222 	struct dp_tx_desc_pool_s *tx_desc_pool;
223 	enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;
224 
225 	if (spcl_tx_desc) {
226 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
227 		desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
228 	} else {
229 		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
230 		desc_type = QDF_DP_TX_DESC_TYPE;
231 	}
232 
233 	if (tx_desc_pool->desc_pages.num_pages)
234 		dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
235 					     &tx_desc_pool->desc_pages, 0,
236 					     true);
237 
238 	/* Free arch specific TX descriptor */
239 	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
240 	dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
241 }
242 
/**
 * dp_tx_desc_pool_init() - initialize an allocated TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: number of descriptor elements requested for the pool
 * @spcl_tx_desc: true for the special pool, false for the regular pool
 *
 * Links the previously allocated multi-page memory into a freelist,
 * performs arch-specific per-descriptor initialization (e.g. unique IDs),
 * sets up pool counters and creates the pool lock.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAULT on link or
 * arch-init failure.
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem, bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t desc_size, num_elem_t;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	/* Special pools use num_elem as-is; regular pools may be resized
	 * by the control-plane hook (must match what pool_alloc used).
	 */
	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		num_elem_t = num_elem;
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
	}
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem_t, true)) {
		dp_err("invalid tx desc allocation -overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	/* Freelist starts at the first element of the first cacheable page */
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem_t,
						pool_id, spcl_tx_desc)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
282 
283 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
284 			    bool spcl_tx_desc)
285 {
286 	struct dp_tx_desc_pool_s *tx_desc_pool;
287 
288 	if (spcl_tx_desc)
289 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
290 	else
291 		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
292 	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
293 					     pool_id, spcl_tx_desc);
294 	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
295 	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
296 }
297 
/**
 * dp_tx_ext_desc_pool_alloc_by_id() - allocate one TX extension desc pool
 * @soc: Handle to DP SoC structure
 * @num_elem: number of extension descriptor elements
 * @pool_id: pool to allocate
 *
 * Allocates two backing stores for the pool: coherent (DMA-able) pages
 * holding the extension descriptors themselves, and cacheable pages
 * holding the link elements that track them. If the second allocation
 * fails the first is rolled back.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
				uint8_t pool_id)
{
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	/* Coherent tx extension descriptor alloc */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
				      &dp_tx_ext_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure also large size already
	 * single element is 24bytes, 2K elements are 48Kbytes
	 * Have to alloc multi page cacheable memory
	 */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				      &dp_tx_ext_desc_pool->desc_link_pages,
				      link_elem_size, num_elem, 0, true);

	if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc;
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc:
	/* Roll back the coherent allocation made above */
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
	return status;
}
347 
348 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
349 				     uint32_t num_elem)
350 {
351 	QDF_STATUS status;
352 	uint8_t pool_id, count;
353 
354 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
355 		status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
356 		if (QDF_IS_STATUS_ERROR(status)) {
357 			dp_err("failed to allocate tx ext desc pool %d", pool_id);
358 			goto free_ext_desc_pool;
359 		}
360 	}
361 
362 	return QDF_STATUS_SUCCESS;
363 
364 free_ext_desc_pool:
365 	for (count = 0; count < pool_id; count++)
366 		dp_tx_ext_desc_pool_free_by_id(soc, count);
367 
368 	return status;
369 }
370 
/**
 * dp_tx_ext_desc_pool_init_by_id() - initialize one TX extension desc pool
 * @soc: Handle to DP SoC structure
 * @num_elem: number of extension descriptor elements
 * @pool_id: pool to initialize
 *
 * Links the cacheable link elements into a freelist and then walks that
 * freelist assigning each element the virtual/physical address of its
 * coherent descriptor, advancing page by page through the DMA pages.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	soc->tx_ext_desc[pool_id].elem_size =
		HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	soc->tx_ext_desc[pool_id].elem_count = num_elem;

	/* Freelist head is the first link element of the first page */
	dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
		*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

	if (qdf_mem_multi_page_link(soc->osdev,
				    &dp_tx_ext_desc_pool->desc_link_pages,
				    dp_tx_ext_desc_pool->link_elem_size,
				    dp_tx_ext_desc_pool->elem_count,
				    true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto fail;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &dp_tx_ext_desc_pool->desc_pages;
	page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
	c_elem = dp_tx_ext_desc_pool->freelist;
	p_elem = c_elem;
	for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
		if (!(i % pages->num_element_per_page)) {
		/**
		 * First element for new page,
		 * should point next page
		 */
			/* NOTE(review): this always tests the FIRST dma page,
			 * not the current page_info, so it cannot detect
			 * running past the allocated pages -- looks like
			 * page_info->page_v_addr_start was intended; confirm.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link over flow");
				status = QDF_STATUS_E_FAULT;
				goto fail;
			}

			c_elem->vaddr =
				(void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			/* Subsequent elements on a page sit elem_size after
			 * the previous one (void-pointer arithmetic here
			 * relies on a compiler extension -- TODO confirm
			 * build flags permit it).
			 */
			c_elem->vaddr = (void *)(p_elem->vaddr +
				dp_tx_ext_desc_pool->elem_size);
			c_elem->paddr = (p_elem->paddr +
				dp_tx_ext_desc_pool->elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}
	dp_tx_ext_desc_pool->num_free = num_elem;
	qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
445 
446 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
447 				    uint32_t num_elem)
448 {
449 	uint8_t pool_id;
450 	QDF_STATUS status;
451 
452 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
453 		status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
454 		if (QDF_IS_STATUS_ERROR(status)) {
455 			dp_err("failed to init ext desc pool %d", pool_id);
456 			goto fail;
457 		}
458 	}
459 
460 	return QDF_STATUS_SUCCESS;
461 fail:
462 	return status;
463 }
464 
465 void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
466 {
467 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
468 	qdf_dma_context_t memctx = 0;
469 
470 	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
471 	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
472 
473 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
474 				     &dp_tx_ext_desc_pool->desc_link_pages,
475 				     0, true);
476 
477 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
478 				     &dp_tx_ext_desc_pool->desc_pages,
479 				     memctx, false);
480 }
481 
482 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
483 {
484 	uint8_t pool_id;
485 
486 	for (pool_id = 0; pool_id < num_pool; pool_id++)
487 		dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
488 }
489 
490 void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
491 {
492 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
493 
494 	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
495 	qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
496 }
497 
498 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
499 {
500 	uint8_t pool_id;
501 
502 	for (pool_id = 0; pool_id < num_pool; pool_id++)
503 		dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
504 }
505 
506 #if defined(FEATURE_TSO)
507 QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
508 					   uint8_t pool_id)
509 {
510 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
511 	uint32_t desc_size;
512 
513 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
514 
515 	tso_desc_pool = &soc->tx_tso_desc[pool_id];
516 	tso_desc_pool->num_free = 0;
517 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
518 				      &tso_desc_pool->desc_pages,
519 				      desc_size, num_elem, 0, true);
520 	if (!tso_desc_pool->desc_pages.num_pages) {
521 		dp_err("Multi page alloc fail, tx desc");
522 		return QDF_STATUS_E_NOMEM;
523 	}
524 
525 	return QDF_STATUS_SUCCESS;
526 }
527 
528 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
529 				     uint32_t num_elem)
530 {
531 	uint32_t pool_id, i;
532 	QDF_STATUS status;
533 
534 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
535 		status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
536 							 pool_id);
537 		if (QDF_IS_STATUS_ERROR(status)) {
538 			dp_err("failed to allocate TSO desc pool %d", pool_id);
539 			goto fail;
540 		}
541 	}
542 
543 	return QDF_STATUS_SUCCESS;
544 
545 fail:
546 	for (i = 0; i < pool_id; i++)
547 		dp_tx_tso_desc_pool_free_by_id(soc, i);
548 
549 	return QDF_STATUS_E_NOMEM;
550 }
551 
552 void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
553 {
554 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
555 
556 	tso_desc_pool = &soc->tx_tso_desc[pool_id];
557 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
558 				     &tso_desc_pool->desc_pages,
559 				     0, true);
560 }
561 
562 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
563 {
564 	uint32_t pool_id;
565 
566 	for (pool_id = 0; pool_id < num_pool; pool_id++)
567 		dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
568 }
569 
570 QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
571 					  uint8_t pool_id)
572 {
573 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
574 	uint32_t desc_size;
575 
576 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
577 
578 	tso_desc_pool = &soc->tx_tso_desc[pool_id];
579 
580 	if (qdf_mem_multi_page_link(soc->osdev,
581 				    &tso_desc_pool->desc_pages,
582 				    desc_size,
583 				    num_elem, true)) {
584 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
585 			  "invalid tso desc allocation - overflow num link");
586 		return QDF_STATUS_E_FAULT;
587 	}
588 
589 	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
590 		*tso_desc_pool->desc_pages.cacheable_pages;
591 	tso_desc_pool->num_free = num_elem;
592 
593 	TSO_DEBUG("Number of free descriptors: %u\n",
594 		  tso_desc_pool->num_free);
595 	tso_desc_pool->pool_size = num_elem;
596 	qdf_spinlock_create(&tso_desc_pool->lock);
597 
598 	return QDF_STATUS_SUCCESS;
599 }
600 
601 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
602 				    uint32_t num_elem)
603 {
604 	QDF_STATUS status;
605 	uint32_t pool_id;
606 
607 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
608 		status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
609 							pool_id);
610 		if (QDF_IS_STATUS_ERROR(status)) {
611 			dp_err("failed to initialise TSO desc pool %d", pool_id);
612 			return status;
613 		}
614 	}
615 
616 	return QDF_STATUS_SUCCESS;
617 }
618 
619 void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
620 {
621 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
622 
623 	tso_desc_pool = &soc->tx_tso_desc[pool_id];
624 
625 	if (tso_desc_pool->pool_size) {
626 		qdf_spin_lock_bh(&tso_desc_pool->lock);
627 		tso_desc_pool->freelist = NULL;
628 		tso_desc_pool->num_free = 0;
629 		tso_desc_pool->pool_size = 0;
630 		qdf_spin_unlock_bh(&tso_desc_pool->lock);
631 		qdf_spinlock_destroy(&tso_desc_pool->lock);
632 	}
633 }
634 
635 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
636 {
637 	uint32_t pool_id;
638 
639 	for (pool_id = 0; pool_id < num_pool; pool_id++)
640 		dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
641 }
642 
643 QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
644 					      uint32_t num_elem,
645 					      uint8_t pool_id)
646 {
647 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
648 	uint32_t desc_size;
649 
650 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
651 
652 	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
653 	tso_num_seg_pool->num_free = 0;
654 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
655 				      &tso_num_seg_pool->desc_pages,
656 				      desc_size,
657 				      num_elem, 0, true);
658 
659 	if (!tso_num_seg_pool->desc_pages.num_pages) {
660 		dp_err("Multi page alloc fail, tso_num_seg_pool");
661 		return QDF_STATUS_E_NOMEM;
662 	}
663 
664 	return QDF_STATUS_SUCCESS;
665 }
666 
667 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
668 					uint32_t num_elem)
669 {
670 	uint32_t pool_id, i;
671 	QDF_STATUS status;
672 
673 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
674 		status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
675 							    pool_id);
676 		if (QDF_IS_STATUS_ERROR(status)) {
677 			dp_err("failed to allocate TSO num seg pool %d", pool_id);
678 			goto fail;
679 		}
680 	}
681 
682 	return QDF_STATUS_SUCCESS;
683 
684 fail:
685 	for (i = 0; i < pool_id; i++)
686 		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
687 
688 	return QDF_STATUS_E_NOMEM;
689 }
690 
691 void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
692 {
693 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
694 
695 	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
696 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
697 				     &tso_num_seg_pool->desc_pages,
698 				     0, true);
699 }
700 
701 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
702 {
703 	uint32_t pool_id;
704 
705 	for (pool_id = 0; pool_id < num_pool; pool_id++)
706 		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
707 }
708 
709 QDF_STATUS
710 dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
711 				  uint8_t pool_id)
712 {
713 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
714 	uint32_t desc_size;
715 
716 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
717 	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
718 
719 	if (qdf_mem_multi_page_link(soc->osdev,
720 				    &tso_num_seg_pool->desc_pages,
721 				    desc_size,
722 				    num_elem, true)) {
723 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
724 			  "invalid tso desc allocation - overflow num link");
725 		return QDF_STATUS_E_FAULT;
726 	}
727 
728 	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
729 		*tso_num_seg_pool->desc_pages.cacheable_pages;
730 	tso_num_seg_pool->num_free = num_elem;
731 	tso_num_seg_pool->num_seg_pool_size = num_elem;
732 
733 	qdf_spinlock_create(&tso_num_seg_pool->lock);
734 
735 	return QDF_STATUS_SUCCESS;
736 }
737 
738 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
739 				       uint32_t num_elem)
740 {
741 	uint32_t pool_id;
742 	QDF_STATUS status;
743 
744 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
745 		status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
746 							   pool_id);
747 		if (QDF_IS_STATUS_ERROR(status)) {
748 			dp_err("failed to initialise TSO num seg pool %d", pool_id);
749 			return status;
750 		}
751 	}
752 
753 	return QDF_STATUS_SUCCESS;
754 }
755 
756 void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
757 {
758 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
759 
760 	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
761 
762 	if (tso_num_seg_pool->num_seg_pool_size) {
763 		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
764 		tso_num_seg_pool->freelist = NULL;
765 		tso_num_seg_pool->num_free = 0;
766 		tso_num_seg_pool->num_seg_pool_size = 0;
767 		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
768 		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
769 	}
770 }
771 
772 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
773 {
774 	uint32_t pool_id;
775 
776 	for (pool_id = 0; pool_id < num_pool; pool_id++)
777 		dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
778 }
#else
/*
 * FEATURE_TSO disabled: every TSO descriptor pool operation compiles to
 * a no-op stub (alloc/init return success) so callers do not need to be
 * conditionally compiled.
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif
862