xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_tx_desc.h"
22 
#ifndef DESC_PARTITION
/* Round the descriptor size up to the next power of two so that
 * page/offset lookups can use the shift/mask computed below.
 */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
/*
 * DP_TX_DESC_PAGE_DIVIDER - precompute, for pool @pool_id, the mask
 * (offset_filter) and shift (page_divider) used to convert a descriptor
 * index into a page number and an offset within that page.
 *
 * NOTE: the macro consumes (zeroes) @num_desc_per_page while counting
 * its significant bits — pass a scratch variable, not a live value.
 * Assumes @num_desc_per_page is a power of two (mask = value - 1).
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
41 
42 /**
43  * dp_tx_desc_pool_counter_initialize() - Initialize counters
44  * @tx_desc_pool Handle to DP tx_desc_pool structure
45  * @num_elem Number of descriptor elements per pool
46  *
47  * Return: None
48  */
49 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
50 static void
51 dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
52 				  uint16_t num_elem)
53 {
54 }
55 #else
56 static void
57 dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
58 				  uint16_t num_elem)
59 {
60 	tx_desc_pool->num_free = num_elem;
61 	tx_desc_pool->num_allocated = 0;
62 }
63 #endif
64 
65 /**
66  * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
67  * @soc Handle to DP SoC structure
68  * @pool_id pool to allocate
69  * @num_elem Number of descriptor elements per pool
70  *
71  * This function allocates memory for SW tx descriptors
72  * (used within host for tx data path).
73  * The number of tx descriptors required will be large
74  * since based on number of clients (1024 clients x 3 radios),
75  * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
76  * large.
77  *
78  * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
79  * function to allocate memory
80  * in multiple pages. It then iterates through the memory allocated across pages
81  * and links each descriptor
82  * to next descriptor, taking care of page boundaries.
83  *
84  * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
85  * one for each ring;
86  * This minimizes lock contention when hard_start_xmit is called
87  * from multiple CPUs.
88  * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
89  * flow control.
90  *
91  * Return: Status code. 0 for success.
92  */
93 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
94 				 uint16_t num_elem)
95 {
96 	uint32_t desc_size;
97 	struct dp_tx_desc_pool_s *tx_desc_pool;
98 
99 	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
100 	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
101 	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
102 				      &tx_desc_pool->desc_pages,
103 				      desc_size, num_elem,
104 				      0, true);
105 
106 	if (!tx_desc_pool->desc_pages.num_pages) {
107 		dp_err("Multi page alloc fail, tx desc");
108 		return QDF_STATUS_E_NOMEM;
109 	}
110 	return QDF_STATUS_SUCCESS;
111 }
112 
113 /**
114  * dp_tx_desc_pool_free() -  Free the tx dexcriptor pools
115  * @soc: Handle to DP SoC structure
116  * @pool_id: pool to free
117  *
118  */
119 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
120 {
121 	struct dp_tx_desc_pool_s *tx_desc_pool;
122 
123 	tx_desc_pool = &((soc)->tx_desc[pool_id]);
124 
125 	if (tx_desc_pool->desc_pages.num_pages)
126 		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
127 					     &tx_desc_pool->desc_pages, 0,
128 					     true);
129 }
130 
131 /**
132  * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
133  * @soc: Handle to DP SoC structure
134  * @pool_id: pool to allocate
135  * @num_elem: Number of descriptor elements per pool
136  *
137  * Return: QDF_STATUS_SUCCESS
138  *	   QDF_STATUS_E_FAULT
139  */
140 QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
141 				uint16_t num_elem)
142 {
143 	struct dp_tx_desc_pool_s *tx_desc_pool;
144 	uint32_t desc_size;
145 
146 	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
147 
148 	tx_desc_pool = &soc->tx_desc[pool_id];
149 	if (qdf_mem_multi_page_link(soc->osdev,
150 				    &tx_desc_pool->desc_pages,
151 				    desc_size, num_elem, true)) {
152 		dp_err("invalid tx desc allocation -overflow num link");
153 		return QDF_STATUS_E_FAULT;
154 	}
155 
156 	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
157 		*tx_desc_pool->desc_pages.cacheable_pages;
158 	/* Set unique IDs for each Tx descriptor */
159 	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
160 						soc, num_elem, pool_id)) {
161 		dp_err("initialization per target failed");
162 		return QDF_STATUS_E_FAULT;
163 	}
164 
165 	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
166 
167 	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
168 	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
169 
170 	return QDF_STATUS_SUCCESS;
171 }
172 
173 /**
174  * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
175  * @soc Handle to DP SoC structure
176  * @pool_id: pool to de-initialize
177  *
178  */
179 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
180 {
181 	struct dp_tx_desc_pool_s *tx_desc_pool;
182 
183 	tx_desc_pool = &soc->tx_desc[pool_id];
184 	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
185 	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
186 	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
187 }
188 
189 /**
190  * dp_tx_ext_desc_pool_alloc() - allocate Tx extenstion Descriptor pool(s)
191  * @soc: Handle to DP SoC structure
192  * @num_pool: Number of pools to allocate
193  * @num_elem: Number of descriptor elements per pool
194  *
195  * Return - QDF_STATUS_SUCCESS
196  *	    QDF_STATUS_E_NOMEM
197  */
198 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
199 				     uint16_t num_elem)
200 {
201 	QDF_STATUS status = QDF_STATUS_SUCCESS;
202 	qdf_dma_context_t memctx = 0;
203 	uint8_t pool_id, count;
204 	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
205 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
206 	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);
207 
208 	/* Coherent tx extension descriptor alloc */
209 
210 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
211 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
212 		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
213 		dp_desc_multi_pages_mem_alloc(
214 					  soc, DP_TX_EXT_DESC_TYPE,
215 					  &dp_tx_ext_desc_pool->desc_pages,
216 					  elem_size,
217 					  num_elem,
218 					  memctx, false);
219 
220 		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
221 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
222 				  "ext desc page alloc fail");
223 			status = QDF_STATUS_E_NOMEM;
224 			goto fail_exit;
225 		}
226 	}
227 
228 	/*
229 	 * Cacheable ext descriptor link alloc
230 	 * This structure also large size already
231 	 * single element is 24bytes, 2K elements are 48Kbytes
232 	 * Have to alloc multi page cacheable memory
233 	 */
234 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
235 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
236 		dp_desc_multi_pages_mem_alloc(
237 					  soc,
238 					  DP_TX_EXT_DESC_LINK_TYPE,
239 					  &dp_tx_ext_desc_pool->desc_link_pages,
240 					  link_elem_size,
241 					  num_elem,
242 					  0, true);
243 
244 		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
245 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
246 				  "ext link desc page alloc fail");
247 			status = QDF_STATUS_E_NOMEM;
248 			goto free_ext_desc_page;
249 		}
250 	}
251 
252 	return status;
253 
254 free_ext_desc_page:
255 	for (count = 0; count < pool_id; count++) {
256 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
257 		dp_desc_multi_pages_mem_free(
258 					soc, DP_TX_EXT_DESC_LINK_TYPE,
259 					&dp_tx_ext_desc_pool->desc_link_pages,
260 					0, true);
261 	}
262 
263 	pool_id = num_pool;
264 
265 fail_exit:
266 	for (count = 0; count < pool_id; count++) {
267 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
268 		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
269 		dp_desc_multi_pages_mem_free(
270 					soc, DP_TX_EXT_DESC_TYPE,
271 					&dp_tx_ext_desc_pool->desc_pages,
272 					memctx, false);
273 	}
274 
275 	return status;
276 }
277 
278 /**
279  * dp_tx_ext_desc_pool_init() - initialize Tx extenstion Descriptor pool(s)
280  * @soc: Handle to DP SoC structure
281  * @num_pool: Number of pools to initialize
282  * @num_elem: Number of descriptor elements per pool
283  *
284  * Return - QDF_STATUS_SUCCESS
285  *	    QDF_STATUS_E_NOMEM
286  */
287 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
288 				    uint16_t num_elem)
289 {
290 	uint32_t i;
291 	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
292 	struct qdf_mem_dma_page_t *page_info;
293 	struct qdf_mem_multi_page_t *pages;
294 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
295 	uint8_t pool_id;
296 	QDF_STATUS status;
297 
298 	/* link tx descriptors into a freelist */
299 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
300 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
301 		soc->tx_ext_desc[pool_id].elem_size =
302 			HAL_TX_EXT_DESC_WITH_META_DATA;
303 		soc->tx_ext_desc[pool_id].link_elem_size =
304 			sizeof(struct dp_tx_ext_desc_elem_s);
305 		soc->tx_ext_desc[pool_id].elem_count = num_elem;
306 
307 		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
308 			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
309 
310 		if (qdf_mem_multi_page_link(soc->osdev,
311 					    &dp_tx_ext_desc_pool->
312 					    desc_link_pages,
313 					    dp_tx_ext_desc_pool->link_elem_size,
314 					    dp_tx_ext_desc_pool->elem_count,
315 					    true)) {
316 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
317 				  "ext link desc page linking fail");
318 			status = QDF_STATUS_E_FAULT;
319 			goto fail;
320 		}
321 
322 		/* Assign coherent memory pointer into linked free list */
323 		pages = &dp_tx_ext_desc_pool->desc_pages;
324 		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
325 		c_elem = dp_tx_ext_desc_pool->freelist;
326 		p_elem = c_elem;
327 		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
328 			if (!(i % pages->num_element_per_page)) {
329 			/**
330 			 * First element for new page,
331 			 * should point next page
332 			 */
333 				if (!pages->dma_pages->page_v_addr_start) {
334 					QDF_TRACE(QDF_MODULE_ID_DP,
335 						  QDF_TRACE_LEVEL_ERROR,
336 						  "link over flow");
337 					status = QDF_STATUS_E_FAULT;
338 					goto fail;
339 				}
340 
341 				c_elem->vaddr =
342 					(void *)page_info->page_v_addr_start;
343 				c_elem->paddr = page_info->page_p_addr;
344 				page_info++;
345 			} else {
346 				c_elem->vaddr = (void *)(p_elem->vaddr +
347 					dp_tx_ext_desc_pool->elem_size);
348 				c_elem->paddr = (p_elem->paddr +
349 					dp_tx_ext_desc_pool->elem_size);
350 			}
351 			p_elem = c_elem;
352 			c_elem = c_elem->next;
353 			if (!c_elem)
354 				break;
355 		}
356 		dp_tx_ext_desc_pool->num_free = num_elem;
357 		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
358 	}
359 	return QDF_STATUS_SUCCESS;
360 
361 fail:
362 	return status;
363 }
364 
365 /**
366  * dp_tx_ext_desc_pool_free() -  free Tx extenstion Descriptor pool(s)
367  * @soc: Handle to DP SoC structure
368  * @num_pool: Number of pools to free
369  *
370  */
371 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
372 {
373 	uint8_t pool_id;
374 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
375 	qdf_dma_context_t memctx = 0;
376 
377 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
378 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
379 		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
380 
381 		dp_desc_multi_pages_mem_free(
382 					soc, DP_TX_EXT_DESC_LINK_TYPE,
383 					&dp_tx_ext_desc_pool->desc_link_pages,
384 					0, true);
385 
386 		dp_desc_multi_pages_mem_free(
387 					soc, DP_TX_EXT_DESC_TYPE,
388 					&dp_tx_ext_desc_pool->desc_pages,
389 					memctx, false);
390 	}
391 }
392 
393 /**
394  * dp_tx_ext_desc_pool_deinit() -  deinit Tx extenstion Descriptor pool(s)
395  * @soc: Handle to DP SoC structure
396  * @num_pool: Number of pools to de-initialize
397  *
398  */
399 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
400 {
401 	uint8_t pool_id;
402 	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
403 
404 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
405 		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
406 		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
407 	}
408 }
409 
410 #if defined(FEATURE_TSO)
411 /**
412  * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
413  * @soc: Handle to DP SoC structure
414  * @num_pool: Number of pools to allocate
415  * @num_elem: Number of descriptor elements per pool
416  *
417  * Return - QDF_STATUS_SUCCESS
418  *	    QDF_STATUS_E_NOMEM
419  */
420 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
421 				     uint16_t num_elem)
422 {
423 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
424 	uint32_t desc_size, pool_id, i;
425 
426 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
427 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
428 		tso_desc_pool = &soc->tx_tso_desc[pool_id];
429 		tso_desc_pool->num_free = 0;
430 		dp_desc_multi_pages_mem_alloc(
431 					soc,
432 					DP_TX_TSO_DESC_TYPE,
433 					&tso_desc_pool->desc_pages,
434 					desc_size,
435 					num_elem, 0, true);
436 
437 		if (!tso_desc_pool->desc_pages.num_pages) {
438 			dp_err("Multi page alloc fail, tx desc");
439 			goto fail;
440 		}
441 	}
442 	return QDF_STATUS_SUCCESS;
443 
444 fail:
445 	for (i = 0; i < pool_id; i++) {
446 		tso_desc_pool = &soc->tx_tso_desc[i];
447 		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
448 					     &tso_desc_pool->desc_pages,
449 					     0, true);
450 	}
451 	return QDF_STATUS_E_NOMEM;
452 }
453 
454 /**
455  * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
456  * @soc: Handle to DP SoC structure
457  * @num_pool: Number of pools to free
458  *
459  */
460 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
461 {
462 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
463 	uint32_t pool_id;
464 
465 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
466 		tso_desc_pool = &soc->tx_tso_desc[pool_id];
467 		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
468 					     &tso_desc_pool->desc_pages,
469 					     0, true);
470 	}
471 }
472 
473 /**
474  * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
475  * @soc: Handle to DP SoC structure
476  * @num_pool: Number of pools to initialize
477  * @num_elem: Number of descriptor elements per pool
478  *
479  * Return - QDF_STATUS_SUCCESS
480  *	    QDF_STATUS_E_NOMEM
481  */
482 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
483 				    uint16_t num_elem)
484 {
485 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
486 	uint32_t desc_size, pool_id;
487 
488 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
489 
490 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
491 		tso_desc_pool = &soc->tx_tso_desc[pool_id];
492 
493 		if (qdf_mem_multi_page_link(soc->osdev,
494 					    &tso_desc_pool->desc_pages,
495 					    desc_size,
496 					    num_elem, true)) {
497 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
498 				  "invalid tso desc allocation - overflow num link");
499 			return QDF_STATUS_E_FAULT;
500 		}
501 
502 		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
503 			*tso_desc_pool->desc_pages.cacheable_pages;
504 		tso_desc_pool->num_free = num_elem;
505 
506 		TSO_DEBUG("Number of free descriptors: %u\n",
507 			  tso_desc_pool->num_free);
508 		tso_desc_pool->pool_size = num_elem;
509 		qdf_spinlock_create(&tso_desc_pool->lock);
510 	}
511 	return QDF_STATUS_SUCCESS;
512 }
513 
514 /**
515  * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
516  * @soc: Handle to DP SoC structure
517  * @num_pool: Number of pools to free
518  *
519  */
520 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
521 {
522 	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
523 	uint32_t pool_id;
524 
525 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
526 		tso_desc_pool = &soc->tx_tso_desc[pool_id];
527 		qdf_spin_lock_bh(&tso_desc_pool->lock);
528 
529 		tso_desc_pool->freelist = NULL;
530 		tso_desc_pool->num_free = 0;
531 		tso_desc_pool->pool_size = 0;
532 		qdf_spin_unlock_bh(&tso_desc_pool->lock);
533 		qdf_spinlock_destroy(&tso_desc_pool->lock);
534 	}
535 }
536 
537 /**
538  * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
539  *                              fragments in each tso segment
540  *
541  * @soc: handle to dp soc structure
542  * @num_pool: number of pools to allocate
543  * @num_elem: total number of descriptors to be allocated
544  *
545  * Return - QDF_STATUS_SUCCESS
546  *	    QDF_STATUS_E_NOMEM
547  */
548 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
549 					uint16_t num_elem)
550 {
551 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
552 	uint32_t desc_size, pool_id, i;
553 
554 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
555 
556 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
557 		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
558 		tso_num_seg_pool->num_free = 0;
559 		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
560 					      &tso_num_seg_pool->desc_pages,
561 					      desc_size,
562 					      num_elem, 0, true);
563 
564 		if (!tso_num_seg_pool->desc_pages.num_pages) {
565 			dp_err("Multi page alloc fail, tso_num_seg_pool");
566 			goto fail;
567 		}
568 	}
569 	return QDF_STATUS_SUCCESS;
570 
571 fail:
572 	for (i = 0; i < pool_id; i++) {
573 		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
574 		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
575 					     &tso_num_seg_pool->desc_pages,
576 					     0, true);
577 	}
578 	return QDF_STATUS_E_NOMEM;
579 }
580 
581 /**
582  * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the
583  *                              fragments in each tso segment
584  *
585  * @soc: handle to dp soc structure
586  * @num_pool: number of pools to free
587  */
588 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
589 {
590 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
591 	uint32_t pool_id;
592 
593 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
594 		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
595 		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
596 					     &tso_num_seg_pool->desc_pages,
597 					     0, true);
598 	}
599 }
600 
601 /**
602  * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the
603  *                              fragments in each tso segment
604  *
605  * @soc: handle to dp soc structure
606  * @num_pool: number of pools to initialize
607  * @num_elem: total number of descriptors to be initialized
608  *
609  * Return - QDF_STATUS_SUCCESS
610  *	    QDF_STATUS_E_FAULT
611  */
612 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
613 				       uint16_t num_elem)
614 {
615 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
616 	uint32_t desc_size, pool_id;
617 
618 	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
619 
620 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
621 		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
622 		if (qdf_mem_multi_page_link(soc->osdev,
623 					    &tso_num_seg_pool->desc_pages,
624 					    desc_size,
625 					    num_elem, true)) {
626 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
627 				  "invalid tso desc allocation - overflow num link");
628 			return QDF_STATUS_E_FAULT;
629 		}
630 
631 		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
632 			*tso_num_seg_pool->desc_pages.cacheable_pages;
633 		tso_num_seg_pool->num_free = num_elem;
634 		tso_num_seg_pool->num_seg_pool_size = num_elem;
635 
636 		qdf_spinlock_create(&tso_num_seg_pool->lock);
637 	}
638 	return QDF_STATUS_SUCCESS;
639 }
640 
641 /**
642  * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the
643  *                              fragments in each tso segment
644  *
645  * @soc: handle to dp soc structure
646  * @num_pool: number of pools to de-initialize
647  *
648  * Return - QDF_STATUS_SUCCESS
649  *	    QDF_STATUS_E_FAULT
650  */
651 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
652 {
653 	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
654 	uint32_t pool_id;
655 
656 	for (pool_id = 0; pool_id < num_pool; pool_id++) {
657 		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
658 		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
659 
660 		tso_num_seg_pool->freelist = NULL;
661 		tso_num_seg_pool->num_free = 0;
662 		tso_num_seg_pool->num_seg_pool_size = 0;
663 		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
664 		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
665 	}
666 }
667 #else
/* FEATURE_TSO disabled: all TSO descriptor pool operations below are
 * no-op stubs so callers do not need conditional compilation.
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
707 #endif
708