/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
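
/*
 * Illustrative sketch (not driver code): because DP_TX_DESC_SIZE() rounds
 * the element size up to a power of two, the number of descriptors per
 * page is also a power of two, so a linear descriptor index can be split
 * into a page number and an in-page offset with a shift and a mask
 * instead of a divide and a modulo:
 *
 *	page_id = desc_idx >> soc->tx_desc[pool_id].page_divider;
 *	offset  = desc_idx & soc->tx_desc[pool_id].offset_filter;
 *
 * For example, with 64 descriptors per page the macro above computes
 * page_divider = 6 and offset_filter = 0x3F, so index 130 resolves to
 * page 2, offset 2.
 */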

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required is large: based on the number
 * of clients (1024 clients x 3 radios), the count of outstanding MSDUs
 * stored in TQM queues and LMAC queues will be significantly large.
 *
 * To avoid allocating one large contiguous memory block, this function
 * uses the qdf multi-page allocation helper to allocate memory in
 * multiple pages. It then iterates through the memory allocated across
 * pages and links each descriptor to the next, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
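
/*
 * Illustrative usage sketch (not driver code): a descriptor pool is
 * first allocated, then initialized; teardown reverses the order. All
 * four calls are the functions defined in this file; error paths are
 * simplified here.
 *
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_alloc(soc, pool_id,
 *						      num_elem)))
 *		return QDF_STATUS_E_NOMEM;
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_init(soc, pool_id,
 *						     num_elem))) {
 *		dp_tx_desc_pool_free(soc, pool_id);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *	...
 *	dp_tx_desc_pool_deinit(soc, pool_id);
 *	dp_tx_desc_pool_free(soc, pool_id);
 */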

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_tx_desc_s *tx_desc_elem;
	uint16_t num_desc_per_page;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
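
/*
 * Illustrative sketch (not driver code): the ID composed above packs
 * pool, page and offset into one value, so the completion path can
 * recover all three with matching mask/shift pairs. The
 * DP_TX_DESC_ID_*_MASK names below are assumed to be the mask
 * definitions that accompany the _OS shifts used in
 * dp_tx_desc_pool_init() (see dp_tx_desc.h):
 *
 *	pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS;
 *	page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
 *	offset  = (id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *			DP_TX_DESC_ID_OFFSET_OS;
 */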

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					  soc, DP_TX_EXT_DESC_TYPE,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size,
					  num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements take 48 KB, which is why multi-page cacheable
	 * memory is allocated here as well.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					  soc,
					  DP_TX_EXT_DESC_LINK_TYPE,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size,
					  num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}
	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}
	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
	return status;
}

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element of a new page:
				 * point it at the start of the
				 * next DMA page.
				 */
				if (!page_info->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
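
/*
 * Illustrative sketch (not driver code): after init, every freelist
 * element pairs a cacheable link entry with one coherent HW descriptor,
 * so a caller can pop an element under the pool lock, build the
 * extension descriptor through elem->vaddr, and hand elem->paddr to the
 * hardware. A hypothetical pop looks like:
 *
 *	struct dp_tx_ext_desc_elem_s *elem;
 *
 *	qdf_spin_lock_bh(&pool->lock);
 *	elem = pool->freelist;
 *	if (elem) {
 *		pool->freelist = elem->next;
 *		pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&pool->lock);
 */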

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}


/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track
 *                              the fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif