xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision 45c28558a520fd0e975b20c0ad534a0aa7f08021)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
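
/*
 * Illustrative sketch (not part of the driver): with num_desc_per_page a
 * power of two, the macro above stores log2(num_desc_per_page) in
 * page_divider and (num_desc_per_page - 1) in offset_filter, so a flat
 * descriptor index can be split into a page number and an in-page offset
 * with one shift and one mask. Assuming 256 descriptors per page:
 *
 *	uint32_t page_id = desc_id >> soc->tx_desc[pool_id].page_divider;
 *	uint32_t offset  = desc_id & soc->tx_desc[pool_id].offset_filter;
 *
 * e.g. desc_id 600 gives page_id 2 (600 >> 8) and offset 88 (600 & 0xff),
 * since 2 * 256 + 88 == 600.
 */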

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 *
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
#endif
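
/*
 * Illustrative sketch (assumption, not driver code): a caller of
 * dp_tx_desc_pool_cleanup() could drain the nbufs chained via nbuf->next
 * above and free them outside the descriptor-pool locks, e.g.:
 *
 *	qdf_nbuf_t nbuf_list = NULL;
 *
 *	dp_tx_desc_pool_cleanup(soc, &nbuf_list);
 *	while (nbuf_list) {
 *		qdf_nbuf_t next = nbuf_list->next;
 *
 *		qdf_nbuf_free(nbuf_list);
 *		nbuf_list = next;
 *	}
 */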

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since, based on
 * the number of clients (1024 clients x 3 radios), the outstanding MSDUs
 * stored in TQM queues and LMAC queues will be significantly large.
 *
 * To avoid allocating one large contiguous memory block, the qdf
 * multi_page_alloc function is used to allocate memory across multiple
 * pages. The allocated memory is then iterated across pages and each
 * descriptor is linked to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
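
/*
 * Illustrative sketch (assumption, not driver code): a typical per-pool
 * lifecycle pairs the alloc/init and deinit/free stages in order, with
 * the allocation rolled back if initialization fails, e.g.:
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (dp_tx_desc_pool_init(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS) {
 *		dp_tx_desc_pool_free(soc, pool_id);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *
 *	... use the pool ...
 *
 *	dp_tx_desc_pool_deinit(soc, pool_id);
 *	dp_tx_desc_pool_free(soc, pool_id);
 */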

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					  soc, DP_TX_EXT_DESC_TYPE,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size,
					  num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements take 48 KB. Multi-page cacheable memory has
	 * to be allocated.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					  soc,
					  DP_TX_EXT_DESC_LINK_TYPE,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size,
					  num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
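
/*
 * Illustrative sketch (assumption, not driver code): the 48 KB figure in
 * the comment above follows from the element size and count, and the
 * resulting page count follows from the page size the allocator uses:
 *
 *	size_t link_elem_size = 24;              // per-element size
 *	size_t total = 2048 * link_elem_size;    // 49152 bytes = 48 KB
 *	size_t pages = (total + PAGE_SIZE - 1) / PAGE_SIZE;
 *	                                         // 12 pages with 4 KB pages
 */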

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element of a new page: take the
				 * addresses from the next DMA page
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link over flow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
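
/*
 * Illustrative sketch (assumption, not driver code): within one coherent
 * DMA page the loop above lays the extension descriptors out at a fixed
 * stride, so the i-th element of a page sits at:
 *
 *	vaddr_i = page_v_addr_start + i * elem_size;
 *	paddr_i = page_p_addr       + i * elem_size;
 *
 * On crossing into element 0 of the next page, both addresses are reset
 * from the next qdf_mem_dma_page_t entry rather than continuing the
 * stride, which is what keeps the mapping correct across page boundaries.
 */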

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}
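
/*
 * Illustrative sketch (assumption, not driver code): consumers would pop
 * TSO segment elements off the freelist built above under the pool lock,
 * along these lines:
 *
 *	struct qdf_tso_seg_elem_t *seg = NULL;
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	if (tso_desc_pool->freelist) {
 *		seg = tso_desc_pool->freelist;
 *		tso_desc_pool->freelist = tso_desc_pool->freelist->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */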

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track
 *                              the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif
767