xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
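
/*
 * Worked example (illustration only, assuming num_desc_per_page is a
 * power of two, as produced by DP_TX_DESC_SIZE/qdf_get_pwr2): with
 * num_desc_per_page = 256, offset_filter becomes 0xff and page_divider
 * becomes 8, so a descriptor index maps to its page and in-page offset
 * as:
 *
 *	page   = desc_id >> soc->tx_desc[pool_id].page_divider;
 *	offset = desc_id & soc->tx_desc[pool_id].offset_filter;
 */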

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 *
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
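
/*
 * A minimal usage sketch of the delayed-free path (hypothetical caller;
 * in the UMAC reset flow the nbuf list is built here and freed later,
 * outside this function):
 *
 *	qdf_nbuf_t nbuf_list = NULL;
 *	qdf_nbuf_t next;
 *
 *	dp_tx_desc_pool_cleanup(soc, &nbuf_list);
 *	...
 *	while (nbuf_list) {
 *		next = nbuf_list->next;
 *		qdf_nbuf_free(nbuf_list);
 *		nbuf_list = next;
 *	}
 */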
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for the SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required is large: with up to
 * 1024 clients across 3 radios, the number of outstanding MSDUs
 * stored in TQM queues and LMAC queues is significant.
 *
 * To avoid allocating a large contiguous block of memory, the qdf
 * multi_page_alloc function is used to spread the allocation across
 * multiple pages. The allocated memory is then iterated over and each
 * descriptor is linked to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one per ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("per-target initialization failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
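
/*
 * A minimal lifecycle sketch for one Tx descriptor pool (hypothetical
 * caller; in practice the SoC attach/detach paths drive these calls for
 * every pool):
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (dp_tx_desc_pool_init(soc, pool_id, num_elem) !=
 *	    QDF_STATUS_SUCCESS) {
 *		dp_tx_desc_pool_free(soc, pool_id);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *
 *	... descriptors are served from tx_desc_pool->freelist ...
 *
 *	dp_tx_desc_pool_deinit(soc, pool_id);
 *	dp_tx_desc_pool_free(soc, pool_id);
 */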

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					  soc, DP_TX_EXT_DESC_TYPE,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size,
					  num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements take 48 KB. Multi-page cacheable memory must
	 * be allocated here as well.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					  soc,
					  DP_TX_EXT_DESC_LINK_TYPE,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size,
					  num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
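
/*
 * Unwind illustration (a worked trace of the error paths above, not
 * from the source): with num_pool = 4, a link-page allocation failure
 * at pool_id = 2 first frees the link pages of pools 0..1 at
 * free_ext_desc_page, then pool_id is set to num_pool so that fail_exit
 * releases the coherent descriptor pages of all four pools, which the
 * first loop had fully allocated. A failure in the first loop instead
 * jumps straight to fail_exit and frees only the coherent pages of
 * pools 0..pool_id-1.
 */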

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element of a new page: take its
				 * address from the DMA page table rather
				 * than from the previous element.
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
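
/*
 * Address-walk illustration (assumed numbers, for clarity only): if the
 * coherent allocation yields DMA pages holding num_element_per_page = N
 * elements of elem_size bytes each, the loop above effectively assigns
 * element i
 *
 *	vaddr = dma_pages[i / N].page_v_addr_start + (i % N) * elem_size;
 *	paddr = dma_pages[i / N].page_p_addr       + (i % N) * elem_size;
 *
 * with the first element of each page taken directly from the page
 * table and the rest derived by stepping elem_size from the previous
 * element.
 */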

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}
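
/*
 * A simplified sketch of how a consumer would pop a TSO segment
 * descriptor from the freelist initialized above (illustration only;
 * the actual allocator helpers live alongside these pools in
 * dp_tx_desc.h):
 *
 *	struct qdf_tso_seg_elem_t *seg = NULL;
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	if (tso_desc_pool->freelist) {
 *		seg = tso_desc_pool->freelist;
 *		tso_desc_pool->freelist = tso_desc_pool->freelist->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */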

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif