xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
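
/*
 * Illustrative sketch (not code from this driver): with a power-of-two
 * num_desc_per_page of 256, the while loop in DP_TX_DESC_PAGE_DIVIDER
 * leaves sig_bit = 9, so page_divider = 8 and offset_filter = 0xff.
 * A flat descriptor index can then be split into a page number and an
 * in-page offset with a shift and a mask instead of a divide/modulo.
 * The helper below is a hypothetical example of such a lookup, not a
 * function defined in this file.
 */
#if 0	/* example only */
static inline struct dp_tx_desc_s *
dp_tx_desc_example_find(struct dp_soc *soc, uint8_t pool_id, uint32_t idx)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[pool_id];
	uint32_t page = idx >> pool->page_divider;	/* idx / descs per page */
	uint32_t offset = idx & pool->offset_filter;	/* idx % descs per page */

	return (struct dp_tx_desc_s *)
		((uint8_t *)pool->desc_pages.cacheable_pages[page] +
		 offset * pool->elem_size);
}
#endif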

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required is large: with many clients
 * (e.g. 1024 clients x 3 radios), the number of outstanding MSDUs
 * stored in TQM queues and LMAC queues becomes significant.
 *
 * To avoid allocating one large contiguous memory region, it uses the
 * qdf multi_page_alloc function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next descriptor, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
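
/*
 * Illustrative sketch (assumption, not code from this driver): the
 * expected call order for one pool is alloc -> init -> ... -> deinit
 * -> free, with alloc/free owning the page memory and init/deinit
 * owning the freelist linking, descriptor IDs, counters and the lock.
 * dp_tx_pool_setup_example() is a hypothetical helper, not a function
 * in this file.
 */
#if 0	/* example only */
static QDF_STATUS dp_tx_pool_setup_example(struct dp_soc *soc,
					   uint8_t pool_id, uint32_t num_elem)
{
	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_NOMEM;

	if (dp_tx_desc_pool_init(soc, pool_id, num_elem) !=
	    QDF_STATUS_SUCCESS) {
		/* undo the page allocation if linking/ID setup fails */
		dp_tx_desc_pool_free(soc, pool_id);
		return QDF_STATUS_E_FAULT;
	}
	return QDF_STATUS_SUCCESS;
}
#endif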

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("per-target initialization failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					  soc, DP_TX_EXT_DESC_TYPE,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size,
					  num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Multi-page cacheable memory has to be
	 * allocated for it as well.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					  soc,
					  DP_TX_EXT_DESC_LINK_TYPE,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size,
					  num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	/*
	 * The coherent alloc loop completed for all pools before the link
	 * alloc failed, so free the coherent pages of every pool below.
	 */
	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * The first element of a new page should
				 * point at the start of the next DMA page.
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
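
/*
 * Illustrative sketch (assumption, not code from this driver): after
 * init, each cacheable link element carries the CPU address (vaddr)
 * and DMA address (paddr) of one coherent extension descriptor, so a
 * freelist pop yields both in O(1). dp_tx_ext_desc_example_pop() is a
 * hypothetical helper, not a function in this file.
 */
#if 0	/* example only */
static struct dp_tx_ext_desc_elem_s *
dp_tx_ext_desc_example_pop(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *pool = &soc->tx_ext_desc[pool_id];
	struct dp_tx_ext_desc_elem_s *elem;

	qdf_spin_lock_bh(&pool->lock);
	elem = pool->freelist;
	if (elem) {
		pool->freelist = elem->next;
		pool->num_free--;
		/* elem->vaddr: CPU view; elem->paddr: address given to HW */
	}
	qdf_spin_unlock_bh(&pool->lock);

	return elem;
}
#endif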

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}
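
/*
 * Illustrative sketch (assumption, not code from this driver): TSO
 * segment descriptors are handed out by popping the freelist under
 * the pool lock, mirroring how deinit below takes the same lock while
 * tearing the list down. dp_tx_tso_desc_example_alloc() is a
 * hypothetical helper, not a function in this file.
 */
#if 0	/* example only */
static struct qdf_tso_seg_elem_t *
dp_tx_tso_desc_example_alloc(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];
	struct qdf_tso_seg_elem_t *elem;

	qdf_spin_lock_bh(&pool->lock);
	elem = pool->freelist;
	if (elem) {
		pool->freelist = elem->next;
		pool->num_free--;
	}
	qdf_spin_unlock_bh(&pool->lock);

	return elem;
}
#endif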

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                              fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track
 *                              the fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif