xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

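/*
 * Illustrative sketch only, not part of the upstream driver: when
 * DESC_PARTITION is not defined, the page_divider/offset_filter values
 * written by DP_TX_DESC_PAGE_DIVIDER above let callers split a flat
 * descriptor index into a (page, offset) pair with shift/mask instead of
 * divide/modulo. The helper name is hypothetical and it assumes the pool
 * struct carries the page_divider/offset_filter fields the macro writes,
 * and that descriptors-per-page is a power of 2 (DP_TX_DESC_SIZE() rounds
 * the element size up to a power of 2 in this configuration).
 */
#ifndef DESC_PARTITION
static inline void
dp_tx_desc_index_split_example(struct dp_soc *soc, uint8_t pool_id,
			       uint32_t index, uint32_t *page_id,
			       uint32_t *offset)
{
	/* index / num_desc_per_page, num_desc_per_page being a power of 2 */
	*page_id = index >> soc->tx_desc[pool_id].page_divider;
	/* index % num_desc_per_page */
	*offset = index & soc->tx_desc[pool_id].offset_filter;
}
#endif /* DESC_PARTITION */
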
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since, based on
 * the number of clients (1024 clients x 3 radios), the outstanding MSDUs
 * stored in TQM queues and LMAC queues will be significantly large.
 *
 * To avoid allocating one large contiguous memory block, it uses the
 * multi_page_alloc qdf function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring. This minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs. Alternately, multiple
 * pools can be used for multiple VDEVs for VDEV level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	qdf_mem_multi_pages_alloc(soc->osdev,
				  &tx_desc_pool->desc_pages,
				  desc_size, num_elem,
				  0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: None
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		qdf_mem_multi_pages_free(soc->osdev,
					 &tx_desc_pool->desc_pages, 0, true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_tx_desc_s *tx_desc_elem;
	uint16_t num_desc_per_page;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

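/*
 * Illustrative sketch only, not part of the upstream driver: decodes a
 * descriptor ID composed in dp_tx_desc_pool_init() back into its
 * (pool, page, offset) parts. It assumes DP_TX_DESC_ID_POOL_MASK and
 * DP_TX_DESC_ID_PAGE_MASK pair with the *_OS shift macros used above;
 * the helper name is hypothetical.
 */
static inline void
dp_tx_desc_id_decode_example(uint32_t id, uint32_t *pool_id,
			     uint32_t *page_id, uint32_t *offset)
{
	*pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS;
	*page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
	/* the bits below the page field hold the in-page offset */
	*offset = id & ((1 << DP_TX_DESC_ID_PAGE_OS) - 1);
}
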
/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 * Return: None
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

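/*
 * Illustrative sketch only, not part of the upstream driver: the four
 * functions above are meant to be paired as alloc/init on the way up and
 * deinit/free on the way down. This hypothetical wrapper just shows that
 * intended ordering and the unwind on a failed init.
 */
static inline QDF_STATUS
dp_tx_desc_pool_setup_example(struct dp_soc *soc, uint8_t pool_id,
			      uint16_t num_elem)
{
	QDF_STATUS status;

	status = dp_tx_desc_pool_alloc(soc, pool_id, num_elem);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = dp_tx_desc_pool_init(soc, pool_id, num_elem);
	if (QDF_IS_STATUS_ERROR(status))
		dp_tx_desc_pool_free(soc, pool_id);

	return status;
}
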
/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size,
					  num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Hence allocate multi-page cacheable
	 * memory here as well.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size,
					  num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}
	return status;

free_ext_desc_page:
	/* free the link pages of every pool allocated before the failure */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_link_pages,
					 0, true);
	}
	pool_id = num_pool;

fail_exit:
	/* free the coherent pages of every pool allocated so far */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_pages,
					 memctx, false);
	}
	return status;
}

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element of a new page: point it
				 * at the start of the next DMA page.
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}

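/*
 * Illustrative sketch only, not part of the upstream driver: the address
 * assignment loop in dp_tx_ext_desc_pool_init() above is equivalent to
 * this direct mapping from an element index to its coherent
 * (vaddr, paddr) pair. The helper name is hypothetical.
 */
static inline void
dp_tx_ext_desc_addr_example(struct dp_tx_ext_desc_pool_s *pool, uint32_t i,
			    void **vaddr, qdf_dma_addr_t *paddr)
{
	uint32_t per_page = pool->desc_pages.num_element_per_page;
	struct qdf_mem_dma_page_t *dma_page =
		&pool->desc_pages.dma_pages[i / per_page];
	uint32_t off = (i % per_page) * pool->elem_size;

	*vaddr = (void *)(dma_page->page_v_addr_start + off);
	*paddr = dma_page->page_p_addr + off;
}
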
/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 * Return: None
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_link_pages,
					 0, true);

		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_pages,
					 memctx, false);
	}
}

/**
 * dp_tx_ext_desc_pool_deinit() - de-initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_desc_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* unwind the pools that were allocated before the failure */
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_desc_pool->desc_pages,
					 0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_desc_pool->desc_pages, 0, true);
	}
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

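/*
 * Illustrative sketch only, not part of the upstream driver: a typical
 * lock-protected pop from the TSO segment freelist set up by
 * dp_tx_tso_desc_pool_init() above. The real allocator lives elsewhere
 * in the data path; this helper name is hypothetical.
 */
static inline struct qdf_tso_seg_elem_t *
dp_tx_tso_seg_pop_example(struct dp_tx_tso_seg_pool_s *pool)
{
	struct qdf_tso_seg_elem_t *seg = NULL;

	qdf_spin_lock_bh(&pool->lock);
	if (pool->freelist) {
		seg = pool->freelist;
		pool->freelist = pool->freelist->next;
		pool->num_free--;
	}
	qdf_spin_unlock_bh(&pool->lock);

	return seg;
}
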
/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_num_seg_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* unwind the pools that were allocated before the failure */
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_num_seg_pool->desc_pages,
					 0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_num_seg_pool->desc_pages,
					 0, true);
	}
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                                   fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif