/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
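
/*
 * Worked example (illustrative only): if a page holds 256 descriptors,
 * DP_TX_DESC_PAGE_DIVIDER() sets offset_filter to 0xFF and page_divider
 * to 8, so a linear descriptor index can be split without a divide:
 *
 *	page_id = index >> page_divider;	// index / 256
 *	offset  = index & offset_filter;	// index % 256
 *
 * This assumes num_desc_per_page is a power of two; since the element
 * size is rounded up with qdf_get_pwr2(), that holds whenever the page
 * size is itself a power of two.
 */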

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
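
/*
 * Note: when QCA_LL_TX_FLOW_CONTROL_V2 is enabled, per-pool free/allocated
 * accounting is presumably maintained by the Tx flow control (flow pool)
 * code instead, which is why the counter initializer above is a no-op in
 * that configuration.
 */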

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since, based on the
 * number of clients (e.g. 1024 clients x 3 radios), the number of
 * outstanding MSDUs stored in TQM queues and LMAC queues will be
 * significantly large.
 *
 * To avoid allocating a large contiguous memory block, it uses the
 * multi_page_alloc qdf function to allocate memory in multiple pages.
 * It then iterates through the allocated pages and links each descriptor
 * to the next one, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring. This minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
 * flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

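	/*
	 * Descriptor ID layout (illustrative, see the DP_TX_DESC_ID_*_OS
	 * shift definitions): the pool id, page id and offset within the
	 * page are packed into a single id as
	 *
	 *	id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
	 *	     (page_id << DP_TX_DESC_ID_PAGE_OS) | offset;
	 *
	 * so later processing (e.g. the Tx completion path) can recover a
	 * descriptor from its id without a separate lookup table.
	 */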
	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}
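
/*
 * Typical usage (illustrative sketch, not taken from this file): a caller
 * in the DP Tx attach/detach path would create one pool per Tx ring and
 * tear them down again on detach, e.g.
 *
 *	for (i = 0; i < num_pool; i++) {
 *		if (dp_tx_desc_pool_alloc(soc, i, num_desc))
 *			goto fail;	// free pools 0..i-1
 *	}
 *	...
 *	for (i = 0; i < num_pool; i++)
 *		dp_tx_desc_pool_free(soc, i);
 *
 * The actual callers and their error handling live elsewhere in the DP
 * layer.
 */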

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success; error code otherwise
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Allocate it as multi-page cacheable
	 * memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

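	/*
	 * Layout note: each cacheable link element in desc_link_pages
	 * carries the virtual and physical address (vaddr/paddr) of one
	 * slot in the DMA-coherent desc_pages above, so the host walks the
	 * cacheable freelist while the extension descriptor that HW reads
	 * stays in coherent memory.
	 */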
	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page: point it at the
			 * start of the next coherent DMA page
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of elements
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
			soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}
		soc->tx_tso_num_seg[pool_id].num_free++;

		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *                                 the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool_id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif