/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
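
/*
 * A worked example of the page-divider arithmetic above (this assumes
 * num_desc_per_page is a power of two, which DP_TX_DESC_SIZE()'s
 * qdf_get_pwr2() rounding of the element size is meant to provide):
 * with num_desc_per_page = 256 the loop leaves sig_bit = 9, so
 * page_divider = 8 and offset_filter = 0xff. A descriptor index i then
 * maps to page_id = i >> page_divider and offset = i & offset_filter,
 * i.e. a shift and a mask instead of a divide and a modulo.
 */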

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the descriptor pool to allocate
 * @num_elem: Number of descriptor elements in the pool
 *
 * This function allocates memory for SW Tx descriptors
 * (used within the host for the Tx data path).
 * The number of Tx descriptors required is large, since the number of
 * outstanding MSDUs held in TQM and LMAC queues scales with the number
 * of clients (for example, 1024 clients x 3 radios).
 *
 * To avoid allocating one large contiguous block, the descriptors are
 * allocated across multiple pages with the qdf multi-page allocator.
 * The function then walks the allocated pages and links each descriptor
 * to the next one, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, one pool is allocated
 * per ring; this minimizes lock contention when hard_start_xmit is
 * called from multiple CPUs. Alternatively, multiple pools can be used
 * for multiple VDEVs for VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
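	/*
	 * Each ID packs the pool, page and offset of the descriptor into
	 * a single value using the DP_TX_DESC_ID_POOL_OS and
	 * DP_TX_DESC_ID_PAGE_OS shifts used below, e.g. pool 1, page 2,
	 * offset 3 becomes
	 * (1 << DP_TX_DESC_ID_POOL_OS) | (2 << DP_TX_DESC_ID_PAGE_OS) | 3,
	 * so a descriptor can later be located directly from its ID
	 * (pool -> page -> offset) rather than by searching the freelist.
	 */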
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}
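
/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * variable names are not taken from this file): one pool is typically
 * set up per Tx ring, and any pools already allocated are released with
 * dp_tx_desc_pool_free() if a later allocation fails.
 *
 *	for (i = 0; i < num_pool; i++) {
 *		if (dp_tx_desc_pool_alloc(soc, i, num_desc) !=
 *		    QDF_STATUS_SUCCESS)
 *			goto cleanup_pools;
 *	}
 */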

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the descriptor pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool ID
 * @num_elem: number of descriptor elements in the pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM or QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Multi-page cacheable memory has to be
	 * allocated for it as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page: take the address
			 * from the next DMA page
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}
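
/*
 * After a successful dp_tx_ext_desc_pool_alloc(), tx_ext_desc[pool_id].freelist
 * is a singly linked list of cacheable link elements, each carrying the
 * vaddr/paddr of one HAL_TX_EXT_DESC_WITH_META_DATA sized slot in the
 * DMA-coherent pages. Consumers are expected to pop an element, build the
 * extension descriptor through vaddr and hand paddr to the hardware.
 */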

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate Tx TSO descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: TSO descriptor pool ID
 * @num_elem: number of descriptor elements in the pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
			soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}
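
/*
 * Unlike the main Tx descriptor pool above, the TSO segment pool is built
 * from individual qdf_mem_malloc() allocations chained into a freelist, so
 * a partial allocation failure is unwound by walking the list in the fail
 * path rather than by a single multi-page free.
 */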

/**
 * dp_tx_tso_desc_pool_free() - free Tx TSO descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: TSO descriptor pool ID
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
	return;
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - allocate descriptors that track the
 *                              fragments in each TSO segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}
		soc->tx_tso_num_seg[pool_id].num_free++;

		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *			      the fragments in each TSO segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
	return;
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}
#endif