xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c (revision d78dedc9dd8c4ee677ac1649d1d42f2a7c3cc1b7)
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

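/*
 * Illustrative sketch (not part of the driver logic): with num_desc_per_page
 * a power of two, the macro above effectively caches log2(num_desc_per_page)
 * in page_divider and (num_desc_per_page - 1) in offset_filter, so a flat
 * descriptor index can later be split into a page number and an offset within
 * that page using a shift and a mask instead of a divide and a modulo:
 *
 *	page_id = desc_index >> soc->tx_desc[pool_id].page_divider;
 *	offset  = desc_index & soc->tx_desc[pool_id].offset_filter;
 *
 * The field names match this file; the actual fast-path lookup that consumes
 * these fields lives in the descriptor helpers in dp_tx_desc.h, not here.
 */
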
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required is large, since the number of
 * outstanding MSDUs stored in TQM queues and LMAC queues scales with the
 * number of clients (1024 clients x 3 radios).
 *
 * To avoid allocating one large contiguous block of memory, the qdf
 * multi-page allocation helper is used to allocate memory in multiple
 * pages. The allocated memory is then walked page by page and each
 * descriptor is linked to the next one, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternatively, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}

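/*
 * Illustrative caller sketch (an assumption, not part of this file): the
 * attach path is expected to allocate one pool per Tx ring and roll back any
 * pools already allocated if a later allocation fails, roughly like this:
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < num_pool; i++) {
 *		if (dp_tx_desc_pool_alloc(soc, i, num_elem) !=
 *		    QDF_STATUS_SUCCESS) {
 *			while (i--)
 *				dp_tx_desc_pool_free(soc, i);
 *			return QDF_STATUS_E_NOMEM;
 *		}
 *	}
 *
 * The real attach/detach sequence lives in dp_tx.c; treat this only as a
 * reading aid for how dp_tx_desc_pool_alloc() and dp_tx_desc_pool_free()
 * are meant to pair up.
 */
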
/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also fairly large: a single element is 24 bytes,
	 * so 2K elements take 48 Kbytes.
	 * Hence multi-page cacheable memory has to be allocated here as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * The first element of each new page should point
			 * at the start of the next coherent DMA page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

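/*
 * Reading aid (an assumption, not part of this file): after the loop above,
 * each cacheable dp_tx_ext_desc_elem_s in the freelist carries the CPU
 * virtual address (vaddr) and DMA address (paddr) of one HW-readable
 * extension descriptor slot in the coherent pages. A consumer in the tx path
 * is expected to pop an element, fill the HW descriptor through elem->vaddr,
 * and hand elem->paddr to the hardware, roughly:
 *
 *	struct dp_tx_ext_desc_elem_s *elem;
 *
 *	elem = dp_tx_ext_desc_alloc(soc, pool_id);
 *	if (elem) {
 *		// fill the extension descriptor via elem->vaddr (HAL setters)
 *		// program elem->paddr as the MSDU extension pointer for HW
 *	}
 *
 * The freelist accessors are declared in dp_tx_desc.h; the exact HAL setters
 * vary by target, so this is only a sketch of the intended usage.
 */
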
/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of elements
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
			soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}

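/*
 * Reading aid (an assumption, not part of this file): the TSO segment pool is
 * a plain singly linked freelist of individually allocated qdf_tso_seg_elem_t
 * nodes, protected by tx_tso_desc[pool_id].lock. A consumer would take and
 * return segments roughly like this:
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
 *	seg = soc->tx_tso_desc[pool_id].freelist;
 *	if (seg) {
 *		soc->tx_tso_desc[pool_id].freelist = seg->next;
 *		soc->tx_tso_desc[pool_id].num_free--;
 *	}
 *	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
 *
 * The real fast-path helpers in dp_tx_desc.h wrap this pattern; the sketch
 * only shows the data-structure invariant the pool functions below rely on.
 */
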
/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %pK pool_id %d"),
					soc, pool_id);
			goto fail;
		}
		soc->tx_tso_num_seg[pool_id].num_free++;

		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *			      the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool_id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif
533