/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
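
/*
 * Note: without DESC_PARTITION, DP_TX_DESC_SIZE() rounds the element size up
 * to a power of two (qdf_get_pwr2()), and DP_TX_DESC_PAGE_DIVIDER()
 * precomputes page_divider = log2(descriptors per page) and
 * offset_filter = (descriptors per page - 1). A descriptor index can then
 * presumably be split into a page number (index >> page_divider) and an
 * offset within that page (index & offset_filter) without any division.
 */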

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
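/*
 * With Tx flow control V2, per-pool descriptor accounting is expected to be
 * handled by the flow-pool code, so this variant is intentionally an empty
 * stub.
 */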
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the descriptor pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for the SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required is large, since, based on the
 * number of clients (1024 clients x 3 radios), the number of outstanding
 * MSDUs stored in TQM queues and LMAC queues will be significant.
 *
 * To avoid allocating a large contiguous block of memory, the qdf
 * multi_page_alloc function is used to allocate the memory across multiple
 * pages. The allocated memory is then walked page by page and each
 * descriptor is linked to the next one, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV-level
 * flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
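	/*
	 * Build the free list: it starts at the first cacheable page, and
	 * qdf_mem_multi_page_link() chains each descriptor to the next one,
	 * taking care of page boundaries.
	 */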
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
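	/*
	 * Each ID packs the pool id, the page index and the offset within
	 * the page into separate bit fields (the DP_TX_DESC_ID_*_OS macros
	 * give the bit positions), so a descriptor can later be recovered
	 * from its ID alone, e.g. on the Tx completion path.
	 */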
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the descriptor pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. It therefore has to be allocated as
	 * multi-page cacheable memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
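	/*
	 * The first element of each page takes the page base virtual/DMA
	 * address; every following element is offset by elem_size from the
	 * previous one, so each element lands elem_size bytes after the
	 * previous one within the same coherent page.
	 */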
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page, so it should point
			 * at the start of the next coherent page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link over flow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

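	/* Error unwind: release whatever was allocated, in reverse order */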
free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of elements
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	qdf_mem_multi_pages_alloc(soc->osdev,
				  &tso_desc_pool->desc_pages,
				  desc_size,
				  num_elem, 0, true);

	if (!tso_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
				  *tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto free_tso_desc;
	}
	TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

free_tso_desc:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);

	return QDF_STATUS_E_FAULT;
}

/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&tso_desc_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);
	tso_desc_pool->freelist = NULL;
	tso_desc_pool->num_free = 0;
	tso_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&tso_desc_pool->lock);
	qdf_spinlock_destroy(&tso_desc_pool->lock);
	return;
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                              fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	qdf_mem_multi_pages_alloc(soc->osdev,
				  &tso_num_seg_pool->desc_pages,
				  desc_size,
				  num_elem, 0, true);
	if (!tso_num_seg_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto fail;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
				  *tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *			      the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool_id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	qdf_spin_lock_bh(&tso_num_seg_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);
	tso_num_seg_pool->freelist = NULL;
	tso_num_seg_pool->num_free = 0;
	tso_num_seg_pool->num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
	qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	return;
}

#else
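/* Stub implementations used when FEATURE_TSO is not enabled */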
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}
#endif