/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
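
/*
 * Worked example (illustrative): for a pool with 128 descriptors per
 * page, the loop above counts 8 significant bits, so page_divider = 7
 * and offset_filter = 0x7F. A flat descriptor index then splits into a
 * (page, offset) pair with two cheap operations:
 *
 *	page_id = index >> soc->tx_desc[pool_id].page_divider;
 *	offset  = index & soc->tx_desc[pool_id].offset_filter;
 *
 * The mask trick only works when the per-page descriptor count is a
 * power of two, which DP_TX_DESC_SIZE() arranges by rounding the
 * element size up to a power of two via qdf_get_pwr2().
 */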

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize Tx descriptor pool counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
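/*
 * With flow-control V2 enabled, per-pool free/allocated accounting is
 * owned by the Tx flow-pool logic, so this hook is intentionally a
 * no-op in that configuration.
 */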
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate a Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to allocate
 * @num_elem: Number of descriptor elements in the pool
 *
 * This function allocates memory for the SW Tx descriptors used within
 * the host Tx data path. The number of Tx descriptors required is
 * large: with up to 1024 clients across 3 radios, the outstanding MSDUs
 * held in TQM queues and LMAC queues add up to a significant count.
 *
 * To avoid one large contiguous allocation, qdf_mem_multi_pages_alloc()
 * is used to spread the pool across multiple pages. The descriptors are
 * then walked page by page and each one is linked to the next, taking
 * care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, one pool is allocated
 * per ring; this minimizes lock contention when hard_start_xmit is
 * called from multiple CPUs. Alternately, multiple pools can back
 * multiple VDEVs for VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	if (!soc->dp_soc_reinit)
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tx_desc_pool->desc_pages,
					  desc_size, num_elem,
					  0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}
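
	/*
	 * Example (illustrative): a completion-side lookup can unpack
	 * one of these IDs with the DP_TX_DESC_ID_*_MASK macros that
	 * accompany the shifts used above:
	 *
	 *	pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >>
	 *			DP_TX_DESC_ID_POOL_OS;
	 *	page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >>
	 *			DP_TX_DESC_ID_PAGE_OS;
	 *	offset  = (id & DP_TX_DESC_ID_OFFSET_MASK);
	 */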

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}
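
/*
 * Usage sketch (illustrative only; the real attach-time wiring lives in
 * the Tx init path): allocating one pool per Tx ring keeps
 * hard_start_xmit callers on different CPUs off each other's locks:
 *
 *	for (i = 0; i < num_pool; i++) {
 *		if (dp_tx_desc_pool_alloc(soc, i, num_desc) !=
 *		    QDF_STATUS_SUCCESS)
 *			goto fail;
 *	}
 */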

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate Tx extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool ID
 * @num_elem: number of descriptor elements in the pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
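	/*
	 * Note: qdf_get_dma_mem_context() takes the member name as a
	 * bare token, so "memctx" below selects the field inside the
	 * ext desc pool struct, not the local variable receiving it.
	 */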
	memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
	if (!soc->dp_soc_reinit) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_pages,
					  soc->tx_ext_desc[pool_id].elem_size,
					  soc->tx_ext_desc[pool_id].elem_count,
					  memctx, false);
	}
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable extension descriptor link allocation.
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements take 48 KB. It therefore has to be allocated
	 * as multi-page cacheable memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	if (!soc->dp_soc_reinit) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_link_pages,
					  soc->tx_ext_desc[pool_id].
					  link_elem_size,
					  soc->tx_ext_desc[pool_id].
					  elem_count,
					  0, true);
	}
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page:
			 * point it at the start of the next DMA page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

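/*
 * Design note: each cacheable link element carries the vaddr/paddr of
 * one slot in the DMA-coherent pages, so the hot path can walk a plain
 * freelist while HW still sees physically contiguous extension
 * descriptors within a page. A minimal pop/push sketch, assuming the
 * inline freelist helpers declared in dp_tx_desc.h:
 *
 *	struct dp_tx_ext_desc_elem_s *elem;
 *
 *	elem = dp_tx_ext_desc_alloc(soc, pool_id);
 *	if (elem)
 *		dp_tx_ext_desc_free(soc, elem, pool_id);
 */
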
/**
 * dp_tx_ext_desc_pool_free() - Free the Tx extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - Allocate Tx TSO descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: TSO descriptor pool ID
 * @num_elem: number of descriptor elements in the pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	if (!soc->dp_soc_reinit)
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_desc_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

	if (!tso_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
				  *tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto free_tso_desc;
	}
	TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

free_tso_desc:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);

	return QDF_STATUS_E_FAULT;
}

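/*
 * Usage sketch (illustrative; assumes the dp_tx_tso_desc_alloc()/
 * dp_tx_tso_desc_free() freelist helpers from dp_tx_desc.h): a TSO
 * jumbo frame pops one segment element per MSS-sized segment under the
 * pool lock and returns it on completion:
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	if (seg)
 *		dp_tx_tso_desc_free(soc, pool_id, seg);
 */
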
/**
 * dp_tx_tso_desc_pool_free() - Free the Tx TSO descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: TSO descriptor pool ID
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&tso_desc_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);
	tso_desc_pool->freelist = NULL;
	tso_desc_pool->num_free = 0;
	tso_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&tso_desc_pool->lock);
	qdf_spinlock_destroy(&tso_desc_pool->lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each TSO segment
 * @soc: handle to DP SoC structure
 * @pool_id: descriptor pool ID
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	if (!soc->dp_soc_reinit)
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_num_seg_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);
	if (!tso_num_seg_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto fail;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
				  *tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - Free the pool of descriptors that track
 *                                 the fragments in each TSO segment
 * @soc: handle to DP SoC structure
 * @pool_id: descriptor pool ID
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	qdf_spin_lock_bh(&tso_num_seg_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);
	tso_num_seg_pool->freelist = NULL;
	tso_num_seg_pool->num_free = 0;
	tso_num_seg_pool->num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
	qdf_spinlock_destroy(&tso_num_seg_pool->lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif