/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

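/*
 * Illustrative sketch (not part of the driver build): how the page_divider
 * and offset_filter computed by DP_TX_DESC_PAGE_DIVIDER() are typically
 * consumed to turn a linear descriptor index into a (page, offset) pair
 * without a divide, given that the number of descriptors per page is a
 * power of two. The struct below is a minimal stand-in, not the real
 * dp_tx_desc_pool_s.
 */
#if 0
struct example_pool {
	uint16_t page_divider;	/* log2(descriptors per page) */
	uint32_t offset_filter;	/* descriptors per page - 1 */
};

static inline void example_locate_desc(struct example_pool *pool,
				       uint32_t index, uint32_t *page_id,
				       uint32_t *offset)
{
	*page_id = index >> pool->page_divider;	/* index / descs per page */
	*offset = index & pool->offset_filter;	/* index % descs per page */
}
#endif
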
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize pool counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				  uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for the SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required can be large: with, for example,
 * 1024 clients per radio across 3 radios, the outstanding MSDUs stored in
 * TQM queues and LMAC queues add up to a significant count.
 *
 * To avoid allocating a large contiguous memory region, this function uses
 * the qdf multi_page_alloc facility to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV-level
 * flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tx_desc_pool->desc_pages,
					  desc_size, num_elem,
					  0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}

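/*
 * Illustrative sketch (not part of the driver build): decoding the unique
 * descriptor ID composed in dp_tx_desc_pool_alloc() above. This assumes
 * the usual DP_TX_DESC_ID_*_MASK/_OS mask/shift macros from dp_tx_desc.h;
 * the exact field widths depend on that header.
 */
#if 0
static inline void example_decode_tx_desc_id(uint32_t id, uint8_t *pool_id,
					     uint32_t *page_id,
					     uint32_t *offset)
{
	*pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS;
	*page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
	*offset = (id & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS;
}
#endif
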
/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_pages,
					  soc->tx_ext_desc[pool_id].elem_size,
					  soc->tx_ext_desc[pool_id].elem_count,
					  memctx, false);
	}
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also fairly large: a single element is
	 * 24 bytes, so 2K elements take 48 KB. Hence it must be
	 * allocated as multi-page cacheable memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_link_pages,
					  soc->tx_ext_desc[pool_id].
					  link_elem_size,
					  soc->tx_ext_desc[pool_id].
					  elem_count,
					  0, true);
	}
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page: point it at the
			 * start of the next DMA page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

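/*
 * Illustrative sketch (not part of the driver build): consuming the
 * freelist built above. Each cacheable link element carries the
 * vaddr/paddr of one coherent extension descriptor, so a pop under the
 * pool lock hands the caller a buffer that HW can reference directly.
 * This is a simplified stand-in for the real dp_tx_ext_desc_alloc()
 * helper declared in dp_tx_desc.h.
 */
#if 0
static struct dp_tx_ext_desc_elem_s *
example_ext_desc_alloc(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_elem_s *elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[pool_id].lock);
	elem = soc->tx_ext_desc[pool_id].freelist;
	if (elem) {
		soc->tx_ext_desc[pool_id].freelist = elem->next;
		soc->tx_ext_desc[pool_id].num_free--;
	}
	qdf_spin_unlock_bh(&soc->tx_ext_desc[pool_id].lock);
	return elem;
}
#endif
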
/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_desc_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

	if (!tso_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
				  *tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto free_tso_desc;
	}
	TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

free_tso_desc:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);

	return QDF_STATUS_E_FAULT;
}

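/*
 * Illustrative sketch (not part of the driver build): taking one TSO
 * segment element from the freelist built above and returning it later.
 * This is a simplified stand-in for the real dp_tx_tso_desc_alloc()/
 * dp_tx_tso_desc_free() helpers declared in dp_tx_desc.h.
 */
#if 0
static struct qdf_tso_seg_elem_t *
example_tso_seg_alloc(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];
	struct qdf_tso_seg_elem_t *elem;

	qdf_spin_lock_bh(&pool->lock);
	elem = pool->freelist;
	if (elem) {
		pool->freelist = elem->next;
		pool->num_free--;
	}
	qdf_spin_unlock_bh(&pool->lock);
	return elem;
}

static void example_tso_seg_free(struct dp_soc *soc, uint8_t pool_id,
				 struct qdf_tso_seg_elem_t *elem)
{
	struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&pool->lock);
	elem->next = pool->freelist;	/* push back onto the freelist */
	pool->freelist = elem;
	pool->num_free++;
	qdf_spin_unlock_bh(&pool->lock);
}
#endif
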
/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&tso_desc_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);
	tso_desc_pool->freelist = NULL;
	tso_desc_pool->num_free = 0;
	tso_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&tso_desc_pool->lock);
	qdf_spinlock_destroy(&tso_desc_pool->lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_num_seg_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);
	if (!tso_num_seg_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso num seg allocation - overflow num link");
		goto fail;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
				  *tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *                                 the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	qdf_spin_lock_bh(&tso_num_seg_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);
	tso_num_seg_pool->freelist = NULL;
	tso_num_seg_pool->num_free = 0;
	tso_num_seg_pool->num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
	qdf_spinlock_destroy(&tso_num_seg_pool->lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif