/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

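/*
 * Worked example (illustrative only): if num_desc_per_page is 64,
 * offset_filter becomes 63 and the while loop above runs seven times
 * (64, 32, 16, 8, 4, 2, 1), leaving sig_bit = 7 and page_divider = 6.
 * A descriptor index can then be split without a division:
 *
 *	page_id = index >> page_divider;	// index / 64
 *	offset  = index & offset_filter;	// index % 64
 *
 * This works because DP_TX_DESC_SIZE() rounds the element size up to a
 * power of two, which keeps num_desc_per_page a power of two as well.
 */
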
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

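/*
 * Note (inference from the #ifdef above, not stated in this file): with
 * QCA_LL_TX_FLOW_CONTROL_V2 the per-pool descriptor accounting is handled
 * by the Tx flow-control code, so the counter initializer is deliberately
 * a no-op.
 */
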
/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: Descriptor pool ID to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required can be large: with many clients
 * (e.g. 1024 clients x 3 radios), the number of outstanding MSDUs stored
 * in TQM queues and LMAC queues becomes significant.
 *
 * To avoid allocating one large contiguous block, the qdf
 * multi_page_alloc helper is used to spread the allocation across
 * multiple pages. The allocated memory is then walked page by page and
 * each descriptor is linked to the next one, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one per ring. This minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs. Alternately, multiple
 * pools can be used for multiple VDEVs for VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tx_desc_pool->desc_pages,
					  desc_size, num_elem,
					  0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
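	/*
	 * Illustrative encoding example, assuming the shift definitions in
	 * dp_tx_desc.h (DP_TX_DESC_ID_POOL_OS = 15, DP_TX_DESC_ID_PAGE_OS = 5;
	 * an assumption, check the header for this revision): pool_id 2,
	 * page_id 3 and offset 7 would yield
	 *
	 *	id = (2 << 15) | (3 << 5) | 7 = 0x10067
	 *
	 * so the Tx completion path can recover pool, page and offset from
	 * the descriptor ID with masks and shifts alone.
	 */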
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}

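/*
 * Usage sketch (illustrative only, not code from this file): a caller such
 * as the SoC attach path would typically create one pool per Tx ring,
 *
 *	for (i = 0; i < num_pool; i++) {
 *		if (dp_tx_desc_pool_alloc(soc, i, num_desc))
 *			goto fail;	// hypothetical error label
 *	}
 *
 * pairing each successful dp_tx_desc_pool_alloc() with a matching
 * dp_tx_desc_pool_free() on teardown.
 */
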
/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: Descriptor pool ID to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool ID to allocate
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_pages,
					  soc->tx_ext_desc[pool_id].elem_size,
					  soc->tx_ext_desc[pool_id].elem_count,
					  memctx, false);
	}
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Allocate it as multi-page cacheable
	 * memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_link_pages,
					  soc->tx_ext_desc[pool_id].
					  link_elem_size,
					  soc->tx_ext_desc[pool_id].
					  elem_count,
					  0, true);
	}
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
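	/*
	 * Illustrative layout note: within one DMA page the elements are
	 * packed back to back, so element k of a page sits at
	 *
	 *	vaddr = page_v_addr_start + k * elem_size
	 *	paddr = page_p_addr + k * elem_size
	 *
	 * The loop below walks the cacheable link list and derives each
	 * element's address from its predecessor, advancing page_info at
	 * every page boundary.
	 */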
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * The first element of a new page should point at
			 * the start of the next DMA page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	/*
	 * Note: "memctx" below is the struct field name consumed by the
	 * qdf_get_dma_mem_context() macro, not a local variable.
	 */
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_desc_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

	if (!tso_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
				  *tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		goto free_tso_desc;
	}
	TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

free_tso_desc:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);

	return QDF_STATUS_E_FAULT;
}

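/*
 * Consumer sketch (illustrative only; the actual alloc/free helpers live
 * elsewhere in the Tx path): a TSO segment is taken from and returned to
 * the freelist under the pool lock, along the lines of
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	seg = tso_desc_pool->freelist;
 *	if (seg) {
 *		tso_desc_pool->freelist = seg->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */
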
/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&tso_desc_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_desc_pool->desc_pages, 0, true);
	tso_desc_pool->freelist = NULL;
	tso_desc_pool->num_free = 0;
	tso_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&tso_desc_pool->lock);
	qdf_spinlock_destroy(&tso_desc_pool->lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_num_seg_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);
	if (!tso_num_seg_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso num seg allocation - overflow num link");
		goto fail;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
				  *tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;

	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *                                 the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	qdf_spin_lock_bh(&tso_num_seg_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &tso_num_seg_pool->desc_pages, 0, true);
	tso_num_seg_pool->freelist = NULL;
	tso_num_seg_pool->num_free = 0;
	tso_num_seg_pool->num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
	qdf_spinlock_destroy(&tso_num_seg_pool->lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif