/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/*
 * 21 bits cookie
 * 3 bits pool id 0 ~ 7,      mask 0x1C0000, offset 18
 * 8 bits page id 0 ~ 255,    mask 0x03FC00, offset 10
 * 10 bits offset id 0 ~ 1023 mask 0x0003FF, offset 0
 */
/* TODO: Is a ring ID needed in the cookie? */
#define DP_TX_DESC_ID_POOL_MASK    0x1C0000
#define DP_TX_DESC_ID_POOL_OS      18
#define DP_TX_DESC_ID_PAGE_MASK    0x03FC00
#define DP_TX_DESC_ID_PAGE_OS      10
#define DP_TX_DESC_ID_OFFSET_MASK  0x0003FF
#define DP_TX_DESC_ID_OFFSET_OS    0
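
/*
 * Illustrative helper (hypothetical, not part of the driver API): a minimal
 * sketch of how a 21-bit descriptor ID is composed from the pool, page and
 * offset fields using the masks/offsets defined above. Fields are assumed
 * to already be within their bit-width ranges.
 */
static inline uint32_t dp_tx_desc_id_compose_example(uint8_t pool_id,
		uint16_t page_id, uint16_t offset)
{
	return ((uint32_t)pool_id << DP_TX_DESC_ID_POOL_OS) |
		((uint32_t)page_id << DP_TX_DESC_ID_PAGE_OS) |
		((uint32_t)offset << DP_TX_DESC_ID_OFFSET_OS);
}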

#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock(lock)
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
/**
 * dp_tx_desc_alloc() - Allocate a software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: id of the pool to allocate from
 *
 * Return: pointer to the allocated descriptor, or NULL if the pool is
 *	   exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	/* tx_desc is the (non-NULL) freelist head, so no re-check is needed */
	soc->tx_desc[desc_pool_id].freelist = tx_desc->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	DP_STATS_INC(soc, tx.desc_in_use, 1);
	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}
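
/*
 * Usage sketch (illustrative only, not part of the driver API): a caller
 * typically pairs dp_tx_desc_alloc() with dp_tx_desc_free() on the same
 * pool, e.g.
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;	-- pool exhausted
 *	...populate the descriptor and hand it to the hardware ring...
 *	dp_tx_desc_free(soc, desc, pool_id);	-- on Tx completion
 */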

/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *				 from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: id of the pool to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple Tx descriptors and links them into a NULL-terminated
 * chain.
 *
 * Return: pointer to the first descriptor of the chain, or NULL if the
 *	   pool cannot satisfy the request
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
			(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s, No Free Desc: Available(%d) num_requested(%d)",
			__func__, soc->tx_desc[desc_pool_id].num_free,
			num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}

	/* Mark the last descriptor of the chain as allocated as well; the
	 * loop above only covers the first num_requested - 1 descriptors.
	 */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	/* Account for all num_requested descriptors, not count, which stops
	 * at num_requested - 1.
	 */
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
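
/*
 * Usage sketch (illustrative only): the chain returned by
 * dp_tx_desc_alloc_multiple() is linked through ->next and NULL-terminated,
 * so it can be consumed as
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, pool, n);
 *
 *	while (desc) {
 *		struct dp_tx_desc_s *next = desc->next;
 *		...use desc...
 *		desc = next;
 *	}
 */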

/**
 * dp_tx_desc_free() - Free a Tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: id of the pool the descriptor belongs to
 *
 * Return: none
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	DP_STATS_DEC(soc, tx.desc_in_use, 1);
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

/**
 * dp_tx_desc_find() - find a Tx descriptor by its cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool id decoded from the descriptor ID
 * @page_id: page id decoded from the descriptor ID
 * @offset: offset within the page, decoded from the descriptor ID
 *
 * Use the fields decoded from a Tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID fields
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	return soc->tx_desc[pool_id].desc_pages.cacheable_pages[page_id] +
		soc->tx_desc[pool_id].elem_size * offset;
}
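
/*
 * Illustrative helper (hypothetical, not part of the driver API): a minimal
 * sketch showing how a descriptor ID decodes into the pool/page/offset
 * fields consumed by dp_tx_desc_find(), using the DP_TX_DESC_ID_* masks
 * defined above.
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find_by_id_example(
		struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}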

/**
 * dp_tx_ext_desc_alloc() - Get a Tx extension descriptor from the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: pointer to a Tx extension descriptor element, or NULL if the
 *	   pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;

	/* Guard against an exhausted pool instead of dereferencing NULL */
	if (c_elem) {
		soc->tx_ext_desc[desc_pool_id].freelist = c_elem->next;
		soc->tx_ext_desc[desc_pool_id].num_free--;
	}
	TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release a Tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: extension descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	/* keep num_free consistent with dp_tx_ext_desc_free_multiple() */
	soc->tx_ext_desc[desc_pool_id].num_free++;
	TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple Tx extension descriptors
 *				    and attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor chain to be freed
 * @desc_pool_id: target pool id
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes;
	 * assert on the parameter, not on the still-uninitialized head
	 */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - allocate a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor is allocated
 *
 * Allocates a TSO segment element from the free list held in the soc.
 *
 * Return: pointer to the TSO segment element, or NULL if the pool is
 *	   exhausted
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - free a TSO segment element
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the soc.
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element is allocated
 *
 * Return: pointer to the TSO num-seg element, or NULL if the pool is
 *	   exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a TSO num-seg element to the pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: TSO num-seg element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	TX_DESC_LOCK_LOCK(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif

/**
 * dp_tx_me_alloc_buf() - allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: pointer to a dp_tx_me_buf_t, or NULL if the pool is exhausted
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/**
 * dp_tx_me_free_buf() - free an ME descriptor and return it to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer to be freed
 *
 * Return: none
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}

#endif /* DP_TX_DESC_H */