xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
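/*
 * Sanity check on the cookie layout used below: every descriptor within a
 * page must be addressable by the offset bits of the cookie, so the number
 * of descriptors that fit in one page may not exceed
 * 1 << DP_RX_DESC_PAGE_ID_SHIFT.
 */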
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, desc_size, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
					  desc_size, num_elem, 0, true);
		if (!rx_desc_pool->desc_pages.num_pages) {
			qdf_err("Multi page alloc fail, size=%d, elem=%d",
				desc_size, num_elem);
			return QDF_STATUS_E_NOMEM;
		}
	}

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = num_elem;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie below follows the REO destination ring layout:
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits, of which 20 are used here:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 * (an illustrative decode sketch follows this function)
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}
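
/*
 * Illustrative sketch only (not part of the driver): split a cookie built
 * above back into its fields, using nothing but the shift macros already
 * referenced in this file. The driver keeps its own cookie accessors; this
 * helper merely documents the bit layout described in the comment above.
 */
static inline void dp_rx_desc_cookie_decode_sketch(uint32_t cookie,
						   uint32_t *pool_id,
						   uint32_t *page_id,
						   uint32_t *offset)
{
	/* low bits below DP_RX_DESC_PAGE_ID_SHIFT: offset within one page */
	*offset = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
	/* bits between the two shifts: page ID inside the multi-page pool */
	*page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) &
		   ((1 << (DP_RX_DESC_POOL_ID_SHIFT -
			   DP_RX_DESC_PAGE_ID_SHIFT)) - 1);
	/* remaining high bits: pool ID */
	*pool_id = cookie >> DP_RX_DESC_POOL_ID_SHIFT;
}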

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}
#else
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
	uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i;

	if (!dp_is_soc_reinit(soc)) {
		rx_desc_pool->array =
		qdf_mem_malloc(pool_size *
		sizeof(union dp_rx_desc_list_elem_t));

		if (!(rx_desc_pool->array)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "%s: RX Desc Pool[%d] allocation failed",
				  __func__, pool_id);
			return QDF_STATUS_E_NOMEM;
		}
	}

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	for (i = 0; i < rx_desc_pool->pool_size-1; i++) {
		rx_desc_pool->array[i].next = &rx_desc_pool->array[i+1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	rx_desc_pool->array[i].next = NULL;
	rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
	rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}

			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the returned list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {

		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}
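
/*
 * Illustrative sketch only (not part of the driver): the calling convention
 * of dp_rx_get_free_desc_list(). The request may be served only partially;
 * the return value says how many descriptors were actually unlinked, and
 * desc_list/tail then describe that shorter chain.
 */
static inline uint16_t dp_rx_desc_request_sketch(struct dp_soc *soc,
						 uint32_t pool_id,
						 struct rx_desc_pool *pool,
						 uint16_t wanted)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t got;

	got = dp_rx_get_free_desc_list(soc, pool_id, pool, wanted,
				       &desc_list, &tail);
	if (got < wanted)
		qdf_err("freelist low: wanted %d, got %d", wanted, got);

	return got;
}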

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
	temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
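
/*
 * Illustrative sketch only (not part of the driver): a hypothetical caller
 * that borrows a chain of descriptors with dp_rx_get_free_desc_list() and
 * then hands the whole chain straight back with
 * dp_rx_add_desc_list_to_free_list(). It only demonstrates the calling
 * convention of the two APIs above; real callers live elsewhere in the DP
 * RX path and keep the descriptors they manage to fill with buffers.
 */
static inline void dp_rx_desc_borrow_and_return_sketch(struct dp_soc *soc,
							uint32_t pool_id,
							struct rx_desc_pool *pool,
							uint16_t wanted)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t got;

	/* may return fewer than 'wanted' if the freelist runs dry */
	got = dp_rx_get_free_desc_list(soc, pool_id, pool, wanted,
				       &desc_list, &tail);
	if (!got)
		return;

	/*
	 * 'tail' still points at the last element of the borrowed chain, so
	 * the unused descriptors can be spliced back onto the freelist.
	 */
	dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
					 pool_id, pool);
}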