xref: /wlan-dirver/qca-wifi-host-cmn/wbuff/src/wbuff.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
1 /*
2  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: wbuff.c
21  * wbuff buffer management APIs
22  */
23 
24 #include <wbuff.h>
25 #include "i_wbuff.h"
26 
/* Allocation holder for all wbuff registered modules */
30 struct wbuff_holder wbuff;
31 
32 /**
33  * wbuff_get_pool_slot_from_len() - get pool_slot from length
34  * @len: length of the buffer
35  *
36  * Return: pool slot
37  */
38 static uint8_t wbuff_get_pool_slot_from_len(uint16_t len)
39 {
40 	if ((len > 0) && (len <= WBUFF_LEN_POOL0))
41 		return WBUFF_POOL_0;
42 	else if ((len > WBUFF_LEN_POOL0) && (len <= WBUFF_LEN_POOL1))
43 		return WBUFF_POOL_1;
44 	else if ((len > WBUFF_LEN_POOL1) && (len <= WBUFF_LEN_POOL2))
45 		return WBUFF_POOL_2;
46 	else
47 		return WBUFF_POOL_3;
48 }
49 
50 /**
51  * wbuff_get_len_from_pool_slot() - get len from pool slot
52  * @pool_slot: wbuff pool_slot
53  *
54  * Return: nbuf length from pool slot
55  */
56 static uint32_t wbuff_get_len_from_pool_slot(uint16_t pool_slot)
57 {
58 	uint32_t len = 0;
59 
60 	switch (pool_slot) {
61 	case 0:
62 		len = WBUFF_LEN_POOL0;
63 		break;
64 	case 1:
65 		len = WBUFF_LEN_POOL1;
66 		break;
67 	case 2:
68 		len = WBUFF_LEN_POOL2;
69 		break;
70 	case 3:
71 		len = WBUFF_LEN_POOL3;
72 		break;
73 	default:
74 		len = 0;
75 	}
76 
77 	return len;
78 }
79 
80 /**
81  * wbuff_get_free_mod_slot() - get free module slot
82  *
83  * Return: module slot
84  */
85 static uint8_t wbuff_get_free_mod_slot(void)
86 {
87 	uint8_t mslot = 0;
88 
89 	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
90 		qdf_spin_lock_bh(&wbuff.mod[mslot].lock);
91 		if (!wbuff.mod[mslot].registered) {
92 			wbuff.mod[mslot].registered = true;
93 			qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);
94 			break;
95 		}
96 		qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);
97 	}
98 
99 	return mslot;
100 }
101 
102 /**
103  * wbuff_is_valid_alloc_req() - validate alloc  request
104  * @req: allocation request from registered module
105  * @num: number of pools required
106  *
107  * Return: true if valid wbuff_alloc_request
108  *         false if invalid wbuff_alloc_request
109  */
110 static bool wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req,
111 				     uint8_t num)
112 {
113 	uint16_t psize = 0;
114 	uint8_t alloc = 0, pslot = 0;
115 
116 	for (alloc = 0; alloc < num; alloc++) {
117 		pslot = req[alloc].slot;
118 		psize = req[alloc].size;
119 		if ((pslot > WBUFF_MAX_POOLS - 1) ||
120 		    (psize > wbuff_alloc_max[pslot]))
121 			return false;
122 	}
123 
124 	return true;
125 }
126 
127 /**
128  * wbuff_prepare_nbuf() - allocate nbuf
129  * @mslot: module slot
130  * @pslot: pool slot
131  * @len: length of the buffer
132  * @reserve: nbuf headroom to start with
133  * @align: alignment for the nbuf
134  *
135  * Return: nbuf if success
136  *         NULL if failure
137  */
138 static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t mslot, uint8_t pslot,
139 				     uint32_t len, int reserve, int align)
140 {
141 	qdf_nbuf_t buf;
142 	unsigned long dev_scratch = 0;
143 
144 	buf = qdf_nbuf_alloc(NULL, roundup(len + reserve, align), reserve,
145 			     align, false);
146 	if (!buf)
147 		return NULL;
148 	dev_scratch = mslot;
149 	dev_scratch <<= WBUFF_MSLOT_SHIFT;
150 	dev_scratch |= ((pslot << WBUFF_PSLOT_SHIFT) | 1);
151 	qdf_nbuf_set_dev_scratch(buf, dev_scratch);
152 
153 	return buf;
154 }
155 
156 /**
157  * wbuff_is_valid_handle() - validate wbuff handle
158  * @handle: wbuff handle passed by module
159  *
160  * Return: true - valid wbuff_handle
161  *         false - invalid wbuff_handle
162  */
163 static bool wbuff_is_valid_handle(struct wbuff_handle *handle)
164 {
165 	if ((handle) && (handle->id < WBUFF_MAX_MODULES) &&
166 	    (wbuff.mod[handle->id].registered))
167 		return true;
168 
169 	return false;
170 }
171 
172 QDF_STATUS wbuff_module_init(void)
173 {
174 	struct wbuff_module *mod = NULL;
175 	uint8_t mslot = 0, pslot = 0;
176 
177 	if (!qdf_nbuf_is_dev_scratch_supported()) {
178 		wbuff.initialized = false;
179 		return QDF_STATUS_E_NOSUPPORT;
180 	}
181 
182 	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
183 		mod = &wbuff.mod[mslot];
184 		qdf_spinlock_create(&mod->lock);
185 		for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++)
186 			mod->pool[pslot] = NULL;
187 		mod->registered = false;
188 	}
189 	wbuff.initialized = true;
190 
191 	return QDF_STATUS_SUCCESS;
192 }
193 
194 QDF_STATUS wbuff_module_deinit(void)
195 {
196 	struct wbuff_module *mod = NULL;
197 	uint8_t mslot = 0;
198 
199 	if (!wbuff.initialized)
200 		return QDF_STATUS_E_INVAL;
201 
202 	wbuff.initialized = false;
203 	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
204 		mod = &wbuff.mod[mslot];
205 		if (mod->registered)
206 			wbuff_module_deregister((struct wbuff_mod_handle *)
207 						&mod->handle);
208 		qdf_spinlock_destroy(&mod->lock);
209 	}
210 
211 	return QDF_STATUS_SUCCESS;
212 }
213 
214 struct wbuff_mod_handle *
215 wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num,
216 		      int reserve, int align)
217 {
218 	struct wbuff_module *mod = NULL;
219 	qdf_nbuf_t buf = NULL;
220 	uint32_t len = 0;
221 	uint16_t idx = 0, psize = 0;
222 	uint8_t alloc = 0, mslot = 0, pslot = 0;
223 
224 	if (!wbuff.initialized)
225 		return NULL;
226 
227 	if ((num == 0) || (num > WBUFF_MAX_POOLS))
228 		return NULL;
229 
230 	if (!wbuff_is_valid_alloc_req(req, num))
231 		return NULL;
232 
233 	mslot = wbuff_get_free_mod_slot();
234 	if (mslot == WBUFF_MAX_MODULES)
235 		return NULL;
236 
237 	mod = &wbuff.mod[mslot];
238 
239 	mod->handle.id = mslot;
240 
241 	for (alloc = 0; alloc < num; alloc++) {
242 		pslot = req[alloc].slot;
243 		psize = req[alloc].size;
244 		len = wbuff_get_len_from_pool_slot(pslot);
245 		/**
246 		 * Allocate pool_cnt number of buffers for
247 		 * the pool given by pslot
248 		 */
249 		for (idx = 0; idx < psize; idx++) {
250 			buf = wbuff_prepare_nbuf(mslot, pslot, len, reserve,
251 						 align);
252 			if (!buf)
253 				continue;
254 			if (!mod->pool[pslot]) {
255 				qdf_nbuf_set_next(buf, NULL);
256 				mod->pool[pslot] = buf;
257 			} else {
258 				qdf_nbuf_set_next(buf, mod->pool[pslot]);
259 				mod->pool[pslot] = buf;
260 			}
261 		}
262 	}
263 	mod->reserve = reserve;
264 	mod->align = align;
265 
266 	return (struct wbuff_mod_handle *)&mod->handle;
267 }
268 
269 QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
270 {
271 	struct wbuff_handle *handle;
272 	struct wbuff_module *mod = NULL;
273 	uint8_t mslot = 0, pslot = 0;
274 	qdf_nbuf_t first = NULL, buf = NULL;
275 
276 	handle = (struct wbuff_handle *)hdl;
277 
278 	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)))
279 		return QDF_STATUS_E_INVAL;
280 
281 	mslot = handle->id;
282 	mod = &wbuff.mod[mslot];
283 
284 	qdf_spin_lock_bh(&mod->lock);
285 	for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) {
286 		first = mod->pool[pslot];
287 		while (first) {
288 			buf = first;
289 			first = qdf_nbuf_next(buf);
290 			qdf_nbuf_free(buf);
291 		}
292 	}
293 	mod->registered = false;
294 	qdf_spin_unlock_bh(&mod->lock);
295 
296 	return QDF_STATUS_SUCCESS;
297 }
298 
299 qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len)
300 {
301 	struct wbuff_handle *handle;
302 	struct wbuff_module *mod = NULL;
303 	uint8_t mslot = 0;
304 	uint8_t pslot = 0;
305 	qdf_nbuf_t buf = NULL;
306 
307 	handle = (struct wbuff_handle *)hdl;
308 
309 	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len ||
310 	    (len > WBUFF_MAX_BUFFER_SIZE))
311 		return NULL;
312 
313 	mslot = handle->id;
314 	pslot = wbuff_get_pool_slot_from_len(len);
315 	mod = &wbuff.mod[mslot];
316 
317 	qdf_spin_lock_bh(&mod->lock);
318 	if (mod->pool[pslot]) {
319 		buf = mod->pool[pslot];
320 		mod->pool[pslot] = qdf_nbuf_next(buf);
321 		mod->pending_returns++;
322 	}
323 	qdf_spin_unlock_bh(&mod->lock);
324 	if (buf)
325 		qdf_nbuf_set_next(buf, NULL);
326 
327 	return buf;
328 }
329 
330 qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
331 {
332 	qdf_nbuf_t buffer = buf;
333 	unsigned long slot_info = 0;
334 	uint8_t mslot = 0, pslot = 0;
335 
336 	if (!wbuff.initialized)
337 		return buffer;
338 
339 	slot_info = qdf_nbuf_get_dev_scratch(buf);
340 	if (!slot_info)
341 		return buffer;
342 
343 	mslot = (slot_info & WBUFF_MSLOT_BITMASK) >> WBUFF_MSLOT_SHIFT;
344 	pslot = (slot_info & WBUFF_PSLOT_BITMASK) >> WBUFF_PSLOT_SHIFT;
345 	qdf_nbuf_reset(buffer, wbuff.mod[mslot].reserve, wbuff.mod[mslot].
346 		       align);
347 	qdf_spin_lock_bh(&wbuff.mod[mslot].lock);
348 	if (wbuff.mod[mslot].registered) {
349 		qdf_nbuf_set_next(buffer, wbuff.mod[mslot].pool[pslot]);
350 		wbuff.mod[mslot].pool[pslot] = buffer;
351 		wbuff.mod[mslot].pending_returns--;
352 		buffer = NULL;
353 	}
354 	qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);
355 
356 	return buffer;
357 }
358