/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wbuff.c
 * wbuff buffer management APIs
 */

#include <wbuff.h>
#include "i_wbuff.h"

/**
 * Allocation holder array for all wbuff registered modules
 */
struct wbuff_holder wbuff;

/**
 * wbuff_get_pool_slot_from_len() - get pool_slot from length
 * @len: length of the buffer
 *
 * Return: slot of the smallest pool that can accommodate @len
 */
static uint8_t wbuff_get_pool_slot_from_len(uint16_t len)
{
	if ((len > 0) && (len <= WBUFF_LEN_POOL0))
		return WBUFF_POOL_0;
	else if ((len > WBUFF_LEN_POOL0) && (len <= WBUFF_LEN_POOL1))
		return WBUFF_POOL_1;
	else if ((len > WBUFF_LEN_POOL1) && (len <= WBUFF_LEN_POOL2))
		return WBUFF_POOL_2;
	else
		return WBUFF_POOL_3;
}

/**
 * wbuff_get_len_from_pool_slot() - get len from pool slot
 * @pool_slot: wbuff pool_slot
 *
 * Return: nbuf length for the given pool slot
 */
static uint32_t wbuff_get_len_from_pool_slot(uint16_t pool_slot)
{
	uint32_t len = 0;

	switch (pool_slot) {
	case 0:
		len = WBUFF_LEN_POOL0;
		break;
	case 1:
		len = WBUFF_LEN_POOL1;
		break;
	case 2:
		len = WBUFF_LEN_POOL2;
		break;
	case 3:
		len = WBUFF_LEN_POOL3;
		break;
	default:
		len = 0;
	}

	return len;
}

/**
 * wbuff_get_free_mod_slot() - get a free module slot
 *
 * Return: free module slot, or WBUFF_MAX_MODULES if all slots are in use
 */
static uint8_t wbuff_get_free_mod_slot(void)
{
	uint8_t mslot = 0;

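	/*
	 * Claim the first unregistered slot. Taking the per-slot lock makes
	 * the check-and-set atomic, so two callers cannot claim the same
	 * slot.
	 */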
	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
		qdf_spin_lock_bh(&wbuff.mod[mslot].lock);
		if (!wbuff.mod[mslot].registered) {
			wbuff.mod[mslot].registered = true;
			qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);
			break;
		}
		qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);
	}

	return mslot;
}

/**
 * wbuff_is_valid_alloc_req() - validate alloc request
 * @req: allocation request from registered module
 * @num: number of pools required
 *
 * Return: true if valid wbuff_alloc_request
 *         false if invalid wbuff_alloc_request
 */
static bool wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req,
				     uint8_t num)
{
	uint16_t psize = 0;
	uint8_t alloc = 0, pslot = 0;

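	/*
	 * Each entry must name a valid pool slot and must not request more
	 * buffers than wbuff_alloc_max[] allows for that pool.
	 */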
	for (alloc = 0; alloc < num; alloc++) {
		pslot = req[alloc].slot;
		psize = req[alloc].size;
		if ((pslot > WBUFF_MAX_POOLS - 1) ||
		    (psize > wbuff_alloc_max[pslot]))
			return false;
	}

	return true;
}

/**
 * wbuff_prepare_nbuf() - allocate nbuf
 * @mslot: module slot
 * @pslot: pool slot
 * @len: length of the buffer
 * @reserve: nbuf headroom to start with
 * @align: alignment for the nbuf
 *
 * Return: nbuf if success
 *         NULL if failure
 */
static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t mslot, uint8_t pslot,
				     uint32_t len, int reserve, int align)
{
	qdf_nbuf_t buf;
	unsigned long dev_scratch = 0;

	buf = qdf_nbuf_alloc(NULL, roundup(len + reserve, align), reserve,
			     align, false);
	if (!buf)
		return NULL;
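	/*
	 * Record the owning module and pool slots in the nbuf dev_scratch
	 * field: module slot above WBUFF_MSLOT_SHIFT, pool slot above
	 * WBUFF_PSLOT_SHIFT, and bit 0 set so that a wbuff-owned buffer
	 * always carries a non-zero dev_scratch value. wbuff_buff_put()
	 * decodes this to return the buffer to the right pool.
	 */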
	dev_scratch = mslot;
	dev_scratch <<= WBUFF_MSLOT_SHIFT;
	dev_scratch |= ((pslot << WBUFF_PSLOT_SHIFT) | 1);
	qdf_nbuf_set_dev_scratch(buf, dev_scratch);

	return buf;
}

/**
 * wbuff_is_valid_handle() - validate wbuff handle
 * @handle: wbuff handle passed by module
 *
 * Return: true - valid wbuff_handle
 *         false - invalid wbuff_handle
 */
static bool wbuff_is_valid_handle(struct wbuff_handle *handle)
{
	if ((handle) && (handle->id < WBUFF_MAX_MODULES) &&
	    (wbuff.mod[handle->id].registered))
		return true;

	return false;
}

QDF_STATUS wbuff_module_init(void)
{
	struct wbuff_module *mod = NULL;
	uint8_t mslot = 0, pslot = 0;

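	/*
	 * wbuff relies on the nbuf dev_scratch field to record which module
	 * and pool a buffer belongs to, so it cannot operate without
	 * dev_scratch support.
	 */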
	if (!qdf_nbuf_is_dev_scratch_supported()) {
		wbuff.initialized = false;
		return QDF_STATUS_E_NOSUPPORT;
	}

	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
		mod = &wbuff.mod[mslot];
		qdf_spinlock_create(&mod->lock);
		for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++)
			mod->pool[pslot] = NULL;
		mod->registered = false;
	}
	wbuff.initialized = true;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS wbuff_module_deinit(void)
{
	struct wbuff_module *mod = NULL;
	uint8_t mslot = 0;

	if (!wbuff.initialized)
		return QDF_STATUS_E_INVAL;

	wbuff.initialized = false;
	for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) {
		mod = &wbuff.mod[mslot];
		if (mod->registered)
			wbuff_module_deregister((struct wbuff_mod_handle *)
						&mod->handle);
		qdf_spinlock_destroy(&mod->lock);
	}

	return QDF_STATUS_SUCCESS;
}

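/*
 * Illustrative sketch (not part of the driver): how a client might register
 * with wbuff. The request fields (.slot, .size) and the pool identifiers
 * match what wbuff_is_valid_alloc_req() expects; the buffer counts, headroom
 * and alignment values below are arbitrary example choices.
 *
 *	struct wbuff_alloc_request req[2] = {
 *		{ .slot = WBUFF_POOL_0, .size = 16 },
 *		{ .slot = WBUFF_POOL_1, .size = 8 },
 *	};
 *	struct wbuff_mod_handle *hdl;
 *
 *	hdl = wbuff_module_register(req, 2, 64, 4);
 *	if (!hdl)
 *		return QDF_STATUS_E_FAILURE;
 */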
struct wbuff_mod_handle *
wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num,
		      int reserve, int align)
{
	struct wbuff_module *mod = NULL;
	qdf_nbuf_t buf = NULL;
	uint32_t len = 0;
	uint16_t idx = 0, psize = 0;
	uint8_t alloc = 0, mslot = 0, pslot = 0;

	if (!wbuff.initialized)
		return NULL;

	if ((num == 0) || (num > WBUFF_MAX_POOLS))
		return NULL;

	if (!wbuff_is_valid_alloc_req(req, num))
		return NULL;

	mslot = wbuff_get_free_mod_slot();
	if (mslot == WBUFF_MAX_MODULES)
		return NULL;

	mod = &wbuff.mod[mslot];

	mod->handle.id = mslot;

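	/*
	 * Pre-allocate each requested pool. Buffers are chained through
	 * qdf_nbuf_next() so that every pool is a singly linked free list
	 * with mod->pool[pslot] as its head.
	 */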
	for (alloc = 0; alloc < num; alloc++) {
		pslot = req[alloc].slot;
		psize = req[alloc].size;
		len = wbuff_get_len_from_pool_slot(pslot);
		/* Allocate psize buffers for the pool given by pslot */
		for (idx = 0; idx < psize; idx++) {
			buf = wbuff_prepare_nbuf(mslot, pslot, len, reserve,
						 align);
			if (!buf)
				continue;
			qdf_nbuf_set_next(buf, mod->pool[pslot]);
			mod->pool[pslot] = buf;
		}
	}
	mod->reserve = reserve;
	mod->align = align;

	return (struct wbuff_mod_handle *)&mod->handle;
}

QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
{
	struct wbuff_handle *handle;
	struct wbuff_module *mod = NULL;
	uint8_t mslot = 0, pslot = 0;
	qdf_nbuf_t first = NULL, buf = NULL;

	handle = (struct wbuff_handle *)hdl;

	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)))
		return QDF_STATUS_E_INVAL;

	mslot = handle->id;
	mod = &wbuff.mod[mslot];

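	/*
	 * Free every buffer still sitting in this module's pools. Buffers
	 * currently handed out to callers are not on these lists; once the
	 * module is unregistered, wbuff_buff_put() hands such buffers back
	 * to the caller instead of re-pooling them.
	 */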
	qdf_spin_lock_bh(&mod->lock);
	for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) {
		first = mod->pool[pslot];
		while (first) {
			buf = first;
			first = qdf_nbuf_next(buf);
			qdf_nbuf_free(buf);
		}
		mod->pool[pslot] = NULL;
	}
	mod->registered = false;
	qdf_spin_unlock_bh(&mod->lock);

	return QDF_STATUS_SUCCESS;
}

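/*
 * Illustrative sketch (not part of the driver): typical get/put flow for a
 * module holding a wbuff handle. The fallback allocation and the reserve/
 * align values are example choices; func_name and line_num only feed the
 * nbuf debug bookkeeping.
 *
 *	qdf_nbuf_t nbuf;
 *
 *	nbuf = wbuff_buff_get(hdl, len, __func__, __LINE__);
 *	if (!nbuf)
 *		nbuf = qdf_nbuf_alloc(NULL, len, reserve, align, false);
 *
 *	(use the buffer)
 *
 *	nbuf = wbuff_buff_put(nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */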
qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len,
			  const char *func_name, uint32_t line_num)
{
	struct wbuff_handle *handle;
	struct wbuff_module *mod = NULL;
	uint8_t mslot = 0;
	uint8_t pslot = 0;
	qdf_nbuf_t buf = NULL;

	handle = (struct wbuff_handle *)hdl;

	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len ||
	    (len > WBUFF_MAX_BUFFER_SIZE))
		return NULL;

	mslot = handle->id;
	pslot = wbuff_get_pool_slot_from_len(len);
	mod = &wbuff.mod[mslot];

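	/*
	 * Pop a buffer from the head of the matching pool, if one is
	 * available. pending_returns tracks how many buffers are currently
	 * out with callers.
	 */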
	qdf_spin_lock_bh(&mod->lock);
	if (mod->pool[pslot]) {
		buf = mod->pool[pslot];
		mod->pool[pslot] = qdf_nbuf_next(buf);
		mod->pending_returns++;
	}
	qdf_spin_unlock_bh(&mod->lock);
	if (buf) {
		qdf_nbuf_set_next(buf, NULL);
		qdf_net_buf_debug_update_node(buf, func_name, line_num);
	}

	return buf;
}

qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
{
	qdf_nbuf_t buffer = buf;
	unsigned long slot_info = 0;
	uint8_t mslot = 0, pslot = 0;

	if (!wbuff.initialized)
		return buffer;

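	/*
	 * A zero dev_scratch value means the nbuf was not allocated by
	 * wbuff; hand it back to the caller for normal freeing.
	 */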
	slot_info = qdf_nbuf_get_dev_scratch(buf);
	if (!slot_info)
		return buffer;

	mslot = (slot_info & WBUFF_MSLOT_BITMASK) >> WBUFF_MSLOT_SHIFT;
	pslot = (slot_info & WBUFF_PSLOT_BITMASK) >> WBUFF_PSLOT_SHIFT;

	if (mslot >= WBUFF_MAX_MODULES || pslot >= WBUFF_MAX_POOLS)
		return NULL;

	qdf_nbuf_reset(buffer, wbuff.mod[mslot].reserve,
		       wbuff.mod[mslot].align);
	qdf_spin_lock_bh(&wbuff.mod[mslot].lock);
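	/*
	 * Re-pool the buffer only if the owning module is still registered;
	 * otherwise return it to the caller, who is responsible for freeing
	 * it.
	 */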
	if (wbuff.mod[mslot].registered) {
		qdf_nbuf_set_next(buffer, wbuff.mod[mslot].pool[pslot]);
		wbuff.mod[mslot].pool[pslot] = buffer;
		wbuff.mod[mslot].pending_returns--;
		buffer = NULL;
	}
	qdf_spin_unlock_bh(&wbuff.mod[mslot].lock);

	return buffer;
}
366