xref: /wlan-dirver/qca-wifi-host-cmn/wbuff/src/wbuff.c (revision aa4f14ce5a0f2d8d7a46d69aa43b7eee261d5846)
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wbuff.c
 * wbuff buffer management APIs
 */
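
/*
 * Typical call flow (an illustrative sketch only; the request values,
 * reserve and align arguments below are hypothetical):
 *
 *	struct wbuff_alloc_request req[] = {
 *		{ .pool_id = 0, .pool_size = 16, .buffer_size = 256 },
 *	};
 *	struct wbuff_mod_handle *hdl;
 *	qdf_nbuf_t buf;
 *
 *	hdl = wbuff_module_register(req, 1, 4, 4, WBUFF_MODULE_WMI_TX);
 *	buf = wbuff_buff_get(hdl, 0, 0, __func__, __LINE__);
 *	// ... use buf ...
 *	buf = wbuff_buff_put(buf);	// NULL means wbuff reclaimed it
 *	wbuff_module_deregister(hdl);
 */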

#include <wbuff.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <qdf_debugfs.h>
#include "i_wbuff.h"

/*
 * Allocation holder array for all wbuff registered modules
 */
struct wbuff_holder wbuff;

/**
 * wbuff_get_pool_slot_from_len() - get pool_id from length
 * @mod: wbuff module reference
 * @len: length of the buffer
 *
 * Return: pool_id of the first initialized pool that can hold @len bytes,
 *         WBUFF_MAX_POOLS if no pool fits
 */
static uint8_t
wbuff_get_pool_slot_from_len(struct wbuff_module *mod, uint16_t len)
{
	struct wbuff_pool *pool;
	uint16_t prev_buf_size = 0;
	int i;

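	/*
	 * Pools are assumed to be laid out in ascending buffer_size
	 * order, so the first initialized pool whose buffer_size can
	 * hold @len is the best fit. If no pool fits, the loop runs
	 * off the end and i == WBUFF_MAX_POOLS is returned.
	 */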
	for (i = 0; i < WBUFF_MAX_POOLS; i++) {
		pool = &mod->wbuff_pool[i];

		if (!pool->initialized)
			continue;

		if ((len > prev_buf_size) && (len <= pool->buffer_size))
			break;

		prev_buf_size = pool->buffer_size;
	}

	return i;
}

/**
 * wbuff_is_valid_alloc_req() - validate alloc request
 * @req: allocation request from registered module
 * @num: number of pools required
 *
 * Return: true if the wbuff_alloc_request is valid
 *         false otherwise
 */
static bool
wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req, uint8_t num)
{
	int i;

	for (i = 0; i < num; i++) {
		if (req[i].pool_id >= WBUFF_MAX_POOLS)
			return false;
	}

	return true;
}

/**
 * wbuff_prepare_nbuf() - allocate nbuf
 * @module_id: module ID
 * @pool_id: pool ID
 * @len: length of the buffer
 * @reserve: nbuf headroom to start with
 * @align: alignment for the nbuf
 *
 * Return: nbuf if success
 *         NULL if failure
 */
static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t module_id, uint8_t pool_id,
				     uint32_t len, int reserve, int align)
{
	qdf_nbuf_t buf;
	unsigned long dev_scratch = 0;
	struct wbuff_module *mod = &wbuff.mod[module_id];
	struct wbuff_pool *wbuff_pool = &mod->wbuff_pool[pool_id];

	buf = qdf_nbuf_page_frag_alloc(NULL, len, reserve, align,
				       &wbuff.pf_cache);
	if (!buf)
		return NULL;
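
	/*
	 * Stamp ownership into the nbuf's dev_scratch field: module ID
	 * in the upper bits, pool ID above bit 0, and bit 0 set so a
	 * non-zero scratch value marks the buffer as wbuff-managed
	 * (wbuff_buff_put() treats a zero scratch as "not ours").
	 */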
	dev_scratch = module_id;
	dev_scratch <<= WBUFF_MODULE_ID_SHIFT;
	dev_scratch |= ((pool_id << WBUFF_POOL_ID_SHIFT) | 1);
	qdf_nbuf_set_dev_scratch(buf, dev_scratch);

	wbuff_pool->mem_alloc += qdf_nbuf_get_allocsize(buf);

	return buf;
}

/**
 * wbuff_is_valid_handle() - validate wbuff handle
 * @handle: wbuff handle passed by module
 *
 * Return: true - valid wbuff_handle
 *         false - invalid wbuff_handle
 */
static bool wbuff_is_valid_handle(struct wbuff_handle *handle)
{
	if (handle && handle->id < WBUFF_MAX_MODULES &&
	    wbuff.mod[handle->id].registered)
		return true;

	return false;
}

static const char *wbuff_get_mod_name(enum wbuff_module_id module_id)
{
	const char *str;

	switch (module_id) {
	case WBUFF_MODULE_WMI_TX:
		str = "WBUFF_MODULE_WMI_TX";
		break;
	case WBUFF_MODULE_CE_RX:
		str = "WBUFF_MODULE_CE_RX";
		break;
	default:
		str = "Invalid Module ID";
		break;
	}

	return str;
}

static void wbuff_debugfs_print(qdf_debugfs_file_t file, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	va_end(args);
}

static int wbuff_stats_debugfs_show(qdf_debugfs_file_t file, void *data)
{
	struct wbuff_module *mod;
	struct wbuff_pool *wbuff_pool;
	int i, j;

	wbuff_debugfs_print(file, "WBUFF POOL STATS:\n");
	wbuff_debugfs_print(file, "=================\n");

	for (i = 0; i < WBUFF_MAX_MODULES; i++) {
		mod = &wbuff.mod[i];

		if (!mod->registered)
			continue;

		wbuff_debugfs_print(file, "Module (%d) : %s\n", i,
				    wbuff_get_mod_name(i));

		wbuff_debugfs_print(file, "%s %25s %20s %20s\n", "Pool ID",
				    "Mem Allocated (In Bytes)",
				    "Wbuff Success Count",
				    "Wbuff Fail Count");

		for (j = 0; j < WBUFF_MAX_POOLS; j++) {
			wbuff_pool = &mod->wbuff_pool[j];

			if (!wbuff_pool->initialized)
				continue;

			wbuff_debugfs_print(file, "%d %30llu %20llu %20llu\n",
					    j, wbuff_pool->mem_alloc,
					    wbuff_pool->alloc_success,
					    wbuff_pool->alloc_fail);
		}
		wbuff_debugfs_print(file, "\n");
	}

	return 0;
}

static int wbuff_stats_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wbuff_stats_debugfs_show,
			   inode->i_private);
}

static const struct file_operations wbuff_stats_fops = {
	.owner          = THIS_MODULE,
	.open           = wbuff_stats_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};

static QDF_STATUS wbuff_debugfs_init(void)
{
	wbuff.wbuff_debugfs_dir =
		qdf_debugfs_create_dir("wbuff", NULL);

	if (!wbuff.wbuff_debugfs_dir)
		return QDF_STATUS_E_FAILURE;

	wbuff.wbuff_stats_dentry =
		qdf_debugfs_create_entry("wbuff_stats", QDF_FILE_USR_READ,
					 wbuff.wbuff_debugfs_dir, NULL,
					 &wbuff_stats_fops);
	if (!wbuff.wbuff_stats_dentry)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

static void wbuff_debugfs_exit(void)
{
	if (!wbuff.wbuff_debugfs_dir)
		return;

	debugfs_remove_recursive(wbuff.wbuff_debugfs_dir);
	wbuff.wbuff_debugfs_dir = NULL;
}

QDF_STATUS wbuff_module_init(void)
{
	struct wbuff_module *mod = NULL;
	uint8_t module_id = 0, pool_id = 0;

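	/*
	 * wbuff relies on the nbuf dev_scratch field to identify its
	 * buffers on the free path, so the feature is unusable when
	 * dev_scratch is not supported by the platform.
	 */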
	if (!qdf_nbuf_is_dev_scratch_supported()) {
		wbuff.initialized = false;
		return QDF_STATUS_E_NOSUPPORT;
	}

	for (module_id = 0; module_id < WBUFF_MAX_MODULES; module_id++) {
		mod = &wbuff.mod[module_id];
		qdf_spinlock_create(&mod->lock);
		for (pool_id = 0; pool_id < WBUFF_MAX_POOLS; pool_id++)
			mod->wbuff_pool[pool_id].pool = NULL;
		mod->registered = false;
	}

	wbuff_debugfs_init();

	wbuff.initialized = true;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS wbuff_module_deinit(void)
{
	struct wbuff_module *mod = NULL;
	uint8_t module_id = 0;

	if (!wbuff.initialized)
		return QDF_STATUS_E_INVAL;

	wbuff_debugfs_exit();

	for (module_id = 0; module_id < WBUFF_MAX_MODULES; module_id++) {
		mod = &wbuff.mod[module_id];
		if (mod->registered)
			wbuff_module_deregister((struct wbuff_mod_handle *)
						&mod->handle);
		qdf_spinlock_destroy(&mod->lock);
	}

	/*
	 * Clear the initialized flag only after the deregister calls
	 * above: wbuff_module_deregister() rejects requests once the
	 * flag is cleared, which would leak every pool buffer.
	 */
	wbuff.initialized = false;

	return QDF_STATUS_SUCCESS;
}

struct wbuff_mod_handle *
wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num_pools,
		      int reserve, int align, enum wbuff_module_id module_id)
{
	struct wbuff_module *mod = NULL;
	struct wbuff_pool *wbuff_pool;
	qdf_nbuf_t buf = NULL;
	uint32_t len;
	uint16_t pool_size;
	uint8_t pool_id;
	int i;
	int j;

	if (!wbuff.initialized)
		return NULL;

	if ((num_pools == 0) || (num_pools > WBUFF_MAX_POOLS))
		return NULL;

	if (module_id >= WBUFF_MAX_MODULES)
		return NULL;

	if (!wbuff_is_valid_alloc_req(req, num_pools))
		return NULL;

	mod = &wbuff.mod[module_id];
	if (mod->registered)
		return NULL;

	mod->handle.id = module_id;

	for (i = 0; i < num_pools; i++) {
		pool_id = req[i].pool_id;
		pool_size = req[i].pool_size;
		len = req[i].buffer_size;
		wbuff_pool = &mod->wbuff_pool[pool_id];

		if (!pool_size)
			continue;

		/*
		 * Allocate pool_size number of buffers for
		 * the pool given by pool_id
		 */
		for (j = 0; j < pool_size; j++) {
			buf = wbuff_prepare_nbuf(module_id, pool_id, len,
						 reserve, align);
			if (!buf)
				continue;

			if (!wbuff_pool->pool)
				qdf_nbuf_set_next(buf, NULL);
			else
				qdf_nbuf_set_next(buf, wbuff_pool->pool);

			wbuff_pool->pool = buf;
		}

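		/* Publish pool metadata; the initialized flag gates get/put */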
		wbuff_pool->pool_id = pool_id;
		wbuff_pool->buffer_size = len;
		wbuff_pool->initialized = true;
	}

	mod->reserve = reserve;
	mod->align = align;
	mod->registered = true;

	return (struct wbuff_mod_handle *)&mod->handle;
}

QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
{
	struct wbuff_handle *handle;
	struct wbuff_module *mod = NULL;
	uint8_t module_id = 0, pool_id = 0;
	qdf_nbuf_t first = NULL, buf = NULL;
	struct wbuff_pool *wbuff_pool;

	handle = (struct wbuff_handle *)hdl;

	if (!wbuff.initialized || !wbuff_is_valid_handle(handle))
		return QDF_STATUS_E_INVAL;

	module_id = handle->id;
	mod = &wbuff.mod[module_id];

	qdf_spin_lock_bh(&mod->lock);
	for (pool_id = 0; pool_id < WBUFF_MAX_POOLS; pool_id++) {
		wbuff_pool = &mod->wbuff_pool[pool_id];

		if (!wbuff_pool->initialized)
			continue;

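		/* Walk the free list and release every buffer */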
		first = wbuff_pool->pool;
		while (first) {
			buf = first;
			first = qdf_nbuf_next(buf);
			qdf_nbuf_free(buf);
		}

		/*
		 * Clear the stale list head and the initialized flag so
		 * a later re-registration cannot link new buffers onto
		 * freed ones.
		 */
		wbuff_pool->pool = NULL;
		wbuff_pool->initialized = false;

		wbuff_pool->mem_alloc = 0;
		wbuff_pool->alloc_success = 0;
		wbuff_pool->alloc_fail = 0;
	}
	mod->registered = false;
	qdf_spin_unlock_bh(&mod->lock);

	return QDF_STATUS_SUCCESS;
}

qdf_nbuf_t
wbuff_buff_get(struct wbuff_mod_handle *hdl, uint8_t pool_id, uint32_t len,
	       const char *func_name, uint32_t line_num)
{
	struct wbuff_handle *handle;
	struct wbuff_module *mod = NULL;
	struct wbuff_pool *wbuff_pool;
	uint8_t module_id = 0;
	qdf_nbuf_t buf = NULL;

	handle = (struct wbuff_handle *)hdl;

	if (!wbuff.initialized || !wbuff_is_valid_handle(handle) ||
	    (pool_id >= WBUFF_MAX_POOL_ID && !len))
		return NULL;

	module_id = handle->id;
	mod = &wbuff.mod[module_id];

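	/*
	 * WBUFF_MAX_POOL_ID acts as a sentinel for "no specific pool":
	 * in that case the pool is picked from the requested length.
	 */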
	if (pool_id == WBUFF_MAX_POOL_ID && len)
		pool_id = wbuff_get_pool_slot_from_len(mod, len);

	if (pool_id >= WBUFF_MAX_POOLS)
		return NULL;

	wbuff_pool = &mod->wbuff_pool[pool_id];
	if (!wbuff_pool->initialized)
		return NULL;

	qdf_spin_lock_bh(&mod->lock);
	if (wbuff_pool->pool) {
		buf = wbuff_pool->pool;
		wbuff_pool->pool = qdf_nbuf_next(buf);
		mod->pending_returns++;
	}
	qdf_spin_unlock_bh(&mod->lock);

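	/*
	 * Stats and the debug-tracker update happen outside the lock;
	 * qdf_net_buf_debug_update_node() re-attributes the buffer to
	 * the actual caller for leak tracking.
	 */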
	if (buf) {
		qdf_nbuf_set_next(buf, NULL);
		qdf_net_buf_debug_update_node(buf, func_name, line_num);
		wbuff_pool->alloc_success++;
	} else {
		wbuff_pool->alloc_fail++;
	}

	return buf;
}

qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
{
	qdf_nbuf_t buffer = buf;
	unsigned long pool_info = 0;
	uint8_t module_id = 0, pool_id = 0;
	struct wbuff_pool *wbuff_pool;

	if (!wbuff.initialized)
		return buffer;

	pool_info = qdf_nbuf_get_dev_scratch(buf);
	if (!pool_info)
		return buffer;

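	/* Decode the owning module and pool stamped at allocation time */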
	module_id = (pool_info & WBUFF_MODULE_ID_BITMASK) >>
			WBUFF_MODULE_ID_SHIFT;
	pool_id = (pool_info & WBUFF_POOL_ID_BITMASK) >> WBUFF_POOL_ID_SHIFT;

	if (module_id >= WBUFF_MAX_MODULES || pool_id >= WBUFF_MAX_POOLS)
		return buffer;

	wbuff_pool = &wbuff.mod[module_id].wbuff_pool[pool_id];
	if (!wbuff_pool->initialized)
		return buffer;

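	/*
	 * Restore headroom and alignment so the buffer re-enters the
	 * pool looking like a freshly allocated nbuf.
	 */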
	qdf_nbuf_reset(buffer, wbuff.mod[module_id].reserve,
		       wbuff.mod[module_id].align);

	qdf_spin_lock_bh(&wbuff.mod[module_id].lock);
	if (wbuff.mod[module_id].registered) {
		qdf_nbuf_set_next(buffer, wbuff_pool->pool);
		wbuff_pool->pool = buffer;
		wbuff.mod[module_id].pending_returns--;
		buffer = NULL;
	}
	qdf_spin_unlock_bh(&wbuff.mod[module_id].lock);

	return buffer;
}