xref: /wlan-dirver/platform/cnss_prealloc/cnss_prealloc.c (revision 2fd60f792b64ddb3dbe6cb492429e28ff9f5242f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/version.h>
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "cnss_prealloc.h"
#else
#include <net/cnss_prealloc.h>
#endif

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CNSS prealloc driver");

/* The cnss preallocation scheme is a set of memory pools that keeps a
 * reserve of free buffers for use in emergencies, i.e. when a regular
 * allocation fails. It is built on two kernel features: mempool and
 * kmem_cache.
 */

struct cnss_pool {
	size_t size;
	int min;
	const char name[50];
	mempool_t *mp;
	struct kmem_cache *cache;
};

/**
 * Memory pool
 * -----------
 *
 * How to update this table:
 *
 *  1. Add a new row with the following elements (an illustrative entry is
 *     shown after the table below):
 *      size  : Size of one allocation unit in bytes.
 *      min   : Minimum units to be reserved. Used only if a regular
 *              allocation fails.
 *      name  : Name of the cache/pool. Will be displayed in /proc/slabinfo
 *              if not merged with another pool.
 *      mp    : A pointer to the memory pool. Updated during init.
 *      cache : A pointer to the cache. Updated during init.
 *  2. Always keep the table sorted in increasing order of size.
 *  3. Keep the reserve counts as small as possible, since the reserve is
 *     always preallocated.
 *  4. Always profile with different use cases after updating this table.
 *  5. A dynamic view of these pools is available at /proc/slabinfo.
 *  6. Each pool has a sysfs node at /sys/kernel/slab/<name>.
 *
 */

/* size, min pool reserve, name, mempool handle, cache handle */
static struct cnss_pool cnss_pools[] = {
	{8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
};

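/* For illustration only: a hypothetical 256 KB pool would be appended as a
 * new last row, keeping the table sorted by size. The reserve count below
 * is made up; profile real use cases before choosing one.
 *
 *	{256 * 1024, 4, "cnss-pool-256k", NULL, NULL},
 */
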
/**
 * cnss_pool_alloc_threshold() - Allocation threshold
 *
 * Minimum memory size to be served from the cnss pools. Smaller requests
 * are expected to use the regular allocator.
 *
 * Return: Size
 *
 */
static inline size_t cnss_pool_alloc_threshold(void)
{
	return cnss_pools[0].size;
}

/**
 * cnss_pool_init() - Initialize memory pools.
 *
 * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
 * the caller to invoke cnss_pool_deinit() routine to clean it up. This
 * function needs to be called at early boot to preallocate minimum buffers in
 * the pool.
 *
 * Return: 0 - success, otherwise error code.
 *
 */
static int cnss_pool_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
		/* Create the slab cache */
		cnss_pools[i].cache =
			kmem_cache_create_usercopy(cnss_pools[i].name,
						   cnss_pools[i].size, 0,
						   SLAB_ACCOUNT, 0,
						   cnss_pools[i].size, NULL);
		if (!cnss_pools[i].cache) {
			pr_err("cnss_prealloc: cache %s failed\n",
			       cnss_pools[i].name);
			continue;
		}

		/* Create the pool and associate it with the slab cache */
		cnss_pools[i].mp =
		    mempool_create(cnss_pools[i].min, mempool_alloc_slab,
				   mempool_free_slab, cnss_pools[i].cache);

		if (!cnss_pools[i].mp) {
			pr_err("cnss_prealloc: mempool %s failed\n",
			       cnss_pools[i].name);
			kmem_cache_destroy(cnss_pools[i].cache);
			cnss_pools[i].cache = NULL;
			continue;
		}

		pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
			cnss_pools[i].name, cnss_pools[i].min,
			cnss_pools[i].size);
	}

	return 0;
}

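/* With the default table above, a successful init would log lines of the
 * following shape (derived from the pr_info() format string and the table
 * values, not captured output):
 *
 *	cnss_prealloc: created mempool cnss-pool-8k of min size 16 * 8192
 *	cnss_prealloc: created mempool cnss-pool-16k of min size 16 * 16384
 */
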
/**
 * cnss_pool_deinit() - Free memory pools.
 *
 * Free the memory pools and return their resources to the system. A warning
 * is raised if any element is still pending in a memory pool or cache.
 *
 */
static void cnss_pool_deinit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
		pr_info("cnss_prealloc: destroy mempool %s\n",
			cnss_pools[i].name);
		mempool_destroy(cnss_pools[i].mp);
		kmem_cache_destroy(cnss_pools[i].cache);
	}
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
/* In kernel 5.17, slab_cache was removed from struct page, so the owning
 * cache is instead stored in headroom reserved at the beginning of each
 * buffer. wcnss_prealloc_get() hands out the address just past the headroom
 * so that callers cannot clobber the stored pointer.
 */
static inline size_t cnss_pool_headroom(void)
{
	return sizeof(struct kmem_cache *);
}

static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
{
	/* put cache at the beginning of mem */
	*(struct kmem_cache **)mem = cache;
}

static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
{
	/* read cache from the beginning of mem */
	return *(struct kmem_cache **)mem;
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
/* For older kernels (< 5.17) we use page->slab_cache, so no headroom is
 * reserved in the memory buffer to store the cache.
 */
static inline size_t cnss_pool_headroom(void)
{
	return 0;
}

static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
{
}

static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
{
	struct page *page;

	if (!virt_addr_valid(mem))
		return NULL;

	/* mem -> page -> cache */
	page = virt_to_head_page(mem);
	if (!page)
		return NULL;

	return page->slab_cache;
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */

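/* Illustrative buffer layout on kernel >= 5.17 (the headroom is 8 bytes on
 * a 64-bit kernel); on older kernels the headroom is zero and base equals
 * the returned pointer:
 *
 *	base              base + headroom
 *	|                 |
 *	v                 v
 *	+-----------------+----------------------------------+
 *	| kmem_cache *    | buffer returned to the caller    |
 *	+-----------------+----------------------------------+
 */
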
/**
 * cnss_pool_get_index() - Get the index of memory pool
 * @mem: Allocated memory (pool-internal base address, including headroom)
 *
 * Returns the index of the memory pool which fits the requested memory. The
 * complexity of this check is O(num of memory pools). Returns a negative
 * value with error code in case of failure.
 *
 */
static int cnss_pool_get_index(void *mem)
{
	struct kmem_cache *cache;
	int i;

	cache = cnss_pool_get_cache_from_mem(mem);
	if (!cache)
		return -ENOENT;

	/* Check if the memory belongs to one of our pools */
	for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
		if (cnss_pools[i].cache == cache)
			return i;
	}

	return -ENOENT;
}

/**
 * wcnss_prealloc_get() - Get preallocated memory from a pool
 * @size: Size to allocate
 *
 * The memory pool is chosen based on the size. If memory is not available in
 * a given pool, the next higher sized pool is tried until one succeeds.
 *
 * Return: A void pointer to the allocated memory, or NULL on failure
 */
void *wcnss_prealloc_get(size_t size)
{
	void *mem = NULL;
	gfp_t gfp_mask = __GFP_ZERO;
	int i;

	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
		gfp_mask |= GFP_ATOMIC;
	else
		gfp_mask |= GFP_KERNEL;

	if (size >= cnss_pool_alloc_threshold()) {
		for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
			/* The request plus headroom must fit in one pool unit */
			if (cnss_pools[i].size >= size + cnss_pool_headroom() &&
			    cnss_pools[i].mp) {
				mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
				if (mem) {
					cnss_pool_put_cache_in_mem(mem, cnss_pools[i].cache);
					mem += cnss_pool_headroom();
					break;
				}
			}
		}
	}

	if (!mem && size >= cnss_pool_alloc_threshold()) {
		pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
			 size, gfp_mask);
	}

	return mem;
}
EXPORT_SYMBOL(wcnss_prealloc_get);

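/* Hypothetical caller pattern (the names are made up; the actual wlan host
 * driver wraps this differently): try the pools first for a large buffer
 * and fall back to the regular allocator if they are exhausted:
 *
 *	void *buf = wcnss_prealloc_get(len);
 *
 *	if (!buf)
 *		buf = kzalloc(len, GFP_KERNEL);
 */
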
/**
 * wcnss_prealloc_put() - Release allocated memory
 * @mem: Allocated memory, as returned by wcnss_prealloc_get()
 *
 * Free memory obtained from wcnss_prealloc_get(). The buffer is returned to
 * the pool reserve if the pool is below its minimum, otherwise it is freed
 * back to the slab cache.
 *
 * Return: 1 - success
 *         0 - fail
 */
int wcnss_prealloc_put(void *mem)
{
	int i;

	if (!mem)
		return 0;

	/* Step back over the headroom to the pool-internal base address */
	mem -= cnss_pool_headroom();

	i = cnss_pool_get_index(mem);
	if (i >= 0 && i < ARRAY_SIZE(cnss_pools) && cnss_pools[i].mp) {
		mempool_free(mem, cnss_pools[i].mp);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(wcnss_prealloc_put);

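/* Hypothetical free-side counterpart of the pattern above: offer the buffer
 * back to the pools first, and only kfree() it if it was not pool memory:
 *
 *	if (!wcnss_prealloc_put(buf))
 *		kfree(buf);
 */
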
/* Not implemented. Make use of Linux SLAB features. */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);

/* Not implemented. Make use of Linux SLAB features. */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);

/**
 * cnss_prealloc_is_valid_dt_node_found() - Check if a valid device tree
 *                                          node is present
 *
 * A valid device tree node here means a node with the "qcom,wlan" property
 * present and the "status" property not disabled.
 *
 * Return: true if a valid device tree node is found, false otherwise
 */
static bool cnss_prealloc_is_valid_dt_node_found(void)
{
	struct device_node *dn = NULL;

	for_each_node_with_property(dn, "qcom,wlan") {
		if (of_device_is_available(dn))
			break;
	}

	if (dn) {
		/* Drop the reference held by the interrupted iteration */
		of_node_put(dn);
		return true;
	}

	return false;
}

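/* Illustrative (made-up) device tree fragment that would satisfy the check
 * above: the node carries a "qcom,wlan" property and its status is not
 * "disabled". The node name and label are hypothetical:
 *
 *	wlan_fw: qcom_wlan {
 *		qcom,wlan;
 *		status = "okay";
 *	};
 */
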
static int __init cnss_prealloc_init(void)
{
	if (!cnss_prealloc_is_valid_dt_node_found())
		return -ENODEV;

	return cnss_pool_init();
}

static void __exit cnss_prealloc_exit(void)
{
	cnss_pool_deinit();
}

module_init(cnss_prealloc_init);
module_exit(cnss_prealloc_exit);