// SPDX-License-Identifier: GPL-2.0-only
/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages. Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page". The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page. The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density. The ratio of zpages
 * to zbud pages cannot be less than 1. This ensures that zbud can never "do
 * harm" by using more pages to store zpages than the uncompressed zpages would
 * have used on their own.
 *
 * zbud pages are divided into "chunks". The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer. The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data, and unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
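
/*
 * Example (an illustrative sketch, not taken from an in-tree user): a typical
 * caller pairs each zbud_map() with a zbud_unmap() and eventually frees the
 * handle. The names `len' and `src' below are hypothetical.
 *
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL);
 *	unsigned long handle;
 *	void *addr;
 *
 *	if (pool && zbud_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		addr = zbud_map(pool, handle);
 *		memcpy(addr, src, len);
 *		zbud_unmap(pool, handle);
 *		zbud_free(pool, handle);
 *	}
 *	zbud_destroy_pool(pool);
 */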

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. An NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Since one
 * chunk of each allocated page is occupied by the zbud header, NCHUNKS works
 * out to 63, the maximum number of free chunks in a zbud page; there are
 * likewise 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
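
/*
 * Worked example, assuming a 4 KiB page (PAGE_SHIFT == 12): CHUNK_SHIFT is
 * 12 - 6 == 6, so CHUNK_SIZE is 64 bytes and ZHDR_SIZE_ALIGNED reserves the
 * first 64-byte chunk for the header, leaving NCHUNKS == (4096 - 64) >> 6
 * == 63 allocatable chunks (and hence 63 freelists per pool).
 */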

struct zbud_pool;

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock: protects all pool fields and the first_chunks/last_chunks fields
 *	of any zbud page in the pool
 * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
 *	the list each zbud page is added to depends on the size of its
 *	free region.
 * @buddied: list tracking the zbud pages that contain two buddies;
 *	these zbud pages are full
 * @pages_nr: number of zbud pages in the pool.
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	union {
		/*
		 * Reuse unbuddied[0] as buddied on the grounds that
		 * unbuddied[0] is unused.
		 */
		struct list_head buddied;
		struct list_head unbuddied[NCHUNKS];
	};
	u64 pages_nr;
};

/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *	zbud page.
 * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
 * @first_chunks: the size of the first buddy in chunks, 0 if free
 * @last_chunks: the size of the last buddy in chunks, 0 if free
 */
struct zbud_header {
	struct list_head buddy;
	unsigned int first_chunks;
	unsigned int last_chunks;
};

/*****************
 * Helpers
 *****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
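
/*
 * For instance, with 64-byte chunks (a 4 KiB page assumed):
 * size_to_chunks(1) == 1, size_to_chunks(64) == 1, and
 * size_to_chunks(65) == 2 -- sizes always round up to whole chunks.
 */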

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);
	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}

/* Frees the zbud page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page.
 * The pool lock should be held, as this function accesses first_chunks
 * and last_chunks.
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case. A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}
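
/*
 * Worked example with hypothetical addresses (4 KiB page, 64-byte chunks):
 * for a zbud page at 0xffff888012340000 whose last buddy spans 10 chunks,
 * the FIRST handle is 0xffff888012340040 (page + ZHDR_SIZE_ALIGNED) and the
 * LAST handle is 0xffff888012340d80 (page + 4096 - 10 * 64).
 */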

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
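
/*
 * For example (values assumed): a page whose first buddy occupies 4 chunks
 * and whose last buddy is free (last_chunks == 0) reports 63 - 4 - 0 == 59
 * free chunks and so lives on unbuddied[59]; pages with both buddies in use
 * are kept on the buddied list instead.
 */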

/*****************
 * API Functions
 *****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp: gfp flags when allocating the zbud pool structure
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
static struct zbud_pool *zbud_create_pool(gfp_t gfp)
{
	struct zbud_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	pool->pages_nr = 0;
	return pool;
}

/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool: the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}

/**
 * zbud_alloc() - allocates a region of a given size
 * @pool: zbud pool from which to allocate
 * @size: size in bytes of the desired allocation
 * @gfp: gfp flags used if the pool needs to grow
 * @handle: handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation can never fit in a
 * zbud page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
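
/*
 * Allocation walk-through of the function above (sizes assumed, 64-byte
 * chunks): a 300-byte request needs size_to_chunks(300) == 5 chunks, so the
 * search starts at unbuddied[5] and takes the first page found on lists
 * 5..62, filling whichever buddy is free; if every list is empty, a fresh
 * page is allocated and the request becomes its FIRST buddy.
 */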

/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool: pool in which the allocation resided
 * @handle: handle associated with the allocation returned by zbud_alloc()
 */
static void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle - ZHDR_SIZE_ALIGNED will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}
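
/*
 * Free walk-through (values assumed): freeing the LAST buddy of a page whose
 * FIRST buddy still holds 5 chunks leaves 63 - 5 == 58 free chunks, so the
 * page moves to unbuddied[58]; once the FIRST buddy is freed as well, both
 * counts are zero and the page is returned to the page allocator.
 */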

/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Return: a pointer to the mapped allocation
 */
static void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be unmapped
 */
static void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}

/**
 * zbud_get_pool_pages() - gets the zbud pool size in pages
 * @pool: pool whose size is being queried
 *
 * Return: size in pages of the given pool. The pool lock need not be
 * taken to access pages_nr.
 */
static u64 zbud_get_pool_pages(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

/*****************
 * zpool
 *****************/

static void *zbud_zpool_create(const char *name, gfp_t gfp)
{
	return zbud_create_pool(gfp);
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_pages(void *pool)
{
	return zbud_get_pool_pages(pool);
}

static struct zpool_driver zbud_zpool_driver = {
	.type = "zbud",
	.sleep_mapped = true,
	.owner = THIS_MODULE,
	.create = zbud_zpool_create,
	.destroy = zbud_zpool_destroy,
	.malloc = zbud_zpool_malloc,
	.free = zbud_zpool_free,
	.map = zbud_zpool_map,
	.unmap = zbud_zpool_unmap,
	.total_pages = zbud_zpool_total_pages,
};
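
/*
 * A minimal sketch of reaching this driver through the zpool facade (the
 * client, pool name, and error handling are assumptions, not an in-tree
 * user; `len' and `src' are hypothetical):
 *
 *	struct zpool *zpool = zpool_create_pool("zbud", "example", GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (!zpool)
 *		return -ENOMEM;
 *	if (!zpool_malloc(zpool, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zpool, handle);
 *		zpool_free(zpool, handle);
 *	}
 *	zpool_destroy_pool(zpool);
 */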

MODULE_ALIAS("zpool-zbud");

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

	zpool_register_driver(&zbud_zpool_driver);

	return 0;
}

static void __exit exit_zbud(void)
{
	zpool_unregister_driver(&zbud_zpool_driver);
	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");