Lines Matching +full:page +full:- +full:size
10 * Released under the terms of 3-clause BSD License
16 * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
23 * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
26 * Usage of struct page flags:
27 * PG_private: identifies the first component page
28 * PG_owner_priv_1: identifies the huge component page
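A minimal sketch (not from the source) of how the fields above are typically consumed, using helpers that appear later in this listing; "page" stands for any component page of the zspage:

	struct zspage *zspage = get_zspage(page);	/* read back via page->private */
	struct page *p;

	/* page->index links the component pages; the chain is NULL-terminated */
	for (p = get_first_page(zspage); p; p = get_next_page(p)) {
		/* PagePrivate(p) is set only on the first component page */
	}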
37 * pool->migrate_lock
38 * class->lock
39 * zspage->lock
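A hedged sketch of the nesting order above as it would appear in a migration-style path; the lock fields are the ones named in this file, everything else (and all error handling) is omitted:

	write_lock(&pool->migrate_lock);	/* outermost: blocks handle-to-page lookups */
	spin_lock(&class->lock);		/* per size-class fullness lists and stats */
	migrate_write_lock(zspage);		/* innermost: zspage->lock */

	/* ... move objects / replace pages ... */

	migrate_write_unlock(zspage);
	spin_unlock(&class->lock);
	write_unlock(&pool->migrate_lock);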
74 * span more than 1 page, which avoids the complex case of mapping 2 pages simply
102 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
108 * header keeps handle which is a 4-byte-aligned address so we
116 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
117 #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
133 * On systems with 4K page size, this gives 255 size classes! There is a
134 * trade-off here:
135 * - A large number of size classes is potentially wasteful as free pages are
137 * - A small number of size classes causes large internal fragmentation
138 * - Probably it's better to use specific size classes (empirically
146 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
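A worked instance of the macro above, assuming the common 4K-page values (PAGE_SIZE = 4096, CLASS_BITS = 8, ZS_MIN_ALLOC_SIZE = 32); the DEMO_* names are local to this example:

enum {
	DEMO_PAGE_SIZE        = 4096,
	DEMO_MIN_ALLOC_SIZE   = 32,
	DEMO_SIZE_CLASS_DELTA = DEMO_PAGE_SIZE >> 8,	/* = 16 */
};

/* DIV_ROUND_UP(4096 - 32, 16) + 1 == 254 + 1 == 255 */
_Static_assert((DEMO_PAGE_SIZE - DEMO_MIN_ALLOC_SIZE + DEMO_SIZE_CLASS_DELTA - 1) /
	       DEMO_SIZE_CLASS_DELTA + 1 == 255,
	       "matches the '255 size classes' note above");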
151 * of ->inuse objects to all objects that page can store). For example,
155 * difference between the least busy page in the group (minimum permitted
156 * number of ->inuse objects) and the most busy page (maximum permitted
157 * number of ->inuse objects) at a reasonable value.
189 * Size of objects stored in this class. Must be multiple
192 int size; member
203 * For every zspage, zspage->freeobj gives head of this list.
211 * It's valid for non-allocated object
241 /* protect page/zspage migration */
255 struct page *first_page;
271 zspage->huge = 1; in SetZsHugePage()
276 return zspage->huge; in ZsHugePage()
299 name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); in create_cache()
301 return -ENOMEM; in create_cache()
302 pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE, in create_cache()
305 if (!pool->handle_cachep) in create_cache()
306 return -EINVAL; in create_cache()
308 name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name); in create_cache()
310 return -ENOMEM; in create_cache()
311 pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage), in create_cache()
314 if (!pool->zspage_cachep) { in create_cache()
315 kmem_cache_destroy(pool->handle_cachep); in create_cache()
316 pool->handle_cachep = NULL; in create_cache()
317 return -EINVAL; in create_cache()
325 kmem_cache_destroy(pool->handle_cachep); in destroy_cache()
326 kmem_cache_destroy(pool->zspage_cachep); in destroy_cache()
331 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, in cache_alloc_handle()
337 kmem_cache_free(pool->handle_cachep, (void *)handle); in cache_free_handle()
342 return kmem_cache_zalloc(pool->zspage_cachep, in cache_alloc_zspage()
348 kmem_cache_free(pool->zspage_cachep, zspage); in cache_free_zspage()
351 /* class->lock(which owns the handle) synchronizes races */
376 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument
379 *handle = zs_malloc(pool, size, gfp); in zs_zpool_malloc()
433 MODULE_ALIAS("zpool-zsmalloc");
436 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
441 static __maybe_unused int is_first_page(struct page *page) in is_first_page() argument
443 return PagePrivate(page); in is_first_page()
446 /* Protected by class->lock */
449 return zspage->inuse; in get_zspage_inuse()
455 zspage->inuse += val; in mod_zspage_inuse()
458 static inline struct page *get_first_page(struct zspage *zspage) in get_first_page()
460 struct page *first_page = zspage->first_page; in get_first_page()
468 static inline unsigned int get_first_obj_offset(struct page *page) in get_first_obj_offset() argument
470 VM_WARN_ON_ONCE(!PageZsmalloc(page)); in get_first_obj_offset()
471 return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK; in get_first_obj_offset()
474 static inline void set_first_obj_offset(struct page *page, unsigned int offset) in set_first_obj_offset() argument
478 VM_WARN_ON_ONCE(!PageZsmalloc(page)); in set_first_obj_offset()
480 page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK; in set_first_obj_offset()
481 page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK; in set_first_obj_offset()
486 return zspage->freeobj; in get_freeobj()
491 zspage->freeobj = obj; in set_freeobj()
497 return pool->size_class[zspage->class]; in zspage_class()
501 * zsmalloc divides the pool into various size classes where each
504 * classes depending on its size. This function returns index of the
505 * size class which has chunk size big enough to hold the given size.
507 static int get_size_class_index(int size) in get_size_class_index() argument
511 if (likely(size > ZS_MIN_ALLOC_SIZE)) in get_size_class_index()
512 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
515 return min_t(int, ZS_SIZE_CLASSES - 1, idx); in get_size_class_index()
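A hedged worked example of the lookup above for a 100-byte allocation, again assuming ZS_MIN_ALLOC_SIZE = 32 and ZS_SIZE_CLASS_DELTA = 16:

	/*
	 * size = 100:
	 *	idx = DIV_ROUND_UP(100 - 32, 16) = DIV_ROUND_UP(68, 16) = 5
	 * the class at index 5 stores 32 + 5 * 16 = 112-byte chunks, the
	 * smallest class size that still fits a 100-byte object.
	 */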
521 class->stats.objs[type] += cnt; in class_stat_add()
527 class->stats.objs[type] -= cnt; in class_stat_sub()
532 return class->stats.objs[type]; in class_stat_read()
557 struct zs_pool *pool = s->private; in zs_stats_size_show()
566 "class", "size", "10%", "20%", "30%", "40%", in zs_stats_size_show()
573 class = pool->size_class[i]; in zs_stats_size_show()
575 if (class->index != i) in zs_stats_size_show()
578 spin_lock(&class->lock); in zs_stats_size_show()
580 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
589 spin_unlock(&class->lock); in zs_stats_size_show()
591 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
593 class->pages_per_zspage; in zs_stats_size_show()
597 class->pages_per_zspage, freeable); in zs_stats_size_show()
626 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); in zs_pool_stat_create()
628 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, in zs_pool_stat_create()
634 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_destroy()
657 * For each size class, zspages are divided into different groups
659 * status of the given page.
666 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
675 * Take integer division into consideration: a page with one inuse in get_fullness_group()
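A hedged numeric illustration of the rounding note above, assuming the group is derived as 100 * inuse / objs_per_zspage, divided by 10 and offset by one; objs_per_zspage = 127 is just an example value:

	/*
	 * inuse = 1, objs_per_zspage = 127:
	 *	ratio = 100 * 1 / 127 = 0	(integer division)
	 *	group = ratio / 10 + 1 = 1	-> ZS_INUSE_RATIO_10
	 * so the offset keeps a barely-used zspage out of the empty group.
	 */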
683 * Each size class maintains various freelists and zspages are assigned
693 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
694 zspage->fullness = fullness; in insert_zspage()
703 int fullness = zspage->fullness; in remove_zspage()
705 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
707 list_del_init(&zspage->list); in remove_zspage()
712 * Each size class maintains zspages in different fullness groups depending
714 * objects, the fullness status of the page can change, for instance, from
716 * checks if such a status change has occurred for the given page and
717 * accordingly moves the page from the list of the old fullness group to that
725 if (newfg == zspage->fullness) in fix_fullness_group()
734 static struct zspage *get_zspage(struct page *page) in get_zspage() argument
736 struct zspage *zspage = (struct zspage *)page_private(page); in get_zspage()
738 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage()
742 static struct page *get_next_page(struct page *page) in get_next_page() argument
744 struct zspage *zspage = get_zspage(page); in get_next_page()
749 return (struct page *)page->index; in get_next_page()
753 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
755 * @page: page object resides in zspage
758 static void obj_to_location(unsigned long obj, struct page **page, in obj_to_location() argument
761 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); in obj_to_location()
765 static void obj_to_page(unsigned long obj, struct page **page) in obj_to_page() argument
767 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); in obj_to_page()
771 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
772 * @page: page object resides in zspage
775 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) in location_to_obj() argument
779 obj = page_to_pfn(page) << OBJ_INDEX_BITS; in location_to_obj()
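A stand-alone, hedged re-implementation of the <PFN, obj_idx> packing used by location_to_obj()/obj_to_location(), runnable in user space; the 12-bit index width is an arbitrary example, the real split comes from _PFN_BITS/OBJ_INDEX_BITS above:

#include <assert.h>
#include <stdio.h>

#define DEMO_OBJ_INDEX_BITS	12UL
#define DEMO_OBJ_INDEX_MASK	((1UL << DEMO_OBJ_INDEX_BITS) - 1)

static unsigned long demo_location_to_obj(unsigned long pfn, unsigned int obj_idx)
{
	/* high bits: page frame number, low bits: object index within the zspage */
	return (pfn << DEMO_OBJ_INDEX_BITS) | (obj_idx & DEMO_OBJ_INDEX_MASK);
}

static void demo_obj_to_location(unsigned long obj, unsigned long *pfn,
				 unsigned int *obj_idx)
{
	*pfn = obj >> DEMO_OBJ_INDEX_BITS;
	*obj_idx = obj & DEMO_OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn;
	unsigned int idx;

	demo_obj_to_location(demo_location_to_obj(0x12345, 7), &pfn, &idx);
	assert(pfn == 0x12345 && idx == 7);
	printf("pfn=%#lx obj_idx=%u\n", pfn, idx);
	return 0;
}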
790 static inline bool obj_allocated(struct page *page, void *obj, in obj_allocated() argument
794 struct zspage *zspage = get_zspage(page); in obj_allocated()
797 VM_BUG_ON_PAGE(!is_first_page(page), page); in obj_allocated()
798 handle = page->index; in obj_allocated()
810 static void reset_page(struct page *page) in reset_page() argument
812 __ClearPageMovable(page); in reset_page()
813 ClearPagePrivate(page); in reset_page()
814 set_page_private(page, 0); in reset_page()
815 page->index = 0; in reset_page()
816 __ClearPageZsmalloc(page); in reset_page()
821 struct page *cursor, *fail; in trylock_zspage()
843 struct page *page, *next; in __free_zspage() local
845 assert_spin_locked(&class->lock); in __free_zspage()
848 VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); in __free_zspage()
850 next = page = get_first_page(zspage); in __free_zspage()
852 VM_BUG_ON_PAGE(!PageLocked(page), page); in __free_zspage()
853 next = get_next_page(page); in __free_zspage()
854 reset_page(page); in __free_zspage()
855 unlock_page(page); in __free_zspage()
856 dec_zone_page_state(page, NR_ZSPAGES); in __free_zspage()
857 put_page(page); in __free_zspage()
858 page = next; in __free_zspage()
859 } while (page != NULL); in __free_zspage()
863 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
864 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
871 VM_BUG_ON(list_empty(&zspage->list)); in free_zspage()
875 * lock_page. The page locks that trylock_zspage acquired will be released in free_zspage()
892 struct page *page = get_first_page(zspage); in init_zspage() local
894 while (page) { in init_zspage()
895 struct page *next_page; in init_zspage()
899 set_first_obj_offset(page, off); in init_zspage()
901 vaddr = kmap_atomic(page); in init_zspage()
904 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
905 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
906 link += class->size / sizeof(*link); in init_zspage()
911 * page, which must point to the first object on the next in init_zspage()
912 * page (if present) in init_zspage()
914 next_page = get_next_page(page); in init_zspage()
916 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
922 link->next = -1UL << OBJ_TAG_BITS; in init_zspage()
925 page = next_page; in init_zspage()
933 struct page *pages[]) in create_page_chain()
936 struct page *page; in create_page_chain() local
937 struct page *prev_page = NULL; in create_page_chain()
938 int nr_pages = class->pages_per_zspage; in create_page_chain()
942 * 1. all pages are linked together using page->index in create_page_chain()
943 * 2. each sub-page point to zspage using page->private in create_page_chain()
945 * we set PG_private to identify the first page (i.e. no other sub-page in create_page_chain()
949 page = pages[i]; in create_page_chain()
950 set_page_private(page, (unsigned long)zspage); in create_page_chain()
951 page->index = 0; in create_page_chain()
953 zspage->first_page = page; in create_page_chain()
954 SetPagePrivate(page); in create_page_chain()
955 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
956 class->pages_per_zspage == 1)) in create_page_chain()
959 prev_page->index = (unsigned long)page; in create_page_chain()
961 prev_page = page; in create_page_chain()
966 * Allocate a zspage for the given size class
973 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; in alloc_zspage()
979 zspage->magic = ZSPAGE_MAGIC; in alloc_zspage()
982 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
983 struct page *page; in alloc_zspage() local
985 page = alloc_page(gfp); in alloc_zspage()
986 if (!page) { in alloc_zspage()
987 while (--i >= 0) { in alloc_zspage()
995 __SetPageZsmalloc(page); in alloc_zspage()
997 inc_zone_page_state(page, NR_ZSPAGES); in alloc_zspage()
998 pages[i] = page; in alloc_zspage()
1003 zspage->pool = pool; in alloc_zspage()
1004 zspage->class = class->index; in alloc_zspage()
1014 for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) { in find_get_zspage()
1015 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1030 if (area->vm_buf) in __zs_cpu_up()
1032 area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); in __zs_cpu_up()
1033 if (!area->vm_buf) in __zs_cpu_up()
1034 return -ENOMEM; in __zs_cpu_up()
1040 kfree(area->vm_buf); in __zs_cpu_down()
1041 area->vm_buf = NULL; in __zs_cpu_down()
1045 struct page *pages[2], int off, int size) in __zs_map_object() argument
1049 char *buf = area->vm_buf; in __zs_map_object()
1051 /* disable page faults to match kmap_atomic() return conditions */ in __zs_map_object()
1055 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1058 sizes[0] = PAGE_SIZE - off; in __zs_map_object()
1059 sizes[1] = size - sizes[0]; in __zs_map_object()
1061 /* copy object to per-cpu buffer */ in __zs_map_object()
1069 return area->vm_buf; in __zs_map_object()
1073 struct page *pages[2], int off, int size) in __zs_unmap_object() argument
1080 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1083 buf = area->vm_buf; in __zs_unmap_object()
1085 size -= ZS_HANDLE_SIZE; in __zs_unmap_object()
1088 sizes[0] = PAGE_SIZE - off; in __zs_unmap_object()
1089 sizes[1] = size - sizes[0]; in __zs_unmap_object()
1091 /* copy per-cpu buffer to object */ in __zs_unmap_object()
1100 /* enable page faults to match kunmap_atomic() return conditions */ in __zs_unmap_object()
1124 if (prev->pages_per_zspage == pages_per_zspage && in can_merge()
1125 prev->objs_per_zspage == objs_per_zspage) in can_merge()
1133 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1142 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
1143 * that holds objects of the provided size.
1145 * @size: object size
1150 * provided size.
1152 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size) in zs_lookup_class_index() argument
1156 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1158 return class->index; in zs_lookup_class_index()
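A hedged caller-side sketch (not from this file): zram's recompression path is one known user, comparing the class indices of the old and new compressed lengths to decide whether recompression actually moved the object to a smaller class; pool, old_len and new_len are placeholders:

	unsigned int old_idx = zs_lookup_class_index(pool, old_len);
	unsigned int new_idx = zs_lookup_class_index(pool, new_len);

	/* recompression only helps if the object lands in a smaller class */
	bool worth_keeping = new_idx < old_idx;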
1164 return atomic_long_read(&pool->pages_allocated); in zs_get_total_pages()
1169 * zs_map_object - get address of allocated object from handle.
1181 * This function returns with preemption and page faults disabled.
1187 struct page *page; in zs_map_object() local
1193 struct page *pages[2]; in zs_map_object()
1197 * Because we use per-cpu mapping areas shared among the in zs_map_object()
1204 read_lock(&pool->migrate_lock); in zs_map_object()
1206 obj_to_location(obj, &page, &obj_idx); in zs_map_object()
1207 zspage = get_zspage(page); in zs_map_object()
1210 * migration cannot move any zpages in this zspage. Here, class->lock in zs_map_object()
1216 read_unlock(&pool->migrate_lock); in zs_map_object()
1219 off = offset_in_page(class->size * obj_idx); in zs_map_object()
1223 area->vm_mm = mm; in zs_map_object()
1224 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1225 /* this object is contained entirely within a page */ in zs_map_object()
1226 area->vm_addr = kmap_atomic(page); in zs_map_object()
1227 ret = area->vm_addr + off; in zs_map_object()
1232 pages[0] = page; in zs_map_object()
1233 pages[1] = get_next_page(page); in zs_map_object()
1236 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1248 struct page *page; in zs_unmap_object() local
1256 obj_to_location(obj, &page, &obj_idx); in zs_unmap_object()
1257 zspage = get_zspage(page); in zs_unmap_object()
1259 off = offset_in_page(class->size * obj_idx); in zs_unmap_object()
1262 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1263 kunmap_atomic(area->vm_addr); in zs_unmap_object()
1265 struct page *pages[2]; in zs_unmap_object()
1267 pages[0] = page; in zs_unmap_object()
1268 pages[1] = get_next_page(page); in zs_unmap_object()
1271 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
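A hedged caller-side sketch of the map/unmap pair above: since zs_map_object() returns with preemption and page faults disabled, the mapped window must be short and must not sleep; pool, handle, dst and len are placeholders:

	void *src = zs_map_object(pool, handle, ZS_MM_RO);

	memcpy(dst, src, len);		/* keep this window short, no sleeping */
	zs_unmap_object(pool, handle);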
1280 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1284 * The function returns the size of the first huge class - any object of equal
1285 * or bigger size will be stored in zspage consisting of a single physical
1286 * page.
1290 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
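A hedged caller-side sketch: compressed-memory users typically cache this value once and use it to decide whether a compressed copy is worth keeping, since any object at or above it occupies a whole page anyway; pool and comp_len are placeholders:

	size_t huge = zs_huge_class_size(pool);

	/* at or above the huge class size, compression saves no memory */
	bool store_compressed = comp_len < huge;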
1306 struct page *m_page; in obj_malloc()
1310 class = pool->size_class[zspage->class]; in obj_malloc()
1313 offset = obj * class->size; in obj_malloc()
1323 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); in obj_malloc()
1326 link->handle = handle | OBJ_ALLOCATED_TAG; in obj_malloc()
1328 /* record handle to page->index */ in obj_malloc()
1329 zspage->first_page->index = handle | OBJ_ALLOCATED_TAG; in obj_malloc()
1342 * zs_malloc - Allocate block of given size from pool.
1344 * @size: size of block to allocate
1349 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1351 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) in zs_malloc() argument
1358 if (unlikely(!size)) in zs_malloc()
1359 return (unsigned long)ERR_PTR(-EINVAL); in zs_malloc()
1361 if (unlikely(size > ZS_MAX_ALLOC_SIZE)) in zs_malloc()
1362 return (unsigned long)ERR_PTR(-ENOSPC); in zs_malloc()
1366 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1369 size += ZS_HANDLE_SIZE; in zs_malloc()
1370 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1372 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1373 spin_lock(&class->lock); in zs_malloc()
1384 spin_unlock(&class->lock); in zs_malloc()
1389 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1392 spin_lock(&class->lock); in zs_malloc()
1396 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1397 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1403 spin_unlock(&class->lock); in zs_malloc()
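A hedged caller-side sketch of the allocation API above: failures come back ERR_PTR()-encoded in the handle (see the returns a few lines up), so IS_ERR_VALUE() is the natural check; pool and len are placeholders:

	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);

	if (IS_ERR_VALUE(handle))
		return PTR_ERR((void *)handle);

	/* ... zs_map_object()/memcpy()/zs_unmap_object() as sketched earlier ... */

	zs_free(pool, handle);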
1413 struct page *f_page; in obj_free()
1427 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; in obj_free()
1429 f_page->index = 0; in obj_free()
1433 mod_zspage_inuse(zspage, -1); in obj_free()
1439 struct page *f_page; in zs_free()
1448 * The pool->migrate_lock protects the race with zpage's migration in zs_free()
1449 * so it's safe to get the page from handle. in zs_free()
1451 read_lock(&pool->migrate_lock); in zs_free()
1456 spin_lock(&class->lock); in zs_free()
1457 read_unlock(&pool->migrate_lock); in zs_free()
1460 obj_free(class->size, obj); in zs_free()
1466 spin_unlock(&class->lock); in zs_free()
1474 struct page *s_page, *d_page; in zs_object_copy()
1478 int s_size, d_size, size; in zs_object_copy() local
1481 s_size = d_size = class->size; in zs_object_copy()
1486 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1487 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1489 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1490 s_size = PAGE_SIZE - s_off; in zs_object_copy()
1492 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1493 d_size = PAGE_SIZE - d_off; in zs_object_copy()
1499 size = min(s_size, d_size); in zs_object_copy()
1500 memcpy(d_addr + d_off, s_addr + s_off, size); in zs_object_copy()
1501 written += size; in zs_object_copy()
1503 if (written == class->size) in zs_object_copy()
1506 s_off += size; in zs_object_copy()
1507 s_size -= size; in zs_object_copy()
1508 d_off += size; in zs_object_copy()
1509 d_size -= size; in zs_object_copy()
1524 s_size = class->size - written; in zs_object_copy()
1532 d_size = class->size - written; in zs_object_copy()
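A hedged worked example of the chunked copy above, assuming PAGE_SIZE = 4096 and class->size = 3000 with the source object starting at s_off = 2000 and the destination at d_off = 0:

	/*
	 * first pass:  s_size = 4096 - 2000 = 2096, d_size = 3000
	 *		copy min(2096, 3000) = 2096 bytes, written = 2096
	 * second pass: source page exhausted, so kmap the next page,
	 *		s_off = 0, s_size = 3000 - 2096 = 904
	 *		copy the remaining 904 bytes, written == class->size, done
	 */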
1546 struct page *page, int *obj_idx) in find_alloced_obj() argument
1551 void *addr = kmap_atomic(page); in find_alloced_obj()
1553 offset = get_first_obj_offset(page); in find_alloced_obj()
1554 offset += class->size * index; in find_alloced_obj()
1557 if (obj_allocated(page, addr + offset, &handle)) in find_alloced_obj()
1560 offset += class->size; in find_alloced_obj()
1577 struct page *s_page = get_first_page(src_zspage); in migrate_zspage()
1578 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage()
1594 obj_free(class->size, used_obj); in migrate_zspage()
1612 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1628 for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) { in isolate_dst_zspage()
1629 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1641 * putback_zspage - add @zspage into right class's fullness list
1643 * @zspage: target page
1664 struct page *curr_page, *page; in lock_zspage() local
1669 * lock each page under migrate_read_lock(). Otherwise, the page we lock in lock_zspage()
1671 * the wrong page to unlock, so we must take a reference to the page in lock_zspage()
1676 page = get_first_page(zspage); in lock_zspage()
1677 if (trylock_page(page)) in lock_zspage()
1679 get_page(page); in lock_zspage()
1681 wait_on_page_locked(page); in lock_zspage()
1682 put_page(page); in lock_zspage()
1685 curr_page = page; in lock_zspage()
1686 while ((page = get_next_page(curr_page))) { in lock_zspage()
1687 if (trylock_page(page)) { in lock_zspage()
1688 curr_page = page; in lock_zspage()
1690 get_page(page); in lock_zspage()
1692 wait_on_page_locked(page); in lock_zspage()
1693 put_page(page); in lock_zspage()
1703 rwlock_init(&zspage->lock); in migrate_lock_init()
1706 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock) in migrate_read_lock()
1708 read_lock(&zspage->lock); in migrate_read_lock()
1711 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock) in migrate_read_unlock()
1713 read_unlock(&zspage->lock); in migrate_read_unlock()
1718 write_lock(&zspage->lock); in migrate_write_lock()
1723 write_unlock(&zspage->lock); in migrate_write_unlock()
1731 struct page *newpage, struct page *oldpage) in replace_sub_page()
1733 struct page *page; in replace_sub_page() local
1734 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; in replace_sub_page()
1737 page = get_first_page(zspage); in replace_sub_page()
1739 if (page == oldpage) in replace_sub_page()
1742 pages[idx] = page; in replace_sub_page()
1744 } while ((page = get_next_page(page)) != NULL); in replace_sub_page()
1749 newpage->index = oldpage->index; in replace_sub_page()
1753 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) in zs_page_isolate() argument
1756 * Page is locked so zspage couldn't be destroyed. For detail, look at in zs_page_isolate()
1759 VM_BUG_ON_PAGE(PageIsolated(page), page); in zs_page_isolate()
1764 static int zs_page_migrate(struct page *newpage, struct page *page, in zs_page_migrate() argument
1770 struct page *dummy; in zs_page_migrate()
1777 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_migrate()
1779 /* We're committed, tell the world that this is a Zsmalloc page. */ in zs_page_migrate()
1782 /* The page is locked, so this pointer must remain valid */ in zs_page_migrate()
1783 zspage = get_zspage(page); in zs_page_migrate()
1784 pool = zspage->pool; in zs_page_migrate()
1790 write_lock(&pool->migrate_lock); in zs_page_migrate()
1796 spin_lock(&class->lock); in zs_page_migrate()
1800 offset = get_first_obj_offset(page); in zs_page_migrate()
1801 s_addr = kmap_atomic(page); in zs_page_migrate()
1811 addr += class->size) { in zs_page_migrate()
1812 if (obj_allocated(page, addr, &handle)) { in zs_page_migrate()
1823 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
1828 write_unlock(&pool->migrate_lock); in zs_page_migrate()
1829 spin_unlock(&class->lock); in zs_page_migrate()
1833 if (page_zone(newpage) != page_zone(page)) { in zs_page_migrate()
1834 dec_zone_page_state(page, NR_ZSPAGES); in zs_page_migrate()
1838 reset_page(page); in zs_page_migrate()
1839 put_page(page); in zs_page_migrate()
1844 static void zs_page_putback(struct page *page) in zs_page_putback() argument
1846 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_putback()
1869 class = pool->size_class[i]; in async_free_zspage()
1870 if (class->index != i) in async_free_zspage()
1873 spin_lock(&class->lock); in async_free_zspage()
1874 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1876 spin_unlock(&class->lock); in async_free_zspage()
1880 list_del(&zspage->list); in async_free_zspage()
1884 spin_lock(&class->lock); in async_free_zspage()
1887 spin_unlock(&class->lock); in async_free_zspage()
1893 schedule_work(&pool->free_work); in kick_deferred_free()
1898 flush_work(&pool->free_work); in zs_flush_migration()
1903 INIT_WORK(&pool->free_work, async_free_zspage); in init_deferred_free()
1908 struct page *page = get_first_page(zspage); in SetZsPageMovable() local
1911 WARN_ON(!trylock_page(page)); in SetZsPageMovable()
1912 __SetPageMovable(page, &zsmalloc_mops); in SetZsPageMovable()
1913 unlock_page(page); in SetZsPageMovable()
1914 } while ((page = get_next_page(page)) != NULL); in SetZsPageMovable()
1934 obj_wasted = obj_allocated - obj_used; in zs_can_compact()
1935 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1937 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
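A hedged numeric example of the estimate above; the class geometry is invented for illustration:

	/*
	 * objs_per_zspage = 32, pages_per_zspage = 1
	 * obj_allocated = 320, obj_used = 200:
	 *	obj_wasted = 320 - 200		= 120 objects
	 *	obj_wasted / objs_per_zspage	= 3 zspages' worth of waste
	 *	-> compaction could free about 3 * 1 = 3 pages
	 */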
1951 write_lock(&pool->migrate_lock); in __zs_compact()
1952 spin_lock(&class->lock); in __zs_compact()
1973 pages_freed += class->pages_per_zspage; in __zs_compact()
1978 || rwlock_is_contended(&pool->migrate_lock)) { in __zs_compact()
1982 spin_unlock(&class->lock); in __zs_compact()
1983 write_unlock(&pool->migrate_lock); in __zs_compact()
1985 write_lock(&pool->migrate_lock); in __zs_compact()
1986 spin_lock(&class->lock); in __zs_compact()
1996 spin_unlock(&class->lock); in __zs_compact()
1997 write_unlock(&pool->migrate_lock); in __zs_compact()
2009 * Pool compaction is performed under pool->migrate_lock so it is basically in zs_compact()
2010 * single-threaded. Having more than one thread in __zs_compact() in zs_compact()
2011 * will increase pool->migrate_lock contention, which will impact other in zs_compact()
2012 * zsmalloc operations that need pool->migrate_lock. in zs_compact()
2014 if (atomic_xchg(&pool->compaction_in_progress, 1)) in zs_compact()
2017 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_compact()
2018 class = pool->size_class[i]; in zs_compact()
2019 if (class->index != i) in zs_compact()
2023 atomic_long_add(pages_freed, &pool->stats.pages_compacted); in zs_compact()
2024 atomic_set(&pool->compaction_in_progress, 0); in zs_compact()
2032 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); in zs_pool_stats()
2040 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_scan()
2058 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_count()
2060 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_shrinker_count()
2061 class = pool->size_class[i]; in zs_shrinker_count()
2062 if (class->index != i) in zs_shrinker_count()
2073 shrinker_free(pool->shrinker); in zs_unregister_shrinker()
2078 pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name); in zs_register_shrinker()
2079 if (!pool->shrinker) in zs_register_shrinker()
2080 return -ENOMEM; in zs_register_shrinker()
2082 pool->shrinker->scan_objects = zs_shrinker_scan; in zs_register_shrinker()
2083 pool->shrinker->count_objects = zs_shrinker_count; in zs_register_shrinker()
2084 pool->shrinker->batch = 0; in zs_register_shrinker()
2085 pool->shrinker->private_data = pool; in zs_register_shrinker()
2087 shrinker_register(pool->shrinker); in zs_register_shrinker()
2114 * zs_create_pool - Creates an allocation pool to work from.
2134 rwlock_init(&pool->migrate_lock); in zs_create_pool()
2135 atomic_set(&pool->compaction_in_progress, 0); in zs_create_pool()
2137 pool->name = kstrdup(name, GFP_KERNEL); in zs_create_pool()
2138 if (!pool->name) in zs_create_pool()
2145 * Iterate in reverse, because the size of the size_class that we want in zs_create_pool()
2146 * to use for merging should be larger than or equal to the current size. in zs_create_pool()
2148 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_create_pool()
2149 int size; in zs_create_pool() local
2155 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; in zs_create_pool()
2156 if (size > ZS_MAX_ALLOC_SIZE) in zs_create_pool()
2157 size = ZS_MAX_ALLOC_SIZE; in zs_create_pool()
2158 pages_per_zspage = calculate_zspage_chain_size(size); in zs_create_pool()
2159 objs_per_zspage = pages_per_zspage * PAGE_SIZE / size; in zs_create_pool()
2163 * so huge_class_size holds the size of the first huge in zs_create_pool()
2169 huge_class_size = size; in zs_create_pool()
2173 * unconditionally adds handle size before it performs in zs_create_pool()
2174 * size class search - so object may be smaller than in zs_create_pool()
2175 * huge class size, yet it still can end up in the huge in zs_create_pool()
2179 huge_class_size -= (ZS_HANDLE_SIZE - 1); in zs_create_pool()
2184 * as alloc/free for that size. Although it is natural that we in zs_create_pool()
2185 * have one size_class for each size, there is a chance that we in zs_create_pool()
2193 pool->size_class[i] = prev_class; in zs_create_pool()
2202 class->size = size; in zs_create_pool()
2203 class->index = i; in zs_create_pool()
2204 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2205 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2206 spin_lock_init(&class->lock); in zs_create_pool()
2207 pool->size_class[i] = class; in zs_create_pool()
2211 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2247 struct size_class *class = pool->size_class[i]; in zs_destroy_pool()
2252 if (class->index != i) in zs_destroy_pool()
2256 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2259 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2260 class->size, fg); in zs_destroy_pool()
2266 kfree(pool->name); in zs_destroy_pool()